LCOV - code coverage report
Current view: top level - zephyr - kernel.h Coverage Total Hit
Test: new.info Lines: 80.2 % 409 328
Test Date: 2025-10-20 06:18:59

            Line data    Source code
       1            1 : /*
       2              :  * Copyright (c) 2016, Wind River Systems, Inc.
       3              :  *
       4              :  * SPDX-License-Identifier: Apache-2.0
       5              :  */
       6              : 
       7              : /**
       8              :  * @file
       9              :  *
      10              :  * @brief Public kernel APIs.
      11              :  */
      12              : 
      13              : #ifndef ZEPHYR_INCLUDE_KERNEL_H_
      14              : #define ZEPHYR_INCLUDE_KERNEL_H_
      15              : 
      16              : #if !defined(_ASMLANGUAGE)
      17              : #include <zephyr/kernel_includes.h>
      18              : #include <errno.h>
      19              : #include <limits.h>
      20              : #include <stdbool.h>
      21              : #include <zephyr/toolchain.h>
      22              : #include <zephyr/tracing/tracing_macros.h>
      23              : #include <zephyr/sys/mem_stats.h>
      24              : #include <zephyr/sys/iterable_sections.h>
      25              : #include <zephyr/sys/ring_buffer.h>
      26              : 
      27              : #ifdef __cplusplus
      28              : extern "C" {
      29              : #endif
      30              : 
      31              : /*
      32              :  * Zephyr currently assumes the size of a couple standard types to simplify
      33              :  * print string formats. Let's make sure this doesn't change without notice.
      34              :  */
      35              : BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
      36              : BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
      37              : BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));
      38              : 
      39              : /**
      40              :  * @brief Kernel APIs
      41              :  * @defgroup kernel_apis Kernel APIs
      42              :  * @since 1.0
      43              :  * @version 1.0.0
      44              :  * @{
      45              :  * @}
      46              :  */
      47              : 
      48            0 : #define K_ANY NULL
      49              : 
      50              : #if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
      51              : #error Zero available thread priorities defined!
      52              : #endif
      53              : 
      54            0 : #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
      55            0 : #define K_PRIO_PREEMPT(x) (x)
      56              : 
      57            0 : #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
      58            0 : #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
      59            0 : #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
      60            0 : #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
      61            0 : #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
      62              : 
      63              : #ifdef CONFIG_POLL
      64              : #define Z_POLL_EVENT_OBJ_INIT(obj) \
      65              :         .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
      66              : #define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
      67              : #else
      68              : #define Z_POLL_EVENT_OBJ_INIT(obj)
      69              : #define Z_DECL_POLL_EVENT
      70              : #endif
      71              : 
      72              : struct k_thread;
      73              : struct k_mutex;
      74              : struct k_sem;
      75              : struct k_msgq;
      76              : struct k_mbox;
      77              : struct k_pipe;
      78              : struct k_queue;
      79              : struct k_fifo;
      80              : struct k_lifo;
      81              : struct k_stack;
      82              : struct k_mem_slab;
      83              : struct k_timer;
      84              : struct k_poll_event;
      85              : struct k_poll_signal;
      86              : struct k_mem_domain;
      87              : struct k_mem_partition;
      88              : struct k_futex;
      89              : struct k_event;
      90              : 
      91            0 : enum execution_context_types {
      92              :         K_ISR = 0,
      93              :         K_COOP_THREAD,
      94              :         K_PREEMPT_THREAD,
      95              : };
      96              : 
      97              : /* private, used by k_poll and k_work_poll */
      98              : struct k_work_poll;
      99              : typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
     100              : 
     101              : /**
     102              :  * @addtogroup thread_apis
     103              :  * @{
     104              :  */
     105              : 
     106              : /**
     107              :  * @brief Resets thread longest frame usage data for specified thread
     108              :  *
     109              :  * This routine resets the longest frame value statistic
     110              :  * after printing to zero, enabling observation of the
     111              :  * longest frame from the most recent interval rather than
     112              :  * the longest frame since startup.
     113              :  *
     114              :  * @param thread Pointer to the thread to reset counter.
     115              :  *
     116              :  * @note @kconfig{CONFIG_THREAD_ANALYZER_LONG_FRAME_PER_INTERVAL} must
     117              :  * be set for this function to be effective.
     118              :  */
     119              : static inline void
     120            1 :         k_thread_runtime_stats_longest_frame_reset(__maybe_unused struct k_thread *thread)
     121              : {
     122              : #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
     123              :         thread->base.usage.longest = 0ULL;
     124              : #endif
     125              : }
     126              : 
     127            0 : typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
     128              :                                    void *user_data);
     129              : 
     130              : /**
     131              :  * @brief Iterate over all the threads in the system.
     132              :  *
     133              :  * This routine iterates over all the threads in the system and
     134              :  * calls the user_cb function for each thread.
     135              :  *
     136              :  * @param user_cb Pointer to the user callback function.
     137              :  * @param user_data Pointer to user data.
     138              :  *
     139              :  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
     140              :  * to be effective.
     141              :  * @note This API uses @ref k_spin_lock to protect the _kernel.threads
     142              :  * list which means creation of new threads and terminations of existing
     143              :  * threads are blocked until this API returns.
     144              :  */
     145            1 : void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
     146              : 
     147              : /**
     148              :  * @brief Iterate over all the threads in running on specified cpu.
     149              :  *
     150              :  * This function is does otherwise the same thing as k_thread_foreach(),
     151              :  * but it only loops through the threads running on specified cpu only.
     152              :  * If CONFIG_SMP is not defined the implementation this is the same as
     153              :  * k_thread_foreach(), with an assert cpu == 0.
     154              :  *
     155              :  * @param cpu The filtered cpu number
     156              :  * @param user_cb Pointer to the user callback function.
     157              :  * @param user_data Pointer to user data.
     158              :  *
     159              :  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
     160              :  * to be effective.
     161              :  * @note This API uses @ref k_spin_lock to protect the _kernel.threads
     162              :  * list which means creation of new threads and terminations of existing
     163              :  * threads are blocked until this API returns.
     164              :  */
     165              : #ifdef CONFIG_SMP
     166            1 : void k_thread_foreach_filter_by_cpu(unsigned int cpu,
     167              :                                     k_thread_user_cb_t user_cb, void *user_data);
     168              : #else
     169              : static inline
     170              : void k_thread_foreach_filter_by_cpu(unsigned int cpu,
     171              :                                     k_thread_user_cb_t user_cb, void *user_data)
     172              : {
     173              :         __ASSERT(cpu == 0, "cpu filter out of bounds");
     174              :         ARG_UNUSED(cpu);
     175              :         k_thread_foreach(user_cb, user_data);
     176              : }
     177              : #endif
     178              : 
     179              : /**
     180              :  * @brief Iterate over all the threads in the system without locking.
     181              :  *
     182              :  * This routine works exactly the same like @ref k_thread_foreach
     183              :  * but unlocks interrupts when user_cb is executed.
     184              :  *
     185              :  * @param user_cb Pointer to the user callback function.
     186              :  * @param user_data Pointer to user data.
     187              :  *
     188              :  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
     189              :  * to be effective.
     190              :  * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
     191              :  * queue elements. It unlocks it during user callback function processing.
     192              :  * If a new task is created when this @c foreach function is in progress,
     193              :  * the added new task would not be included in the enumeration.
     194              :  * If a task is aborted during this enumeration, there would be a race here
     195              :  * and there is a possibility that this aborted task would be included in the
     196              :  * enumeration.
     197              :  * @note If the task is aborted and the memory occupied by its @c k_thread
     198              :  * structure is reused when this @c k_thread_foreach_unlocked is in progress
     199              :  * it might even lead to the system behave unstable.
     200              :  * This function may never return, as it would follow some @c next task
     201              :  * pointers treating given pointer as a pointer to the k_thread structure
     202              :  * while it is something different right now.
     203              :  * Do not reuse the memory that was occupied by k_thread structure of aborted
     204              :  * task if it was aborted after this function was called in any context.
     205              :  */
     206            1 : void k_thread_foreach_unlocked(
     207              :         k_thread_user_cb_t user_cb, void *user_data);
     208              : 
     209              : /**
     210              :  * @brief Iterate over the threads in running on current cpu without locking.
     211              :  *
     212              :  * This function does otherwise the same thing as
     213              :  * k_thread_foreach_unlocked(), but it only loops through the threads
     214              :  * running on specified cpu. If CONFIG_SMP is not defined the
     215              :  * implementation this is the same as k_thread_foreach_unlocked(), with an
     216              :  * assert requiring cpu == 0.
     217              :  *
     218              :  * @param cpu The filtered cpu number
     219              :  * @param user_cb Pointer to the user callback function.
     220              :  * @param user_data Pointer to user data.
     221              :  *
     222              :  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
     223              :  * to be effective.
     224              :  * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
     225              :  * queue elements. It unlocks it during user callback function processing.
     226              :  * If a new task is created when this @c foreach function is in progress,
     227              :  * the added new task would not be included in the enumeration.
     228              :  * If a task is aborted during this enumeration, there would be a race here
     229              :  * and there is a possibility that this aborted task would be included in the
     230              :  * enumeration.
     231              :  * @note If the task is aborted and the memory occupied by its @c k_thread
     232              :  * structure is reused when this @c k_thread_foreach_unlocked is in progress
     233              :  * it might even lead to the system behave unstable.
     234              :  * This function may never return, as it would follow some @c next task
     235              :  * pointers treating given pointer as a pointer to the k_thread structure
     236              :  * while it is something different right now.
     237              :  * Do not reuse the memory that was occupied by k_thread structure of aborted
     238              :  * task if it was aborted after this function was called in any context.
     239              :  */
     240              : #ifdef CONFIG_SMP
     241            1 : void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
     242              :                                              k_thread_user_cb_t user_cb, void *user_data);
     243              : #else
     244              : static inline
     245              : void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
     246              :                                              k_thread_user_cb_t user_cb, void *user_data)
     247              : {
     248              :         __ASSERT(cpu == 0, "cpu filter out of bounds");
     249              :         ARG_UNUSED(cpu);
     250              :         k_thread_foreach_unlocked(user_cb, user_data);
     251              : }
     252              : #endif
     253              : 
     254              : /** @} */
     255              : 
     256              : /**
     257              :  * @defgroup thread_apis Thread APIs
     258              :  * @ingroup kernel_apis
     259              :  * @{
     260              :  */
     261              : 
     262              : #endif /* !_ASMLANGUAGE */
     263              : 
     264              : 
     265              : /*
     266              :  * Thread user options. May be needed by assembly code. Common part uses low
     267              :  * bits, arch-specific use high bits.
     268              :  */
     269              : 
     270              : /**
     271              :  * @brief system thread that must not abort
     272              :  * */
     273            1 : #define K_ESSENTIAL (BIT(0))
     274              : 
     275            0 : #define K_FP_IDX 1
     276              : /**
     277              :  * @brief FPU registers are managed by context switch
     278              :  *
     279              :  * @details
     280              :  * This option indicates that the thread uses the CPU's floating point
     281              :  * registers. This instructs the kernel to take additional steps to save
     282              :  * and restore the contents of these registers when scheduling the thread.
     283              :  * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
     284              :  */
     285            1 : #define K_FP_REGS (BIT(K_FP_IDX))
     286              : 
     287              : /**
     288              :  * @brief user mode thread
     289              :  *
     290              :  * This thread has dropped from supervisor mode to user mode and consequently
     291              :  * has additional restrictions
     292              :  */
     293            1 : #define K_USER (BIT(2))
     294              : 
     295              : /**
     296              :  * @brief Inherit Permissions
     297              :  *
     298              :  * @details
     299              :  * Indicates that the thread being created should inherit all kernel object
     300              :  * permissions from the thread that created it. No effect if
     301              :  * @kconfig{CONFIG_USERSPACE} is not enabled.
     302              :  */
     303            1 : #define K_INHERIT_PERMS (BIT(3))
     304              : 
     305              : /**
     306              :  * @brief Callback item state
     307              :  *
     308              :  * @details
     309              :  * This is a single bit of state reserved for "callback manager"
     310              :  * utilities (p4wq initially) who need to track operations invoked
     311              :  * from within a user-provided callback they have been invoked.
     312              :  * Effectively it serves as a tiny bit of zero-overhead TLS data.
     313              :  */
     314            1 : #define K_CALLBACK_STATE (BIT(4))
     315              : 
     316              : /**
     317              :  * @brief DSP registers are managed by context switch
     318              :  *
     319              :  * @details
     320              :  * This option indicates that the thread uses the CPU's DSP registers.
     321              :  * This instructs the kernel to take additional steps to save and
     322              :  * restore the contents of these registers when scheduling the thread.
     323              :  * No effect if @kconfig{CONFIG_DSP_SHARING} is not enabled.
     324              :  */
     325            1 : #define K_DSP_IDX 6
     326            0 : #define K_DSP_REGS (BIT(K_DSP_IDX))
     327              : 
     328              : /**
     329              :  * @brief AGU registers are managed by context switch
     330              :  *
     331              :  * @details
     332              :  * This option indicates that the thread uses the ARC processor's XY
     333              :  * memory and DSP feature. Often used with @kconfig{CONFIG_ARC_AGU_SHARING}.
     334              :  * No effect if @kconfig{CONFIG_ARC_AGU_SHARING} is not enabled.
     335              :  */
     336            1 : #define K_AGU_IDX 7
     337            0 : #define K_AGU_REGS (BIT(K_AGU_IDX))
     338              : 
     339              : /**
     340              :  * @brief FP and SSE registers are managed by context switch on x86
     341              :  *
     342              :  * @details
     343              :  * This option indicates that the thread uses the x86 CPU's floating point
     344              :  * and SSE registers. This instructs the kernel to take additional steps to
     345              :  * save and restore the contents of these registers when scheduling
     346              :  * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
     347              :  */
     348            1 : #define K_SSE_REGS (BIT(7))
     349              : 
     350              : /* end - thread options */
     351              : 
     352              : #if !defined(_ASMLANGUAGE)
     353              : /**
     354              :  * @brief Dynamically allocate a thread stack.
     355              :  *
     356              :  * Dynamically allocate a thread stack either from a pool of thread stacks of
     357              :  * size @kconfig{CONFIG_DYNAMIC_THREAD_POOL_SIZE}, or from the system heap.
     358              :  * Order is determined by the @kconfig{CONFIG_DYNAMIC_THREAD_PREFER_ALLOC} and
     359              :  * @kconfig{CONFIG_DYNAMIC_THREAD_PREFER_POOL} options. Thread stacks from the
     360              :  * pool are of maximum size @kconfig{CONFIG_DYNAMIC_THREAD_STACK_SIZE}.
     361              :  *
     362              :  * @note When no longer required, thread stacks allocated with
     363              :  * `k_thread_stack_alloc()` must be freed with @ref k_thread_stack_free to
     364              :  * avoid leaking memory.
     365              :  *
     366              :  * @param size Stack size in bytes.
     367              :  * @param flags Stack creation flags, or 0.
     368              :  *
     369              :  * @retval the allocated thread stack on success.
     370              :  * @retval NULL on failure.
     371              :  *
     372              :  * Relevant stack creation flags include:
     373              :  * - @ref K_USER allocate a userspace thread (requires @kconfig{CONFIG_USERSPACE})
     374              :  *
     375              :  * @see @kconfig{CONFIG_DYNAMIC_THREAD}
     376              :  */
     377            1 : __syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);
     378              : 
     379              : /**
     380              :  * @brief Free a dynamically allocated thread stack.
     381              :  *
     382              :  * @param stack Pointer to the thread stack.
     383              :  *
     384              :  * @retval 0 on success.
     385              :  * @retval -EBUSY if the thread stack is in use.
     386              :  * @retval -EINVAL if @p stack is invalid.
     387              :  * @retval -ENOSYS if dynamic thread stack allocation is disabled
     388              :  *
     389              :  * @see @kconfig{CONFIG_DYNAMIC_THREAD}
     390              :  */
     391            1 : __syscall int k_thread_stack_free(k_thread_stack_t *stack);
     392              : 
     393              : /**
     394              :  * @brief Create a thread.
     395              :  *
     396              :  * This routine initializes a thread, then schedules it for execution.
     397              :  *
     398              :  * The new thread may be scheduled for immediate execution or a delayed start.
     399              :  * If the newly spawned thread does not have a delayed start the kernel
     400              :  * scheduler may preempt the current thread to allow the new thread to
     401              :  * execute.
     402              :  *
     403              :  * Thread options are architecture-specific, and can include K_ESSENTIAL,
     404              :  * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
     405              :  * them using "|" (the logical OR operator).
     406              :  *
     407              :  * Stack objects passed to this function may be statically allocated with
     408              :  * either of these macros in order to be portable:
     409              :  *
     410              :  * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
     411              :  *   supervisor threads.
     412              :  * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
     413              :  *   threads only. These stacks use less memory if CONFIG_USERSPACE is
     414              :  *   enabled.
     415              :  *
     416              :  * Alternatively, the stack may be dynamically allocated using
     417              :  * @ref k_thread_stack_alloc.
     418              :  *
     419              :  * The stack_size parameter has constraints. It must either be:
     420              :  *
     421              :  * - The original size value passed to K_THREAD_STACK_DEFINE() or
     422              :  *   K_KERNEL_STACK_DEFINE()
     423              :  * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
     424              :  *   defined with K_THREAD_STACK_DEFINE()
     425              :  * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
     426              :  *   defined with K_KERNEL_STACK_DEFINE().
     427              :  *
     428              :  * Using other values, or sizeof(stack) may produce undefined behavior.
     429              :  *
     430              :  * @param new_thread Pointer to uninitialized struct k_thread
     431              :  * @param stack Pointer to the stack space.
     432              :  * @param stack_size Stack size in bytes.
     433              :  * @param entry Thread entry function.
     434              :  * @param p1 1st entry point parameter.
     435              :  * @param p2 2nd entry point parameter.
     436              :  * @param p3 3rd entry point parameter.
     437              :  * @param prio Thread priority.
     438              :  * @param options Thread options.
     439              :  * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
     440              :  *
     441              :  * @return ID of new thread.
     442              :  *
     443              :  */
     444            1 : __syscall k_tid_t k_thread_create(struct k_thread *new_thread,
     445              :                                   k_thread_stack_t *stack,
     446              :                                   size_t stack_size,
     447              :                                   k_thread_entry_t entry,
     448              :                                   void *p1, void *p2, void *p3,
     449              :                                   int prio, uint32_t options, k_timeout_t delay);
     450              : 
     451              : /**
     452              :  * @brief Drop a thread's privileges permanently to user mode
     453              :  *
     454              :  * This allows a supervisor thread to be re-used as a user thread.
     455              :  * This function does not return, but control will transfer to the provided
     456              :  * entry point as if this was a new user thread.
     457              :  *
     458              :  * The implementation ensures that the stack buffer contents are erased.
     459              :  * Any thread-local storage will be reverted to a pristine state.
     460              :  *
     461              :  * Memory domain membership, resource pool assignment, kernel object
     462              :  * permissions, priority, and thread options are preserved.
     463              :  *
     464              :  * A common use of this function is to re-use the main thread as a user thread
     465              :  * once all supervisor mode-only tasks have been completed.
     466              :  *
     467              :  * @param entry Function to start executing from
     468              :  * @param p1 1st entry point parameter
     469              :  * @param p2 2nd entry point parameter
     470              :  * @param p3 3rd entry point parameter
     471              :  */
     472            1 : FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
     473              :                                                    void *p1, void *p2,
     474              :                                                    void *p3);
     475              : 
     476              : /**
     477              :  * @brief Grant a thread access to a set of kernel objects
     478              :  *
     479              :  * This is a convenience function. For the provided thread, grant access to
     480              :  * the remaining arguments, which must be pointers to kernel objects.
     481              :  *
     482              :  * The thread object must be initialized (i.e. running). The objects don't
     483              :  * need to be.
     484              :  * Note that NULL shouldn't be passed as an argument.
     485              :  *
     486              :  * @param thread Thread to grant access to objects
     487              :  * @param ... list of kernel object pointers
     488              :  */
     489            1 : #define k_thread_access_grant(thread, ...) \
     490              :         FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)
     491              : 
     492              : /**
     493              :  * @brief Assign a resource memory pool to a thread
     494              :  *
     495              :  * By default, threads have no resource pool assigned unless their parent
     496              :  * thread has a resource pool, in which case it is inherited. Multiple
     497              :  * threads may be assigned to the same memory pool.
     498              :  *
     499              :  * Changing a thread's resource pool will not migrate allocations from the
     500              :  * previous pool.
     501              :  *
     502              :  * @param thread Target thread to assign a memory pool for resource requests.
     503              :  * @param heap Heap object to use for resources,
     504              :  *             or NULL if the thread should no longer have a memory pool.
     505              :  */
     506            1 : static inline void k_thread_heap_assign(struct k_thread *thread,
     507              :                                         struct k_heap *heap)
     508              : {
     509              :         thread->resource_pool = heap;
     510              : }
     511              : 
     512              : #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
     513              : /**
     514              :  * @brief Obtain stack usage information for the specified thread
     515              :  *
     516              :  * User threads will need to have permission on the target thread object.
     517              :  *
     518              :  * Some hardware may prevent inspection of a stack buffer currently in use.
     519              :  * If this API is called from supervisor mode, on the currently running thread,
     520              :  * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
     521              :  * error will be generated.
     522              :  *
     523              :  * @param thread Thread to inspect stack information
     524              :  * @param unused_ptr Output parameter, filled in with the unused stack space
     525              :  *      of the target thread in bytes.
     526              :  * @return 0 on success
     527              :  * @return -EBADF Bad thread object (user mode only)
     528              :  * @return -EPERM No permissions on thread object (user mode only)
     529              :  * #return -ENOTSUP Forbidden by hardware policy
     530              :  * @return -EINVAL Thread is uninitialized or exited (user mode only)
     531              :  * @return -EFAULT Bad memory address for unused_ptr (user mode only)
     532              :  */
     533              : __syscall int k_thread_stack_space_get(const struct k_thread *thread,
     534              :                                        size_t *unused_ptr);
     535              : #endif
     536              : 
     537              : #if (K_HEAP_MEM_POOL_SIZE > 0)
     538              : /**
     539              :  * @brief Assign the system heap as a thread's resource pool
     540              :  *
     541              :  * Similar to k_thread_heap_assign(), but the thread will use
     542              :  * the kernel heap to draw memory.
     543              :  *
     544              :  * Use with caution, as a malicious thread could perform DoS attacks on the
     545              :  * kernel heap.
     546              :  *
     547              :  * @param thread Target thread to assign the system heap for resource requests
     548              :  *
     549              :  */
     550              : void k_thread_system_pool_assign(struct k_thread *thread);
     551              : #endif /* (K_HEAP_MEM_POOL_SIZE > 0) */
     552              : 
     553              : /**
     554              :  * @brief Sleep until a thread exits
     555              :  *
     556              :  * The caller will be put to sleep until the target thread exits, either due
     557              :  * to being aborted, self-exiting, or taking a fatal error. This API returns
     558              :  * immediately if the thread isn't running.
     559              :  *
     560              :  * This API may only be called from ISRs with a K_NO_WAIT timeout,
     561              :  * where it can be useful as a predicate to detect when a thread has
     562              :  * aborted.
     563              :  *
     564              :  * @param thread Thread to wait to exit
     565              :  * @param timeout upper bound time to wait for the thread to exit.
     566              :  * @retval 0 success, target thread has exited or wasn't running
     567              :  * @retval -EBUSY returned without waiting
     568              :  * @retval -EAGAIN waiting period timed out
     569              :  * @retval -EDEADLK target thread is joining on the caller, or target thread
     570              :  *                  is the caller
     571              :  */
     572            1 : __syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
     573              : 
     574              : /**
     575              :  * @brief Put the current thread to sleep.
     576              :  *
     577              :  * This routine puts the current thread to sleep for @a duration,
     578              :  * specified as a k_timeout_t object.
     579              :  *
     580              :  * @param timeout Desired duration of sleep.
     581              :  *
     582              :  * @return Zero if the requested time has elapsed or the time left to
     583              :  * sleep rounded up to the nearest millisecond (e.g. if the thread was
     584              :  * awoken by the \ref k_wakeup call).  Will be clamped to INT_MAX in
     585              :  * the case where the remaining time is unrepresentable in an int32_t.
     586              :  */
     587            1 : __syscall int32_t k_sleep(k_timeout_t timeout);
     588              : 
     589              : /**
     590              :  * @brief Put the current thread to sleep.
     591              :  *
     592              :  * This routine puts the current thread to sleep for @a duration milliseconds.
     593              :  *
     594              :  * @param ms Number of milliseconds to sleep.
     595              :  *
     596              :  * @return Zero if the requested time has elapsed or if the thread was woken up
     597              :  * by the \ref k_wakeup call, the time left to sleep rounded up to the nearest
     598              :  * millisecond.
     599              :  */
     600            1 : static inline int32_t k_msleep(int32_t ms)
     601              : {
     602              :         return k_sleep(Z_TIMEOUT_MS(ms));
     603              : }
     604              : 
     605              : /**
     606              :  * @brief Put the current thread to sleep with microsecond resolution.
     607              :  *
     608              :  * This function is unlikely to work as expected without kernel tuning.
     609              :  * In particular, because the lower bound on the duration of a sleep is
     610              :  * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
     611              :  * adjusted to achieve the resolution desired. The implications of doing
     612              :  * this must be understood before attempting to use k_usleep(). Use with
     613              :  * caution.
     614              :  *
     615              :  * @param us Number of microseconds to sleep.
     616              :  *
     617              :  * @return Zero if the requested time has elapsed or if the thread was woken up
     618              :  * by the \ref k_wakeup call, the time left to sleep rounded up to the nearest
     619              :  * microsecond.
     620              :  */
     621            1 : __syscall int32_t k_usleep(int32_t us);
     622              : 
     623              : /**
     624              :  * @brief Cause the current thread to busy wait.
     625              :  *
     626              :  * This routine causes the current thread to execute a "do nothing" loop for
     627              :  * @a usec_to_wait microseconds.
     628              :  *
     629              :  * @note The clock used for the microsecond-resolution delay here may
     630              :  * be skewed relative to the clock used for system timeouts like
     631              :  * k_sleep().  For example k_busy_wait(1000) may take slightly more or
     632              :  * less time than k_sleep(K_MSEC(1)), with the offset dependent on
     633              :  * clock tolerances.
     634              :  *
     635              :  * @note In case when @kconfig{CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE} and
     636              :  * @kconfig{CONFIG_PM} options are enabled, this function may not work.
     637              :  * The timer/clock used for delay processing may be disabled/inactive.
     638              :  */
     639            1 : __syscall void k_busy_wait(uint32_t usec_to_wait);
     640              : 
     641              : /**
     642              :  * @brief Check whether it is possible to yield in the current context.
     643              :  *
     644              :  * This routine checks whether the kernel is in a state where it is possible to
     645              :  * yield or call blocking API's. It should be used by code that needs to yield
     646              :  * to perform correctly, but can feasibly be called from contexts where that
     647              :  * is not possible. For example in the PRE_KERNEL initialization step, or when
     648              :  * being run from the idle thread.
     649              :  *
     650              :  * @return True if it is possible to yield in the current context, false otherwise.
     651              :  */
     652            1 : bool k_can_yield(void);
     653              : 
     654              : /**
     655              :  * @brief Yield the current thread.
     656              :  *
     657              :  * This routine causes the current thread to yield execution to another
     658              :  * thread of the same or higher priority. If there are no other ready threads
     659              :  * of the same or higher priority, the routine returns immediately.
     660              :  */
     661            1 : __syscall void k_yield(void);
     662              : 
     663              : /**
     664              :  * @brief Wake up a sleeping thread.
     665              :  *
     666              :  * This routine prematurely wakes up @a thread from sleeping.
     667              :  *
     668              :  * If @a thread is not currently sleeping, the routine has no effect.
     669              :  *
     670              :  * @param thread ID of thread to wake.
     671              :  */
     672            1 : __syscall void k_wakeup(k_tid_t thread);
     673              : 
     674              : /**
     675              :  * @brief Query thread ID of the current thread.
     676              :  *
     677              :  * This unconditionally queries the kernel via a system call.
     678              :  *
     679              :  * @note Use k_current_get() unless absolutely sure this is necessary.
     680              :  *       This should only be used directly where the thread local
     681              :  *       variable cannot be used or may contain invalid values
     682              :  *       if thread local storage (TLS) is enabled. If TLS is not
     683              :  *       enabled, this is the same as k_current_get().
     684              :  *
     685              :  * @return ID of current thread.
     686              :  */
     687              : __attribute_const__
     688            1 : __syscall k_tid_t k_sched_current_thread_query(void);
     689              : 
     690              : /**
     691              :  * @brief Get thread ID of the current thread.
     692              :  *
     693              :  * @return ID of current thread.
     694              :  *
     695              :  */
     696              : __attribute_const__
     697            1 : static inline k_tid_t k_current_get(void)
     698              : {
     699              : #ifdef CONFIG_CURRENT_THREAD_USE_TLS
     700              : 
     701              :         /* Thread-local cache of current thread ID, set in z_thread_entry() */
     702              :         extern Z_THREAD_LOCAL k_tid_t z_tls_current;
     703              : 
     704              :         return z_tls_current;
     705              : #else
     706              :         return k_sched_current_thread_query();
     707              : #endif
     708              : }
     709              : 
     710              : /**
     711              :  * @brief Abort a thread.
     712              :  *
     713              :  * This routine permanently stops execution of @a thread. The thread is taken
     714              :  * off all kernel queues it is part of (i.e. the ready queue, the timeout
     715              :  * queue, or a kernel object wait queue). However, any kernel resources the
     716              :  * thread might currently own (such as mutexes or memory blocks) are not
     717              :  * released. It is the responsibility of the caller of this routine to ensure
     718              :  * all necessary cleanup is performed.
     719              :  *
     720              :  * After k_thread_abort() returns, the thread is guaranteed not to be
     721              :  * running or to become runnable anywhere on the system.  Normally
     722              :  * this is done via blocking the caller (in the same manner as
     723              :  * k_thread_join()), but in interrupt context on SMP systems the
     724              :  * implementation is required to spin for threads that are running on
     725              :  * other CPUs.
     726              :  *
     727              :  * @param thread ID of thread to abort.
     728              :  */
     729            1 : __syscall void k_thread_abort(k_tid_t thread);
     730              : 
     731              : k_ticks_t z_timeout_expires(const struct _timeout *timeout);
     732              : k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
     733              : 
     734              : #ifdef CONFIG_SYS_CLOCK_EXISTS
     735              : 
     736              : /**
     737              :  * @brief Get time when a thread wakes up, in system ticks
     738              :  *
     739              :  * This routine computes the system uptime when a waiting thread next
     740              :  * executes, in units of system ticks.  If the thread is not waiting,
     741              :  * it returns current system time.
     742              :  */
     743            1 : __syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread);
     744              : 
     745              : static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
     746              :                                                 const struct k_thread *thread)
     747              : {
     748              :         return z_timeout_expires(&thread->base.timeout);
     749              : }
     750              : 
     751              : /**
     752              :  * @brief Get time remaining before a thread wakes up, in system ticks
     753              :  *
     754              :  * This routine computes the time remaining before a waiting thread
     755              :  * next executes, in units of system ticks.  If the thread is not
     756              :  * waiting, it returns zero.
     757              :  */
     758            1 : __syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread);
     759              : 
     760              : static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
     761              :                                                 const struct k_thread *thread)
     762              : {
     763              :         return z_timeout_remaining(&thread->base.timeout);
     764              : }
     765              : 
     766              : #endif /* CONFIG_SYS_CLOCK_EXISTS */
     767              : 
     768              : /**
     769              :  * @cond INTERNAL_HIDDEN
     770              :  */
     771              : 
     772              : struct _static_thread_data {
     773              :         struct k_thread *init_thread;
     774              :         k_thread_stack_t *init_stack;
     775              :         unsigned int init_stack_size;
     776              :         k_thread_entry_t init_entry;
     777              :         void *init_p1;
     778              :         void *init_p2;
     779              :         void *init_p3;
     780              :         int init_prio;
     781              :         uint32_t init_options;
     782              :         const char *init_name;
     783              : #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
     784              :         int32_t init_delay_ms;
     785              : #else
     786              :         k_timeout_t init_delay;
     787              : #endif
     788              : };
     789              : 
     790              : #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
     791              : #define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
     792              : #define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
     793              : #else
     794              : #define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS_INIT(ms)
     795              : #define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
     796              : #endif
     797              : 
     798              : #define Z_THREAD_INITIALIZER(thread, stack, stack_size,           \
     799              :                             entry, p1, p2, p3,                   \
     800              :                             prio, options, delay, tname)         \
     801              :         {                                                        \
     802              :         .init_thread = (thread),                                 \
     803              :         .init_stack = (stack),                                   \
     804              :         .init_stack_size = (stack_size),                         \
     805              :         .init_entry = (k_thread_entry_t)entry,                   \
     806              :         .init_p1 = (void *)p1,                                   \
     807              :         .init_p2 = (void *)p2,                                   \
     808              :         .init_p3 = (void *)p3,                                   \
     809              :         .init_prio = (prio),                                     \
     810              :         .init_options = (options),                               \
     811              :         .init_name = STRINGIFY(tname),                           \
     812              :         Z_THREAD_INIT_DELAY_INITIALIZER(delay)                   \
     813              :         }
     814              : 
     815              : /*
     816              :  * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
     817              :  * information on arguments.
     818              :  */
     819              : #define Z_THREAD_COMMON_DEFINE(name, stack_size,                        \
     820              :                                entry, p1, p2, p3,                       \
     821              :                                prio, options, delay)                    \
     822              :         struct k_thread _k_thread_obj_##name;                           \
     823              :         STRUCT_SECTION_ITERABLE(_static_thread_data,                    \
     824              :                                 _k_thread_data_##name) =                \
     825              :                 Z_THREAD_INITIALIZER(&_k_thread_obj_##name,         \
     826              :                                      _k_thread_stack_##name, stack_size,\
     827              :                                      entry, p1, p2, p3, prio, options,  \
     828              :                                      delay, name);                      \
     829              :         __maybe_unused const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
     830              : 
     831              : /**
     832              :  * INTERNAL_HIDDEN @endcond
     833              :  */
     834              : 
     835              : /**
     836              :  * @brief Statically define and initialize a thread.
     837              :  *
     838              :  * The thread may be scheduled for immediate execution or a delayed start.
     839              :  *
     840              :  * Thread options are architecture-specific, and can include K_ESSENTIAL,
     841              :  * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
     842              :  * them using "|" (the logical OR operator).
     843              :  *
     844              :  * The ID of the thread can be accessed using:
     845              :  *
     846              :  * @code extern const k_tid_t <name>; @endcode
     847              :  *
     848              :  * @param name Name of the thread.
     849              :  * @param stack_size Stack size in bytes.
     850              :  * @param entry Thread entry function.
     851              :  * @param p1 1st entry point parameter.
     852              :  * @param p2 2nd entry point parameter.
     853              :  * @param p3 3rd entry point parameter.
     854              :  * @param prio Thread priority.
     855              :  * @param options Thread options.
     856              :  * @param delay Scheduling delay (in milliseconds), zero for no delay.
     857              :  *
     858              :  * @note Static threads with zero delay should not normally have
     859              :  * MetaIRQ priority levels.  This can preempt the system
     860              :  * initialization handling (depending on the priority of the main
     861              :  * thread) and cause surprising ordering side effects.  It will not
     862              :  * affect anything in the OS per se, but consider it bad practice.
     863              :  * Use a SYS_INIT() callback if you need to run code before entrance
     864              :  * to the application main().
     865              :  */
     866              : #define K_THREAD_DEFINE(name, stack_size,                                \
     867              :                         entry, p1, p2, p3,                               \
     868            1 :                         prio, options, delay)                            \
     869              :         K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size);       \
     870              :         Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,      \
     871              :                                prio, options, delay)
     872              : 
     873              : /**
     874              :  * @brief Statically define and initialize a thread intended to run only in kernel mode.
     875              :  *
     876              :  * The thread may be scheduled for immediate execution or a delayed start.
     877              :  *
     878              :  * Thread options are architecture-specific, and can include K_ESSENTIAL,
     879              :  * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
     880              :  * them using "|" (the logical OR operator).
     881              :  *
     882              :  * The ID of the thread can be accessed using:
     883              :  *
     884              :  * @code extern const k_tid_t <name>; @endcode
     885              :  *
     886              :  * @note Threads defined by this can only run in kernel mode, and cannot be
     887              :  *       transformed into user thread via k_thread_user_mode_enter().
     888              :  *
     889              :  * @warning Depending on the architecture, the stack size (@p stack_size)
     890              :  *          may need to be multiples of CONFIG_MMU_PAGE_SIZE (if MMU)
     891              :  *          or in power-of-two size (if MPU).
     892              :  *
     893              :  * @param name Name of the thread.
     894              :  * @param stack_size Stack size in bytes.
     895              :  * @param entry Thread entry function.
     896              :  * @param p1 1st entry point parameter.
     897              :  * @param p2 2nd entry point parameter.
     898              :  * @param p3 3rd entry point parameter.
     899              :  * @param prio Thread priority.
     900              :  * @param options Thread options.
     901              :  * @param delay Scheduling delay (in milliseconds), zero for no delay.
     902              :  */
     903              : #define K_KERNEL_THREAD_DEFINE(name, stack_size,                        \
     904              :                                entry, p1, p2, p3,                       \
     905            1 :                                prio, options, delay)                    \
     906              :         K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size);      \
     907              :         Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,     \
     908              :                                prio, options, delay)
     909              : 
     910              : /**
     911              :  * @brief Get a thread's priority.
     912              :  *
     913              :  * This routine gets the priority of @a thread.
     914              :  *
     915              :  * @param thread ID of thread whose priority is needed.
     916              :  *
     917              :  * @return Priority of @a thread.
     918              :  */
     919            1 : __syscall int k_thread_priority_get(k_tid_t thread);
     920              : 
     921              : /**
     922              :  * @brief Set a thread's priority.
     923              :  *
     924              :  * This routine immediately changes the priority of @a thread.
     925              :  *
     926              :  * Rescheduling can occur immediately depending on the priority @a thread is
     927              :  * set to:
     928              :  *
     929              :  * - If its priority is raised above the priority of a currently scheduled
     930              :  * preemptible thread, @a thread will be scheduled in.
     931              :  *
     932              :  * - If the caller lowers the priority of a currently scheduled preemptible
     933              :  * thread below that of other threads in the system, the thread of the highest
     934              :  * priority will be scheduled in.
     935              :  *
     936              :  * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
     937              :  * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
     938              :  * highest priority.
     939              :  *
     940              :  * @param thread ID of thread whose priority is to be set.
     941              :  * @param prio New priority.
     942              :  *
     943              :  * @warning Changing the priority of a thread currently involved in mutex
     944              :  * priority inheritance may result in undefined behavior.
     945              :  */
     946            1 : __syscall void k_thread_priority_set(k_tid_t thread, int prio);
     947              : 
     948              : 
     949              : #ifdef CONFIG_SCHED_DEADLINE
     950              : /**
     951              :  * @brief Set relative deadline expiration time for scheduler
     952              :  *
     953              :  * This sets the "deadline" expiration as a time delta from the
     954              :  * current time, in the same units used by k_cycle_get_32().  The
     955              :  * scheduler (when deadline scheduling is enabled) will choose the
     956              :  * next expiring thread when selecting between threads at the same
     957              :  * static priority.  Threads at different priorities will be scheduled
     958              :  * according to their static priority.
     959              :  *
     960              :  * @note Deadlines are stored internally using 32 bit unsigned
     961              :  * integers.  The number of cycles between the "first" deadline in the
     962              :  * scheduler queue and the "last" deadline must be less than 2^31 (i.e
     963              :  * a signed non-negative quantity).  Failure to adhere to this rule
     964              :  * may result in scheduled threads running in an incorrect deadline
     965              :  * order.
     966              :  *
     967              :  * @note Despite the API naming, the scheduler makes no guarantees
     968              :  * the thread WILL be scheduled within that deadline, nor does it take
     969              :  * extra metadata (like e.g. the "runtime" and "period" parameters in
     970              :  * Linux sched_setattr()) that allows the kernel to validate the
     971              :  * scheduling for achievability.  Such features could be implemented
     972              :  * above this call, which is simply input to the priority selection
     973              :  * logic.
     974              :  *
     975              :  * @kconfig_dep{CONFIG_SCHED_DEADLINE}
     976              :  *
     977              :  * @param thread A thread on which to set the deadline
     978              :  * @param deadline A time delta, in cycle units
     979              :  *
     980              :  */
     981            1 : __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
     982              : 
     983              : /**
     984              :  * @brief Set absolute deadline expiration time for scheduler
     985              :  *
     986              :  * This sets the "deadline" expiration as a timestamp in the same
     987              :  * units used by k_cycle_get_32(). The scheduler (when deadline scheduling
     988              :  * is enabled) will choose the next expiring thread when selecting between
     989              :  * threads at the same static priority.  Threads at different priorities
     990              :  * will be scheduled according to their static priority.
     991              :  *
     992              :  * Unlike @ref k_thread_deadline_set which sets a relative timestamp to a
     993              :  * "now" implicitly determined during its call, this routine sets an
     994              :  * absolute timestamp that is computed from a timestamp relative to
     995              :  * an explicit "now" that was determined before this routine is called.
     996              :  * This allows the caller to specify deadlines for multiple threads
     997              :  * using a common "now".
     998              :  *
     999              :  * @note Deadlines are stored internally using 32 bit unsigned
    1000              :  * integers.  The number of cycles between the "first" deadline in the
    1001              :  * scheduler queue and the "last" deadline must be less than 2^31 (i.e
    1002              :  * a signed non-negative quantity).  Failure to adhere to this rule
    1003              :  * may result in scheduled threads running in an incorrect deadline
    1004              :  * order.
    1005              :  *
    1006              :  * @note Even if a provided timestamp is in the past, the kernel will
    1007              :  * still schedule threads with deadlines in order from the earliest to
    1008              :  * the latest.
    1009              :  *
    1010              :  * @note Despite the API naming, the scheduler makes no guarantees
    1011              :  * the thread WILL be scheduled within that deadline, nor does it take
    1012              :  * extra metadata (like e.g. the "runtime" and "period" parameters in
    1013              :  * Linux sched_setattr()) that allows the kernel to validate the
    1014              :  * scheduling for achievability.  Such features could be implemented
    1015              :  * above this call, which is simply input to the priority selection
    1016              :  * logic.
    1017              :  *
    1018              :  * @kconfig_dep{CONFIG_SCHED_DEADLINE}
    1019              :  *
    1020              :  * @param thread A thread on which to set the deadline
    1021              :  * @param deadline A timestamp, in cycle units
    1022              :  */
    1023            1 : __syscall void k_thread_absolute_deadline_set(k_tid_t thread, int deadline);
    1024              : #endif
    1025              : 
    1026              : /**
    1027              :  * @brief Invoke the scheduler
    1028              :  *
    1029              :  * This routine invokes the scheduler to force a schedule point on the current
    1030              :  * CPU. If invoked from within a thread, the scheduler will be invoked
    1031              :  * immediately (provided interrupts were not locked when invoked). If invoked
    1032              :  * from within an ISR, the scheduler will be invoked upon exiting the ISR.
    1033              :  *
    1034              :  * Invoking the scheduler allows the kernel to make an immediate determination
    1035              :  * as to what the next thread to execute should be. Unlike yielding, this
    1036              :  * routine is not guaranteed to switch to a thread of equal or higher priority
    1037              :  * if any are available. For example, if the current thread is cooperative and
    1038              :  * there is a still higher priority cooperative thread that is ready, then
    1039              :  * yielding will switch to that higher priority thread whereas this routine
    1040              :  * will not.
    1041              :  *
    1042              :  * Most applications will never use this routine.
    1043              :  */
    1044            1 : __syscall void k_reschedule(void);
    1045              : 
    1046              : #ifdef CONFIG_SCHED_CPU_MASK
    1047              : /**
    1048              :  * @brief Sets all CPU enable masks to zero
    1049              :  *
    1050              :  * After this returns, the thread will no longer be schedulable on any
    1051              :  * CPUs.  The thread must not be currently runnable.
    1052              :  *
    1053              :  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
    1054              :  * configuration.
    1055              :  *
    1056              :  * @param thread Thread to operate upon
    1057              :  * @return Zero on success, otherwise error code
    1058              :  */
    1059            1 : int k_thread_cpu_mask_clear(k_tid_t thread);
    1060              : 
    1061              : /**
    1062              :  * @brief Sets all CPU enable masks to one
    1063              :  *
    1064              :  * After this returns, the thread will be schedulable on any CPU.  The
    1065              :  * thread must not be currently runnable.
    1066              :  *
    1067              :  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
    1068              :  * configuration.
    1069              :  *
    1070              :  * @param thread Thread to operate upon
    1071              :  * @return Zero on success, otherwise error code
    1072              :  */
    1073            1 : int k_thread_cpu_mask_enable_all(k_tid_t thread);
    1074              : 
    1075              : /**
    1076              :  * @brief Enable thread to run on specified CPU
    1077              :  *
    1078              :  * The thread must not be currently runnable.
    1079              :  *
    1080              :  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
    1081              :  * configuration.
    1082              :  *
    1083              :  * @param thread Thread to operate upon
    1084              :  * @param cpu CPU index
    1085              :  * @return Zero on success, otherwise error code
    1086              :  */
    1087            1 : int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
    1088              : 
    1089              : /**
    1090              :  * @brief Prevent thread to run on specified CPU
    1091              :  *
    1092              :  * The thread must not be currently runnable.
    1093              :  *
    1094              :  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
    1095              :  * configuration.
    1096              :  *
    1097              :  * @param thread Thread to operate upon
    1098              :  * @param cpu CPU index
    1099              :  * @return Zero on success, otherwise error code
    1100              :  */
    1101            1 : int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
    1102              : 
    1103              : /**
    1104              :  * @brief Pin a thread to a CPU
    1105              :  *
    1106              :  * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
    1107              :  * thread on the selected CPU.
    1108              :  *
    1109              :  * @param thread Thread to operate upon
    1110              :  * @param cpu CPU index
    1111              :  * @return Zero on success, otherwise error code
    1112              :  */
    1113            1 : int k_thread_cpu_pin(k_tid_t thread, int cpu);
    1114              : #endif
    1115              : 
    1116              : /**
    1117              :  * @brief Suspend a thread.
    1118              :  *
    1119              :  * This routine prevents the kernel scheduler from making @a thread
    1120              :  * the current thread. All other internal operations on @a thread are
    1121              :  * still performed; for example, kernel objects it is waiting on are
    1122              :  * still handed to it. Thread suspension does not impact any timeout
    1123              :  * upon which the thread may be waiting (such as a timeout from a call
    1124              :  * to k_sem_take() or k_sleep()). Thus if the timeout expires while the
    1125              :  * thread is suspended, it is still suspended until k_thread_resume()
    1126              :  * is called.
    1127              :  *
    1128              :  * When the target thread is active on another CPU, the caller will block until
    1129              :  * the target thread is halted (suspended or aborted).  But if the caller is in
    1130              :  * an interrupt context, it will spin waiting for that target thread active on
    1131              :  * another CPU to halt.
    1132              :  *
    1133              :  * If @a thread is already suspended, the routine has no effect.
    1134              :  *
    1135              :  * @param thread ID of thread to suspend.
    1136              :  */
    1137            1 : __syscall void k_thread_suspend(k_tid_t thread);
    1138              : 
    1139              : /**
    1140              :  * @brief Resume a suspended thread.
    1141              :  *
    1142              :  * This routine reverses the thread suspension from k_thread_suspend()
    1143              :  * and allows the kernel scheduler to make @a thread the current thread
    1144              :  * when it is next eligible for that role.
    1145              :  *
    1146              :  * If @a thread is not currently suspended, the routine has no effect.
    1147              :  *
    1148              :  * @param thread ID of thread to resume.
    1149              :  */
    1150            1 : __syscall void k_thread_resume(k_tid_t thread);
    1151              : 
    1152              : /**
    1153              :  * @brief Start an inactive thread
    1154              :  *
    1155              :  * If a thread was created with K_FOREVER in the delay parameter, it will
    1156              :  * not be added to the scheduling queue until this function is called
    1157              :  * on it.
    1158              :  *
    1159              :  * @note This is a legacy API for compatibility.  Modern Zephyr
    1160              :  * threads are initialized in the "sleeping" state and do not need
    1161              :  * special handling for "start".
    1162              :  *
    1163              :  * @param thread thread to start
    1164              :  */
    1165            1 : static inline void k_thread_start(k_tid_t thread)
    1166              : {
    1167              :         k_wakeup(thread);
    1168              : }
    1169              : 
    1170              : /**
    1171              :  * @brief Set time-slicing period and scope.
    1172              :  *
    1173              :  * This routine specifies how the scheduler will perform time slicing of
    1174              :  * preemptible threads.
    1175              :  *
    1176              :  * To enable time slicing, @a slice must be non-zero. The scheduler
    1177              :  * ensures that no thread runs for more than the specified time limit
    1178              :  * before other threads of that priority are given a chance to execute.
    1179              :  * Any thread whose priority is higher than @a prio is exempted, and may
    1180              :  * execute as long as desired without being preempted due to time slicing.
    1181              :  *
    1182              :  * Time slicing only limits the maximum amount of time a thread may continuously
    1183              :  * execute. Once the scheduler selects a thread for execution, there is no
    1184              :  * minimum guaranteed time the thread will execute before threads of greater or
    1185              :  * equal priority are scheduled.
    1186              :  *
    1187              :  * When the current thread is the only one of that priority eligible
    1188              :  * for execution, this routine has no effect; the thread is immediately
    1189              :  * rescheduled after the slice period expires.
    1190              :  *
    1191              :  * To disable timeslicing, set both @a slice and @a prio to zero.
    1192              :  *
    1193              :  * @param slice Maximum time slice length (in milliseconds).
    1194              :  * @param prio Highest thread priority level eligible for time slicing.
    1195              :  */
    1196            1 : void k_sched_time_slice_set(int32_t slice, int prio);
    1197              : 
    1198              : /**
    1199              :  * @brief Set thread time slice
    1200              :  *
    1201              :  * As for k_sched_time_slice_set, but (when
    1202              :  * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
    1203              :  * thread.  When non-zero, this timeslice will take precedence over
    1204              :  * the global value.
    1205              :  *
    1206              :  * When such a thread's timeslice expires, the configured callback
    1207              :  * will be called before the thread is removed/re-added to the run
    1208              :  * queue.  This callback will occur in interrupt context, and the
    1209              :  * specified thread is guaranteed to have been preempted by the
    1210              :  * currently-executing ISR.  Such a callback is free to, for example,
    1211              :  * modify the thread priority or slice time for future execution,
    1212              :  * suspend the thread, etc...
    1213              :  *
    1214              :  * @note Unlike the older API, the time slice parameter here is
    1215              :  * specified in ticks, not milliseconds.  Ticks have always been the
    1216              :  * internal unit, and not all platforms have integer conversions
    1217              :  * between the two.
    1218              :  *
    1219              :  * @note Threads with a non-zero slice time set will be timesliced
    1220              :  * always, even if they are higher priority than the maximum timeslice
    1221              :  * priority set via k_sched_time_slice_set().
    1222              :  *
    1223              :  * @note The callback notification for slice expiration happens, as it
    1224              :  * must, while the thread is still "current", and thus it happens
    1225              :  * before any registered timeouts at this tick.  This has the somewhat
    1226              :  * confusing side effect that the tick time (c.f. k_uptime_get()) does
    1227              :  * not yet reflect the expired ticks.  Applications wishing to make
    1228              :  * fine-grained timing decisions within this callback should use the
    1229              :  * cycle API, or derived facilities like k_thread_runtime_stats_get().
    1230              :  *
    1231              :  * @param th A valid, initialized thread
    1232              :  * @param slice_ticks Maximum timeslice, in ticks
    1233              :  * @param expired Callback function called on slice expiration
    1234              :  * @param data Parameter for the expiration handler
    1235              :  */
    1236            1 : void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
    1237              :                              k_thread_timeslice_fn_t expired, void *data);
    1238              : 
    1239              : /** @} */
    1240              : 
    1241              : /**
    1242              :  * @addtogroup isr_apis
    1243              :  * @{
    1244              :  */
    1245              : 
    1246              : /**
    1247              :  * @brief Determine if code is running at interrupt level.
    1248              :  *
    1249              :  * This routine allows the caller to customize its actions, depending on
    1250              :  * whether it is a thread or an ISR.
    1251              :  *
    1252              :  * @funcprops \isr_ok
    1253              :  *
    1254              :  * @return false if invoked by a thread.
    1255              :  * @return true if invoked by an ISR.
    1256              :  */
    1257            1 : bool k_is_in_isr(void);
    1258              : 
    1259              : /**
    1260              :  * @brief Determine if code is running in a preemptible thread.
    1261              :  *
    1262              :  * This routine allows the caller to customize its actions, depending on
    1263              :  * whether it can be preempted by another thread. The routine returns a 'true'
    1264              :  * value if all of the following conditions are met:
    1265              :  *
    1266              :  * - The code is running in a thread, not at ISR.
    1267              :  * - The thread's priority is in the preemptible range.
    1268              :  * - The thread has not locked the scheduler.
    1269              :  *
    1270              :  * @funcprops \isr_ok
    1271              :  *
    1272              :  * @return 0 if invoked by an ISR or by a cooperative thread.
    1273              :  * @return Non-zero if invoked by a preemptible thread.
    1274              :  */
    1275            1 : __syscall int k_is_preempt_thread(void);
    1276              : 
    1277              : /**
    1278              :  * @brief Test whether startup is in the before-main-task phase.
    1279              :  *
    1280              :  * This routine allows the caller to customize its actions, depending on
    1281              :  * whether it being invoked before the kernel is fully active.
    1282              :  *
    1283              :  * @funcprops \isr_ok
    1284              :  *
    1285              :  * @return true if invoked before post-kernel initialization
    1286              :  * @return false if invoked during/after post-kernel initialization
    1287              :  */
    1288            1 : static inline bool k_is_pre_kernel(void)
    1289              : {
    1290              :         extern bool z_sys_post_kernel; /* in init.c */
    1291              : 
    1292              :         return !z_sys_post_kernel;
    1293              : }
    1294              : 
    1295              : /**
    1296              :  * @}
    1297              :  */
    1298              : 
    1299              : /**
    1300              :  * @addtogroup thread_apis
    1301              :  * @{
    1302              :  */
    1303              : 
    1304              : /**
    1305              :  * @brief Lock the scheduler.
    1306              :  *
    1307              :  * This routine prevents the current thread from being preempted by another
    1308              :  * thread by instructing the scheduler to treat it as a cooperative thread.
    1309              :  * If the thread subsequently performs an operation that makes it unready,
    1310              :  * it will be context switched out in the normal manner. When the thread
    1311              :  * again becomes the current thread, its non-preemptible status is maintained.
    1312              :  *
    1313              :  * This routine can be called recursively.
    1314              :  *
    1315              :  * Owing to clever implementation details, scheduler locks are
    1316              :  * extremely fast for non-userspace threads (just one byte
    1317              :  * inc/decrement in the thread struct).
    1318              :  *
    1319              :  * @note This works by elevating the thread priority temporarily to a
    1320              :  * cooperative priority, allowing cheap synchronization vs. other
    1321              :  * preemptible or cooperative threads running on the current CPU.  It
    1322              :  * does not prevent preemption or asynchrony of other types.  It does
    1323              :  * not prevent threads from running on other CPUs when CONFIG_SMP=y.
    1324              :  * It does not prevent interrupts from happening, nor does it prevent
    1325              :  * threads with MetaIRQ priorities from preempting the current thread.
    1326              :  * In general this is a historical API not well-suited to modern
    1327              :  * applications, use with care.
    1328              :  */
    1329            1 : void k_sched_lock(void);
    1330              : 
    1331              : /**
    1332              :  * @brief Unlock the scheduler.
    1333              :  *
    1334              :  * This routine reverses the effect of a previous call to k_sched_lock().
    1335              :  * A thread must call the routine once for each time it called k_sched_lock()
    1336              :  * before the thread becomes preemptible.
    1337              :  */
    1338            1 : void k_sched_unlock(void);
    1339              : 
    1340              : /**
    1341              :  * @brief Set current thread's custom data.
    1342              :  *
    1343              :  * This routine sets the custom data for the current thread to @ value.
    1344              :  *
    1345              :  * Custom data is not used by the kernel itself, and is freely available
    1346              :  * for a thread to use as it sees fit. It can be used as a framework
    1347              :  * upon which to build thread-local storage.
    1348              :  *
    1349              :  * @param value New custom data value.
    1350              :  *
    1351              :  */
    1352            1 : __syscall void k_thread_custom_data_set(void *value);
    1353              : 
    1354              : /**
    1355              :  * @brief Get current thread's custom data.
    1356              :  *
    1357              :  * This routine returns the custom data for the current thread.
    1358              :  *
    1359              :  * @return Current custom data value.
    1360              :  */
    1361            1 : __syscall void *k_thread_custom_data_get(void);
    1362              : 
    1363              : /**
    1364              :  * @brief Set current thread name
    1365              :  *
    1366              :  * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
    1367              :  * is enabled for tracing and debugging.
    1368              :  *
    1369              :  * @param thread Thread to set name, or NULL to set the current thread
    1370              :  * @param str Name string
    1371              :  * @retval 0 on success
    1372              :  * @retval -EFAULT Memory access error with supplied string
    1373              :  * @retval -ENOSYS Thread name configuration option not enabled
    1374              :  * @retval -EINVAL Thread name too long
    1375              :  */
    1376            1 : __syscall int k_thread_name_set(k_tid_t thread, const char *str);
    1377              : 
    1378              : /**
    1379              :  * @brief Get thread name
    1380              :  *
    1381              :  * Get the name of a thread
    1382              :  *
    1383              :  * @param thread Thread ID
    1384              :  * @retval Thread name, or NULL if configuration not enabled
    1385              :  */
    1386            1 : const char *k_thread_name_get(k_tid_t thread);
    1387              : 
    1388              : /**
    1389              :  * @brief Copy the thread name into a supplied buffer
    1390              :  *
    1391              :  * @param thread Thread to obtain name information
    1392              :  * @param buf Destination buffer
    1393              :  * @param size Destination buffer size
    1394              :  * @retval -ENOSPC Destination buffer too small
    1395              :  * @retval -EFAULT Memory access error
    1396              :  * @retval -ENOSYS Thread name feature not enabled
    1397              :  * @retval 0 Success
    1398              :  */
    1399            1 : __syscall int k_thread_name_copy(k_tid_t thread, char *buf,
    1400              :                                  size_t size);
    1401              : 
    1402              : /**
    1403              :  * @brief Get thread state string
    1404              :  *
    1405              :  * This routine generates a human friendly string containing the thread's
    1406              :  * state, and copies as much of it as possible into @a buf.
    1407              :  *
    1408              :  * @param thread_id Thread ID
    1409              :  * @param buf Buffer into which to copy state strings
    1410              :  * @param buf_size Size of the buffer
    1411              :  *
    1412              :  * @retval Pointer to @a buf if data was copied, else a pointer to "".
    1413              :  */
    1414            1 : const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
    1415              : 
    1416              : /**
    1417              :  * @}
    1418              :  */
    1419              : 
    1420              : /**
    1421              :  * @addtogroup clock_apis
    1422              :  * @{
    1423              :  */
    1424              : 
    1425              : /**
    1426              :  * @brief Generate null timeout delay.
    1427              :  *
    1428              :  * This macro generates a timeout delay that instructs a kernel API
    1429              :  * not to wait if the requested operation cannot be performed immediately.
    1430              :  *
    1431              :  * @return Timeout delay value.
    1432              :  */
    1433            1 : #define K_NO_WAIT Z_TIMEOUT_NO_WAIT
    1434              : 
    1435              : /**
    1436              :  * @brief Generate timeout delay from nanoseconds.
    1437              :  *
    1438              :  * This macro generates a timeout delay that instructs a kernel API to
    1439              :  * wait up to @a t nanoseconds to perform the requested operation.
    1440              :  * Note that timer precision is limited to the tick rate, not the
    1441              :  * requested value.
    1442              :  *
    1443              :  * @param t Duration in nanoseconds.
    1444              :  *
    1445              :  * @return Timeout delay value.
    1446              :  */
    1447            1 : #define K_NSEC(t)     Z_TIMEOUT_NS(t)
    1448              : 
    1449              : /**
    1450              :  * @brief Generate timeout delay from microseconds.
    1451              :  *
    1452              :  * This macro generates a timeout delay that instructs a kernel API
    1453              :  * to wait up to @a t microseconds to perform the requested operation.
    1454              :  * Note that timer precision is limited to the tick rate, not the
    1455              :  * requested value.
    1456              :  *
    1457              :  * @param t Duration in microseconds.
    1458              :  *
    1459              :  * @return Timeout delay value.
    1460              :  */
    1461            1 : #define K_USEC(t)     Z_TIMEOUT_US(t)
    1462              : 
    1463              : /**
    1464              :  * @brief Generate timeout delay from cycles.
    1465              :  *
    1466              :  * This macro generates a timeout delay that instructs a kernel API
    1467              :  * to wait up to @a t cycles to perform the requested operation.
    1468              :  *
    1469              :  * @param t Duration in cycles.
    1470              :  *
    1471              :  * @return Timeout delay value.
    1472              :  */
    1473            1 : #define K_CYC(t)     Z_TIMEOUT_CYC(t)
    1474              : 
    1475              : /**
    1476              :  * @brief Generate timeout delay from system ticks.
    1477              :  *
    1478              :  * This macro generates a timeout delay that instructs a kernel API
    1479              :  * to wait up to @a t ticks to perform the requested operation.
    1480              :  *
    1481              :  * @param t Duration in system ticks.
    1482              :  *
    1483              :  * @return Timeout delay value.
    1484              :  */
    1485            1 : #define K_TICKS(t)     Z_TIMEOUT_TICKS(t)
    1486              : 
    1487              : /**
    1488              :  * @brief Generate timeout delay from milliseconds.
    1489              :  *
    1490              :  * This macro generates a timeout delay that instructs a kernel API
    1491              :  * to wait up to @a ms milliseconds to perform the requested operation.
    1492              :  *
    1493              :  * @param ms Duration in milliseconds.
    1494              :  *
    1495              :  * @return Timeout delay value.
    1496              :  */
    1497            1 : #define K_MSEC(ms)     Z_TIMEOUT_MS(ms)
    1498              : 
    1499              : /**
    1500              :  * @brief Generate timeout delay from seconds.
    1501              :  *
    1502              :  * This macro generates a timeout delay that instructs a kernel API
    1503              :  * to wait up to @a s seconds to perform the requested operation.
    1504              :  *
    1505              :  * @param s Duration in seconds.
    1506              :  *
    1507              :  * @return Timeout delay value.
    1508              :  */
    1509            1 : #define K_SECONDS(s)   K_MSEC((s) * MSEC_PER_SEC)
    1510              : 
    1511              : /**
    1512              :  * @brief Generate timeout delay from minutes.
    1513              : 
    1514              :  * This macro generates a timeout delay that instructs a kernel API
    1515              :  * to wait up to @a m minutes to perform the requested operation.
    1516              :  *
    1517              :  * @param m Duration in minutes.
    1518              :  *
    1519              :  * @return Timeout delay value.
    1520              :  */
    1521            1 : #define K_MINUTES(m)   K_SECONDS((m) * 60)
    1522              : 
    1523              : /**
    1524              :  * @brief Generate timeout delay from hours.
    1525              :  *
    1526              :  * This macro generates a timeout delay that instructs a kernel API
    1527              :  * to wait up to @a h hours to perform the requested operation.
    1528              :  *
    1529              :  * @param h Duration in hours.
    1530              :  *
    1531              :  * @return Timeout delay value.
    1532              :  */
    1533            1 : #define K_HOURS(h)     K_MINUTES((h) * 60)
    1534              : 
    1535              : /**
    1536              :  * @brief Generate infinite timeout delay.
    1537              :  *
    1538              :  * This macro generates a timeout delay that instructs a kernel API
    1539              :  * to wait as long as necessary to perform the requested operation.
    1540              :  *
    1541              :  * @return Timeout delay value.
    1542              :  */
    1543            1 : #define K_FOREVER Z_FOREVER
    1544              : 
    1545              : #ifdef CONFIG_TIMEOUT_64BIT
    1546              : 
    1547              : /**
    1548              :  * @brief Generates an absolute/uptime timeout value from system ticks
    1549              :  *
    1550              :  * This macro generates a timeout delay that represents an expiration
    1551              :  * at the absolute uptime value specified, in system ticks.  That is, the
    1552              :  * timeout will expire immediately after the system uptime reaches the
    1553              :  * specified tick count. Value is clamped to the range 0 to INT64_MAX-1.
    1554              :  *
    1555              :  * @param t Tick uptime value
    1556              :  * @return Timeout delay value
    1557              :  */
    1558              : #define K_TIMEOUT_ABS_TICKS(t) \
    1559              :         Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)CLAMP(t, 0, (INT64_MAX - 1))))
    1560              : 
    1561              : /**
    1562              :  * @brief Generates an absolute/uptime timeout value from seconds
    1563              :  *
    1564              :  * This macro generates a timeout delay that represents an expiration
    1565              :  * at the absolute uptime value specified, in seconds.  That is, the
    1566              :  * timeout will expire immediately after the system uptime reaches the
    1567              :  * specified tick count.
    1568              :  *
    1569              :  * @param t Second uptime value
    1570              :  * @return Timeout delay value
    1571              :  */
    1572              : #define K_TIMEOUT_ABS_SEC(t) K_TIMEOUT_ABS_TICKS(k_sec_to_ticks_ceil64(t))
    1573              : 
    1574              : /**
    1575              :  * @brief Generates an absolute/uptime timeout value from milliseconds
    1576              :  *
    1577              :  * This macro generates a timeout delay that represents an expiration
    1578              :  * at the absolute uptime value specified, in milliseconds.  That is,
    1579              :  * the timeout will expire immediately after the system uptime reaches
    1580              :  * the specified tick count.
    1581              :  *
    1582              :  * @param t Millisecond uptime value
    1583              :  * @return Timeout delay value
    1584              :  */
    1585              : #define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
    1586              : 
    1587              : /**
    1588              :  * @brief Generates an absolute/uptime timeout value from microseconds
    1589              :  *
    1590              :  * This macro generates a timeout delay that represents an expiration
    1591              :  * at the absolute uptime value specified, in microseconds.  That is,
    1592              :  * the timeout will expire immediately after the system uptime reaches
    1593              :  * the specified time.  Note that timer precision is limited by the
    1594              :  * system tick rate and not the requested timeout value.
    1595              :  *
    1596              :  * @param t Microsecond uptime value
    1597              :  * @return Timeout delay value
    1598              :  */
    1599              : #define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
    1600              : 
    1601              : /**
    1602              :  * @brief Generates an absolute/uptime timeout value from nanoseconds
    1603              :  *
    1604              :  * This macro generates a timeout delay that represents an expiration
    1605              :  * at the absolute uptime value specified, in nanoseconds.  That is,
    1606              :  * the timeout will expire immediately after the system uptime reaches
    1607              :  * the specified time.  Note that timer precision is limited by the
    1608              :  * system tick rate and not the requested timeout value.
    1609              :  *
    1610              :  * @param t Nanosecond uptime value
    1611              :  * @return Timeout delay value
    1612              :  */
    1613              : #define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
    1614              : 
    1615              : /**
    1616              :  * @brief Generates an absolute/uptime timeout value from system cycles
    1617              :  *
    1618              :  * This macro generates a timeout delay that represents an expiration
    1619              :  * at the absolute uptime value specified, in cycles.  That is, the
    1620              :  * timeout will expire immediately after the system uptime reaches the
    1621              :  * specified time.  Note that timer precision is limited by the system
    1622              :  * tick rate and not the requested timeout value.
    1623              :  *
    1624              :  * @param t Cycle uptime value
    1625              :  * @return Timeout delay value
    1626              :  */
    1627              : #define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
    1628              : 
    1629              : #endif
    1630              : 
    1631              : /**
    1632              :  * @}
    1633              :  */
    1634              : 
    1635              : /**
    1636              :  * @brief Kernel timer structure
    1637              :  *
    1638              :  * This structure is used to represent a kernel timer.
    1639              :  * All the members are internal and should not be accessed directly.
    1640              :  */
    1641            1 : struct k_timer {
    1642              :         /**
    1643              :          * @cond INTERNAL_HIDDEN
    1644              :          */
    1645              : 
    1646              :         /*
    1647              :          * _timeout structure must be first here if we want to use
    1648              :          * dynamic timer allocation. timeout.node is used in the double-linked
    1649              :          * list of free timers
    1650              :          */
    1651              :         struct _timeout timeout;
    1652              : 
    1653              :         /* wait queue for the (single) thread waiting on this timer */
    1654              :         _wait_q_t wait_q;
    1655              : 
    1656              :         /* runs in ISR context */
    1657              :         void (*expiry_fn)(struct k_timer *timer);
    1658              : 
    1659              :         /* runs in the context of the thread that calls k_timer_stop() */
    1660              :         void (*stop_fn)(struct k_timer *timer);
    1661              : 
    1662              :         /* timer period */
    1663              :         k_timeout_t period;
    1664              : 
    1665              :         /* timer status */
    1666              :         uint32_t status;
    1667              : 
    1668              :         /* user-specific data, also used to support legacy features */
    1669              :         void *user_data;
    1670              : 
    1671              :         SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
    1672              : 
    1673              : #ifdef CONFIG_OBJ_CORE_TIMER
    1674              :         struct k_obj_core  obj_core;
    1675              : #endif
    1676              :         /**
    1677              :          * INTERNAL_HIDDEN @endcond
    1678              :          */
    1679              : };
    1680              : 
    1681              : /**
    1682              :  * @cond INTERNAL_HIDDEN
    1683              :  */
    1684              : #define Z_TIMER_INITIALIZER(obj, expiry, stop) \
    1685              :         { \
    1686              :         .timeout = { \
    1687              :                 .node = {},\
    1688              :                 .fn = z_timer_expiration_handler, \
    1689              :                 .dticks = 0, \
    1690              :         }, \
    1691              :         .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    1692              :         .expiry_fn = expiry, \
    1693              :         .stop_fn = stop, \
    1694              :         .period = {}, \
    1695              :         .status = 0, \
    1696              :         .user_data = 0, \
    1697              :         }
    1698              : 
    1699              : /**
    1700              :  * INTERNAL_HIDDEN @endcond
    1701              :  */
    1702              : 
    1703              : /**
    1704              :  * @defgroup timer_apis Timer APIs
    1705              :  * @ingroup kernel_apis
    1706              :  * @{
    1707              :  */
    1708              : 
    1709              : /**
    1710              :  * @typedef k_timer_expiry_t
    1711              :  * @brief Timer expiry function type.
    1712              :  *
    1713              :  * A timer's expiry function is executed by the system clock interrupt handler
    1714              :  * each time the timer expires. The expiry function is optional, and is only
    1715              :  * invoked if the timer has been initialized with one.
    1716              :  *
    1717              :  * @param timer     Address of timer.
    1718              :  */
    1719            1 : typedef void (*k_timer_expiry_t)(struct k_timer *timer);
    1720              : 
    1721              : /**
    1722              :  * @typedef k_timer_stop_t
    1723              :  * @brief Timer stop function type.
    1724              :  *
    1725              :  * A timer's stop function is executed if the timer is stopped prematurely.
    1726              :  * The function runs in the context of call that stops the timer.  As
    1727              :  * k_timer_stop() can be invoked from an ISR, the stop function must be
    1728              :  * callable from interrupt context (isr-ok).
    1729              :  *
    1730              :  * The stop function is optional, and is only invoked if the timer has been
    1731              :  * initialized with one.
    1732              :  *
    1733              :  * @param timer     Address of timer.
    1734              :  */
    1735            1 : typedef void (*k_timer_stop_t)(struct k_timer *timer);
    1736              : 
    1737              : /**
    1738              :  * @brief Statically define and initialize a timer.
    1739              :  *
    1740              :  * The timer can be accessed outside the module where it is defined using:
    1741              :  *
    1742              :  * @code extern struct k_timer <name>; @endcode
    1743              :  *
    1744              :  * @param name Name of the timer variable.
    1745              :  * @param expiry_fn Function to invoke each time the timer expires.
    1746              :  * @param stop_fn   Function to invoke if the timer is stopped while running.
    1747              :  */
    1748            1 : #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
    1749              :         STRUCT_SECTION_ITERABLE(k_timer, name) = \
    1750              :                 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
    1751              : 
    1752              : /**
    1753              :  * @brief Initialize a timer.
    1754              :  *
    1755              :  * This routine initializes a timer, prior to its first use.
    1756              :  *
    1757              :  * @param timer     Address of timer.
    1758              :  * @param expiry_fn Function to invoke each time the timer expires.
    1759              :  * @param stop_fn   Function to invoke if the timer is stopped while running.
    1760              :  */
    1761            1 : void k_timer_init(struct k_timer *timer,
    1762              :                          k_timer_expiry_t expiry_fn,
    1763              :                          k_timer_stop_t stop_fn);
    1764              : 
    1765              : /**
    1766              :  * @brief Start a timer.
    1767              :  *
    1768              :  * This routine starts a timer, and resets its status to zero. The timer
    1769              :  * begins counting down using the specified duration and period values.
    1770              :  *
    1771              :  * Attempting to start a timer that is already running is permitted.
    1772              :  * The timer's status is reset to zero and the timer begins counting down
    1773              :  * using the new duration and period values.
    1774              :  *
    1775              :  * This routine neither updates nor has any other effect on the specified
    1776              :  * timer if @a duration is K_FOREVER.
    1777              :  *
    1778              :  * @param timer     Address of timer.
    1779              :  * @param duration  Initial timer duration.
    1780              :  * @param period    Timer period.
    1781              :  */
    1782            1 : __syscall void k_timer_start(struct k_timer *timer,
    1783              :                              k_timeout_t duration, k_timeout_t period);
    1784              : 
    1785              : /**
    1786              :  * @brief Stop a timer.
    1787              :  *
    1788              :  * This routine stops a running timer prematurely. The timer's stop function,
    1789              :  * if one exists, is invoked by the caller.
    1790              :  *
    1791              :  * Attempting to stop a timer that is not running is permitted, but has no
    1792              :  * effect on the timer.
    1793              :  *
    1794              :  * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
    1795              :  * be called from ISRs.
    1796              :  *
    1797              :  * @funcprops \isr_ok
    1798              :  *
    1799              :  * @param timer     Address of timer.
    1800              :  */
    1801            1 : __syscall void k_timer_stop(struct k_timer *timer);
    1802              : 
    1803              : /**
    1804              :  * @brief Read timer status.
    1805              :  *
    1806              :  * This routine reads the timer's status, which indicates the number of times
    1807              :  * it has expired since its status was last read.
    1808              :  *
    1809              :  * Calling this routine resets the timer's status to zero.
    1810              :  *
    1811              :  * @param timer     Address of timer.
    1812              :  *
    1813              :  * @return Timer status.
    1814              :  */
    1815            1 : __syscall uint32_t k_timer_status_get(struct k_timer *timer);
    1816              : 
    1817              : /**
    1818              :  * @brief Synchronize thread to timer expiration.
    1819              :  *
    1820              :  * This routine blocks the calling thread until the timer's status is non-zero
    1821              :  * (indicating that it has expired at least once since it was last examined)
    1822              :  * or the timer is stopped. If the timer status is already non-zero,
    1823              :  * or the timer is already stopped, the caller continues without waiting.
    1824              :  *
    1825              :  * Calling this routine resets the timer's status to zero.
    1826              :  *
    1827              :  * This routine must not be used by interrupt handlers, since they are not
    1828              :  * allowed to block.
    1829              :  *
    1830              :  * @param timer     Address of timer.
    1831              :  *
    1832              :  * @return Timer status.
    1833              :  */
    1834            1 : __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
    1835              : 
    1836              : #ifdef CONFIG_SYS_CLOCK_EXISTS
    1837              : 
    1838              : /**
    1839              :  * @brief Get next expiration time of a timer, in system ticks
    1840              :  *
    1841              :  * This routine returns the future system uptime reached at the next
    1842              :  * time of expiration of the timer, in units of system ticks.  If the
    1843              :  * timer is not running, current system time is returned.
    1844              :  *
    1845              :  * @param timer The timer object
    1846              :  * @return Uptime of expiration, in ticks
    1847              :  */
    1848            1 : __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
    1849              : 
    1850              : static inline k_ticks_t z_impl_k_timer_expires_ticks(
    1851              :                                        const struct k_timer *timer)
    1852              : {
    1853              :         return z_timeout_expires(&timer->timeout);
    1854              : }
    1855              : 
    1856              : /**
    1857              :  * @brief Get time remaining before a timer next expires, in system ticks
    1858              :  *
    1859              :  * This routine computes the time remaining before a running timer
    1860              :  * next expires, in units of system ticks.  If the timer is not
    1861              :  * running, it returns zero.
    1862              :  *
    1863              :  * @param timer The timer object
    1864              :  * @return Remaining time until expiration, in ticks
    1865              :  */
    1866            1 : __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
    1867              : 
    1868              : static inline k_ticks_t z_impl_k_timer_remaining_ticks(
    1869              :                                        const struct k_timer *timer)
    1870              : {
    1871              :         return z_timeout_remaining(&timer->timeout);
    1872              : }
    1873              : 
    1874              : /**
    1875              :  * @brief Get time remaining before a timer next expires.
    1876              :  *
    1877              :  * This routine computes the (approximate) time remaining before a running
    1878              :  * timer next expires. If the timer is not running, it returns zero.
    1879              :  *
    1880              :  * @param timer     Address of timer.
    1881              :  *
    1882              :  * @return Remaining time (in milliseconds).
    1883              :  */
    1884            1 : static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
    1885              : {
    1886              :         return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
    1887              : }
    1888              : 
    1889              : #endif /* CONFIG_SYS_CLOCK_EXISTS */
    1890              : 
    1891              : /**
    1892              :  * @brief Associate user-specific data with a timer.
    1893              :  *
    1894              :  * This routine records the @a user_data with the @a timer, to be retrieved
    1895              :  * later.
    1896              :  *
    1897              :  * It can be used e.g. in a timer handler shared across multiple subsystems to
    1898              :  * retrieve data specific to the subsystem this timer is associated with.
    1899              :  *
    1900              :  * @param timer     Address of timer.
    1901              :  * @param user_data User data to associate with the timer.
    1902              :  */
    1903            1 : __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
    1904              : 
    1905              : /**
    1906              :  * @internal
    1907              :  */
    1908              : static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
    1909              :                                                void *user_data)
    1910              : {
    1911              :         timer->user_data = user_data;
    1912              : }
    1913              : 
    1914              : /**
    1915              :  * @brief Retrieve the user-specific data from a timer.
    1916              :  *
    1917              :  * @param timer     Address of timer.
    1918              :  *
    1919              :  * @return The user data.
    1920              :  */
    1921            1 : __syscall void *k_timer_user_data_get(const struct k_timer *timer);
    1922              : 
    1923              : static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
    1924              : {
    1925              :         return timer->user_data;
    1926              : }
    1927              : 
    1928              : /** @} */
    1929              : 
    1930              : /**
    1931              :  * @addtogroup clock_apis
    1932              :  * @ingroup kernel_apis
    1933              :  * @{
    1934              :  */
    1935              : 
    1936              : /**
    1937              :  * @brief Get system uptime, in system ticks.
    1938              :  *
    1939              :  * This routine returns the elapsed time since the system booted, in
    1940              :  * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
    1941              :  * fundamental unit of resolution of kernel timekeeping.
    1942              :  *
    1943              :  * @return Current uptime in ticks.
    1944              :  */
    1945            1 : __syscall int64_t k_uptime_ticks(void);
    1946              : 
    1947              : /**
    1948              :  * @brief Get system uptime.
    1949              :  *
    1950              :  * This routine returns the elapsed time since the system booted,
    1951              :  * in milliseconds.
    1952              :  *
    1953              :  * @note
    1954              :  *    While this function returns time in milliseconds, it does
    1955              :  *    not mean it has millisecond resolution. The actual resolution depends on
    1956              :  *    @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
    1957              :  *
    1958              :  * @return Current uptime in milliseconds.
    1959              :  */
    1960            1 : static inline int64_t k_uptime_get(void)
    1961              : {
    1962              :         return k_ticks_to_ms_floor64(k_uptime_ticks());
    1963              : }
    1964              : 
    1965              : /**
    1966              :  * @brief Get system uptime (32-bit version).
    1967              :  *
    1968              :  * This routine returns the lower 32 bits of the system uptime in
    1969              :  * milliseconds.
    1970              :  *
    1971              :  * Because correct conversion requires full precision of the system
    1972              :  * clock there is no benefit to using this over k_uptime_get() unless
    1973              :  * you know the application will never run long enough for the system
    1974              :  * clock to approach 2^32 ticks.  Calls to this function may involve
    1975              :  * interrupt blocking and 64-bit math.
    1976              :  *
    1977              :  * @note
    1978              :  *    While this function returns time in milliseconds, it does
    1979              :  *    not mean it has millisecond resolution. The actual resolution depends on
    1980              :  *    @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option
    1981              :  *
    1982              :  * @return The low 32 bits of the current uptime, in milliseconds.
    1983              :  */
    1984            1 : static inline uint32_t k_uptime_get_32(void)
    1985              : {
    1986              :         return (uint32_t)k_uptime_get();
    1987              : }
    1988              : 
    1989              : /**
    1990              :  * @brief Get system uptime in seconds.
    1991              :  *
    1992              :  * This routine returns the elapsed time since the system booted,
    1993              :  * in seconds.
    1994              :  *
    1995              :  * @return Current uptime in seconds.
    1996              :  */
    1997            1 : static inline uint32_t k_uptime_seconds(void)
    1998              : {
    1999              :         return k_ticks_to_sec_floor32(k_uptime_ticks());
    2000              : }
    2001              : 
    2002              : /**
    2003              :  * @brief Get elapsed time.
    2004              :  *
    2005              :  * This routine computes the elapsed time between the current system uptime
    2006              :  * and an earlier reference time, in milliseconds.
    2007              :  *
    2008              :  * @param reftime Pointer to a reference time, which is updated to the current
    2009              :  *                uptime upon return.
    2010              :  *
    2011              :  * @return Elapsed time.
    2012              :  */
    2013            1 : static inline int64_t k_uptime_delta(int64_t *reftime)
    2014              : {
    2015              :         int64_t uptime, delta;
    2016              : 
    2017              :         uptime = k_uptime_get();
    2018              :         delta = uptime - *reftime;
    2019              :         *reftime = uptime;
    2020              : 
    2021              :         return delta;
    2022              : }
    2023              : 
    2024              : /**
    2025              :  * @brief Read the hardware clock.
    2026              :  *
    2027              :  * This routine returns the current time, as measured by the system's hardware
    2028              :  * clock.
    2029              :  *
    2030              :  * @return Current hardware clock up-counter (in cycles).
    2031              :  */
    2032            1 : static inline uint32_t k_cycle_get_32(void)
    2033              : {
    2034              :         return arch_k_cycle_get_32();
    2035              : }
    2036              : 
    2037              : /**
    2038              :  * @brief Read the 64-bit hardware clock.
    2039              :  *
    2040              :  * This routine returns the current time in 64-bits, as measured by the
    2041              :  * system's hardware clock, if available.
    2042              :  *
    2043              :  * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
    2044              :  *
    2045              :  * @return Current hardware clock up-counter (in cycles).
    2046              :  */
    2047            1 : static inline uint64_t k_cycle_get_64(void)
    2048              : {
    2049              :         if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
    2050              :                 __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
    2051              :                             "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
    2052              :                 return 0;
    2053              :         }
    2054              : 
    2055              :         return arch_k_cycle_get_64();
    2056              : }
    2057              : 
    2058              : /**
    2059              :  * @}
    2060              :  */
    2061              : 
    2062            0 : struct k_queue {
    2063            0 :         sys_sflist_t data_q;
    2064            0 :         struct k_spinlock lock;
    2065            0 :         _wait_q_t wait_q;
    2066              : 
    2067              :         Z_DECL_POLL_EVENT
    2068              : 
    2069              :         SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
    2070              : };
    2071              : 
    2072              : /**
    2073              :  * @cond INTERNAL_HIDDEN
    2074              :  */
    2075              : 
    2076              : #define Z_QUEUE_INITIALIZER(obj) \
    2077              :         { \
    2078              :         .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
    2079              :         .lock = { }, \
    2080              :         .wait_q = Z_WAIT_Q_INIT(&obj.wait_q),       \
    2081              :         Z_POLL_EVENT_OBJ_INIT(obj)              \
    2082              :         }
    2083              : 
    2084              : /**
    2085              :  * INTERNAL_HIDDEN @endcond
    2086              :  */
    2087              : 
    2088              : /**
    2089              :  * @defgroup queue_apis Queue APIs
    2090              :  * @ingroup kernel_apis
    2091              :  * @{
    2092              :  */
    2093              : 
    2094              : /**
    2095              :  * @brief Initialize a queue.
    2096              :  *
    2097              :  * This routine initializes a queue object, prior to its first use.
    2098              :  *
    2099              :  * @param queue Address of the queue.
    2100              :  */
    2101            1 : __syscall void k_queue_init(struct k_queue *queue);
    2102              : 
    2103              : /**
    2104              :  * @brief Cancel waiting on a queue.
    2105              :  *
    2106              :  * This routine causes first thread pending on @a queue, if any, to
    2107              :  * return from k_queue_get() call with NULL value (as if timeout expired).
    2108              :  * If the queue is being waited on by k_poll(), it will return with
    2109              :  * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
    2110              :  * k_queue_get() will return NULL).
    2111              :  *
    2112              :  * @funcprops \isr_ok
    2113              :  *
    2114              :  * @param queue Address of the queue.
    2115              :  */
    2116            1 : __syscall void k_queue_cancel_wait(struct k_queue *queue);
    2117              : 
    2118              : /**
    2119              :  * @brief Append an element to the end of a queue.
    2120              :  *
    2121              :  * This routine appends a data item to @a queue. A queue data item must be
    2122              :  * aligned on a word boundary, and the first word of the item is reserved
    2123              :  * for the kernel's use.
    2124              :  *
    2125              :  * @funcprops \isr_ok
    2126              :  *
    2127              :  * @param queue Address of the queue.
    2128              :  * @param data Address of the data item.
    2129              :  */
    2130            1 : void k_queue_append(struct k_queue *queue, void *data);
    2131              : 
    2132              : /**
    2133              :  * @brief Append an element to a queue.
    2134              :  *
    2135              :  * This routine appends a data item to @a queue. There is an implicit memory
    2136              :  * allocation to create an additional temporary bookkeeping data structure from
    2137              :  * the calling thread's resource pool, which is automatically freed when the
    2138              :  * item is removed. The data itself is not copied.
    2139              :  *
    2140              :  * @funcprops \isr_ok
    2141              :  *
    2142              :  * @param queue Address of the queue.
    2143              :  * @param data Address of the data item.
    2144              :  *
    2145              :  * @retval 0 on success
    2146              :  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
    2147              :  */
    2148            1 : __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
    2149              : 
    2150              : /**
    2151              :  * @brief Prepend an element to a queue.
    2152              :  *
    2153              :  * This routine prepends a data item to @a queue. A queue data item must be
    2154              :  * aligned on a word boundary, and the first word of the item is reserved
    2155              :  * for the kernel's use.
    2156              :  *
    2157              :  * @funcprops \isr_ok
    2158              :  *
    2159              :  * @param queue Address of the queue.
    2160              :  * @param data Address of the data item.
    2161              :  */
    2162            1 : void k_queue_prepend(struct k_queue *queue, void *data);
    2163              : 
    2164              : /**
    2165              :  * @brief Prepend an element to a queue.
    2166              :  *
    2167              :  * This routine prepends a data item to @a queue. There is an implicit memory
    2168              :  * allocation to create an additional temporary bookkeeping data structure from
    2169              :  * the calling thread's resource pool, which is automatically freed when the
    2170              :  * item is removed. The data itself is not copied.
    2171              :  *
    2172              :  * @funcprops \isr_ok
    2173              :  *
    2174              :  * @param queue Address of the queue.
    2175              :  * @param data Address of the data item.
    2176              :  *
    2177              :  * @retval 0 on success
    2178              :  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
    2179              :  */
    2180            1 : __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
    2181              : 
    2182              : /**
    2183              :  * @brief Inserts an element to a queue.
    2184              :  *
    2185              :  * This routine inserts a data item to @a queue after previous item. A queue
    2186              :  * data item must be aligned on a word boundary, and the first word of
    2187              :  * the item is reserved for the kernel's use.
    2188              :  *
    2189              :  * @funcprops \isr_ok
    2190              :  *
    2191              :  * @param queue Address of the queue.
    2192              :  * @param prev Address of the previous data item.
    2193              :  * @param data Address of the data item.
    2194              :  */
    2195            1 : void k_queue_insert(struct k_queue *queue, void *prev, void *data);
    2196              : 
    2197              : /**
    2198              :  * @brief Atomically append a list of elements to a queue.
    2199              :  *
    2200              :  * This routine adds a list of data items to @a queue in one operation.
    2201              :  * The data items must be in a singly-linked list, with the first word
    2202              :  * in each data item pointing to the next data item; the list must be
    2203              :  * NULL-terminated.
    2204              :  *
    2205              :  * @funcprops \isr_ok
    2206              :  *
    2207              :  * @param queue Address of the queue.
    2208              :  * @param head Pointer to first node in singly-linked list.
    2209              :  * @param tail Pointer to last node in singly-linked list.
    2210              :  *
    2211              :  * @retval 0 on success
    2212              :  * @retval -EINVAL on invalid supplied data
    2213              :  *
    2214              :  */
    2215            1 : int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
    2216              : 
    2217              : /**
    2218              :  * @brief Atomically add a list of elements to a queue.
    2219              :  *
    2220              :  * This routine adds a list of data items to @a queue in one operation.
    2221              :  * The data items must be in a singly-linked list implemented using a
    2222              :  * sys_slist_t object. Upon completion, the original list is empty.
    2223              :  *
    2224              :  * @funcprops \isr_ok
    2225              :  *
    2226              :  * @param queue Address of the queue.
    2227              :  * @param list Pointer to sys_slist_t object.
    2228              :  *
    2229              :  * @retval 0 on success
    2230              :  * @retval -EINVAL on invalid data
    2231              :  */
    2232            1 : int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
    2233              : 
    2234              : /**
    2235              :  * @brief Get an element from a queue.
    2236              :  *
    2237              :  * This routine removes first data item from @a queue. The first word of the
    2238              :  * data item is reserved for the kernel's use.
    2239              :  *
    2240              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    2241              :  *
    2242              :  * @funcprops \isr_ok
    2243              :  *
    2244              :  * @param queue Address of the queue.
    2245              :  * @param timeout Waiting period to obtain a data item, or one of the special
    2246              :  *                values K_NO_WAIT and K_FOREVER.
    2247              :  *
    2248              :  * @return Address of the data item if successful; NULL if returned
    2249              :  * without waiting, or waiting period timed out.
    2250              :  */
    2251            1 : __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
    2252              : 
    2253              : /**
    2254              :  * @brief Remove an element from a queue.
    2255              :  *
    2256              :  * This routine removes data item from @a queue. The first word of the
    2257              :  * data item is reserved for the kernel's use. Removing elements from k_queue
    2258              :  * rely on sys_slist_find_and_remove which is not a constant time operation.
    2259              :  *
    2260              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    2261              :  *
    2262              :  * @funcprops \isr_ok
    2263              :  *
    2264              :  * @param queue Address of the queue.
    2265              :  * @param data Address of the data item.
    2266              :  *
    2267              :  * @return true if data item was removed
    2268              :  */
    2269            1 : bool k_queue_remove(struct k_queue *queue, void *data);
    2270              : 
    2271              : /**
    2272              :  * @brief Append an element to a queue only if it's not present already.
    2273              :  *
    2274              :  * This routine appends data item to @a queue. The first word of the data
    2275              :  * item is reserved for the kernel's use. Appending elements to k_queue
    2276              :  * relies on sys_slist_is_node_in_list which is not a constant time operation.
    2277              :  *
    2278              :  * @funcprops \isr_ok
    2279              :  *
    2280              :  * @param queue Address of the queue.
    2281              :  * @param data Address of the data item.
    2282              :  *
    2283              :  * @return true if data item was added, false if not
    2284              :  */
    2285            1 : bool k_queue_unique_append(struct k_queue *queue, void *data);
    2286              : 
    2287              : /**
    2288              :  * @brief Query a queue to see if it has data available.
    2289              :  *
    2290              :  * Note that the data might be already gone by the time this function returns
    2291              :  * if other threads are also trying to read from the queue.
    2292              :  *
    2293              :  * @funcprops \isr_ok
    2294              :  *
    2295              :  * @param queue Address of the queue.
    2296              :  *
    2297              :  * @return Non-zero if the queue is empty.
    2298              :  * @return 0 if data is available.
    2299              :  */
    2300            1 : __syscall int k_queue_is_empty(struct k_queue *queue);
    2301              : 
    2302              : static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
    2303              : {
    2304              :         return sys_sflist_is_empty(&queue->data_q) ? 1 : 0;
    2305              : }
    2306              : 
    2307              : /**
    2308              :  * @brief Peek element at the head of queue.
    2309              :  *
    2310              :  * Return element from the head of queue without removing it.
    2311              :  *
    2312              :  * @param queue Address of the queue.
    2313              :  *
    2314              :  * @return Head element, or NULL if queue is empty.
    2315              :  */
    2316            1 : __syscall void *k_queue_peek_head(struct k_queue *queue);
    2317              : 
    2318              : /**
    2319              :  * @brief Peek element at the tail of queue.
    2320              :  *
    2321              :  * Return element from the tail of queue without removing it.
    2322              :  *
    2323              :  * @param queue Address of the queue.
    2324              :  *
    2325              :  * @return Tail element, or NULL if queue is empty.
    2326              :  */
    2327            1 : __syscall void *k_queue_peek_tail(struct k_queue *queue);
    2328              : 
    2329              : /**
    2330              :  * @brief Statically define and initialize a queue.
    2331              :  *
    2332              :  * The queue can be accessed outside the module where it is defined using:
    2333              :  *
    2334              :  * @code extern struct k_queue <name>; @endcode
    2335              :  *
    2336              :  * @param name Name of the queue.
    2337              :  */
    2338            1 : #define K_QUEUE_DEFINE(name) \
    2339              :         STRUCT_SECTION_ITERABLE(k_queue, name) = \
    2340              :                 Z_QUEUE_INITIALIZER(name)
    2341              : 
    2342              : /** @} */
    2343              : 
    2344              : #ifdef CONFIG_USERSPACE
    2345              : /**
    2346              :  * @brief futex structure
    2347              :  *
    2348              :  * A k_futex is a lightweight mutual exclusion primitive designed
    2349              :  * to minimize kernel involvement. Uncontended operation relies
    2350              :  * only on atomic access to shared memory. k_futex are tracked as
    2351              :  * kernel objects and can live in user memory so that any access
    2352              :  * bypasses the kernel object permission management mechanism.
    2353              :  */
    2354            1 : struct k_futex {
    2355            0 :         atomic_t val;
    2356              : };
    2357              : 
    2358              : /**
    2359              :  * @brief futex kernel data structure
    2360              :  *
    2361              :  * z_futex_data are the helper data structure for k_futex to complete
    2362              :  * futex contended operation on kernel side, structure z_futex_data
    2363              :  * of every futex object is invisible in user mode.
    2364              :  */
    2365              : struct z_futex_data {
    2366              :         _wait_q_t wait_q;
    2367              :         struct k_spinlock lock;
    2368              : };
    2369              : 
    2370              : #define Z_FUTEX_DATA_INITIALIZER(obj) \
    2371              :         { \
    2372              :         .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
    2373              :         }
    2374              : 
    2375              : /**
    2376              :  * @defgroup futex_apis FUTEX APIs
    2377              :  * @ingroup kernel_apis
    2378              :  * @{
    2379              :  */
    2380              : 
    2381              : /**
    2382              :  * @brief Pend the current thread on a futex
    2383              :  *
    2384              :  * Tests that the supplied futex contains the expected value, and if so,
    2385              :  * goes to sleep until some other thread calls k_futex_wake() on it.
    2386              :  *
    2387              :  * @param futex Address of the futex.
    2388              :  * @param expected Expected value of the futex, if it is different the caller
    2389              :  *                 will not wait on it.
    2390              :  * @param timeout Waiting period on the futex, or one of the special values
    2391              :  *                K_NO_WAIT or K_FOREVER.
    2392              :  * @retval -EACCES Caller does not have read access to futex address.
    2393              :  * @retval -EAGAIN If the futex value did not match the expected parameter.
    2394              :  * @retval -EINVAL Futex parameter address not recognized by the kernel.
    2395              :  * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
    2396              :  * @retval 0 if the caller went to sleep and was woken up. The caller
    2397              :  *           should check the futex's value on wakeup to determine if it needs
    2398              :  *           to block again.
    2399              :  */
    2400            1 : __syscall int k_futex_wait(struct k_futex *futex, int expected,
    2401              :                            k_timeout_t timeout);
    2402              : 
    2403              : /**
    2404              :  * @brief Wake one/all threads pending on a futex
    2405              :  *
    2406              :  * Wake up the highest priority thread pending on the supplied futex, or
    2407              :  * wakeup all the threads pending on the supplied futex, and the behavior
    2408              :  * depends on wake_all.
    2409              :  *
    2410              :  * @param futex Futex to wake up pending threads.
    2411              :  * @param wake_all If true, wake up all pending threads; If false,
    2412              :  *                 wakeup the highest priority thread.
    2413              :  * @retval -EACCES Caller does not have access to the futex address.
    2414              :  * @retval -EINVAL Futex parameter address not recognized by the kernel.
    2415              :  * @retval Number of threads that were woken up.
    2416              :  */
    2417            1 : __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
    2418              : 
    2419              : /** @} */
    2420              : #endif
    2421              : 
    2422              : /**
    2423              :  * @defgroup event_apis Event APIs
    2424              :  * @ingroup kernel_apis
    2425              :  * @{
    2426              :  */
    2427              : 
    2428              : /**
    2429              :  * Event Structure
    2430              :  * @ingroup event_apis
    2431              :  */
    2432              : 
    2433              : /**
    2434              :  * @brief Kernel Event structure
    2435              :  *
    2436              :  * This structure is used to represent kernel events. All the members
    2437              :  * are internal and should not be accessed directly.
    2438              :  */
    2439              : 
    2440            1 : struct k_event {
    2441              : /**
    2442              :  * @cond INTERNAL_HIDDEN
    2443              :  */
    2444              :         _wait_q_t         wait_q;
    2445              :         uint32_t          events;
    2446              :         struct k_spinlock lock;
    2447              : 
    2448              :         SYS_PORT_TRACING_TRACKING_FIELD(k_event)
    2449              : 
    2450              : #ifdef CONFIG_OBJ_CORE_EVENT
    2451              :         struct k_obj_core obj_core;
    2452              : #endif
    2453              : /**
    2454              :  * INTERNAL_HIDDEN @endcond
    2455              :  */
    2456              : 
    2457              : };
    2458              : 
    2459              : /**
    2460              :  * @cond INTERNAL_HIDDEN
    2461              :  */
    2462              : 
    2463              : #define Z_EVENT_INITIALIZER(obj) \
    2464              :         { \
    2465              :         .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    2466              :         .events = 0, \
    2467              :         .lock = {}, \
    2468              :         }
    2469              : /**
    2470              :  * INTERNAL_HIDDEN @endcond
    2471              :  */
    2472              : 
    2473              : /**
    2474              :  * @brief Initialize an event object
    2475              :  *
    2476              :  * This routine initializes an event object, prior to its first use.
    2477              :  *
    2478              :  * @param event Address of the event object.
    2479              :  */
    2480            1 : __syscall void k_event_init(struct k_event *event);
    2481              : 
    2482              : /**
    2483              :  * @brief Post one or more events to an event object
    2484              :  *
    2485              :  * This routine posts one or more events to an event object. All tasks waiting
    2486              :  * on the event object @a event whose waiting conditions become met by this
    2487              :  * posting immediately unpend.
    2488              :  *
    2489              :  * Posting differs from setting in that posted events are merged together with
    2490              :  * the current set of events tracked by the event object.
    2491              :  *
    2492              :  * @funcprops \isr_ok
    2493              :  *
    2494              :  * @param event Address of the event object
    2495              :  * @param events Set of events to post to @a event
    2496              :  *
    2497              :  * @retval Previous value of the events in @a event
    2498              :  */
    2499            1 : __syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
    2500              : 
    2501              : /**
    2502              :  * @brief Set the events in an event object
    2503              :  *
    2504              :  * This routine sets the events stored in event object to the specified value.
    2505              :  * All tasks waiting on the event object @a event whose waiting conditions
    2506              :  * become met by this immediately unpend.
    2507              :  *
    2508              :  * Setting differs from posting in that set events replace the current set of
    2509              :  * events tracked by the event object.
    2510              :  *
    2511              :  * @funcprops \isr_ok
    2512              :  *
    2513              :  * @param event Address of the event object
    2514              :  * @param events Set of events to set in @a event
    2515              :  *
    2516              :  * @retval Previous value of the events in @a event
    2517              :  */
    2518            1 : __syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
    2519              : 
    2520              : /**
    2521              :  * @brief Set or clear the events in an event object
    2522              :  *
    2523              :  * This routine sets the events stored in event object to the specified value.
    2524              :  * All tasks waiting on the event object @a event whose waiting conditions
    2525              :  * become met by this immediately unpend. Unlike @ref k_event_set, this routine
    2526              :  * allows specific event bits to be set and cleared as determined by the mask.
    2527              :  *
    2528              :  * @funcprops \isr_ok
    2529              :  *
    2530              :  * @param event Address of the event object
    2531              :  * @param events Set of events to set/clear in @a event
    2532              :  * @param events_mask Mask to be applied to @a events
    2533              :  *
    2534              :  * @retval Previous value of the events in @a events_mask
    2535              :  */
    2536            1 : __syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
    2537              :                                   uint32_t events_mask);
    2538              : 
    2539              : /**
    2540              :  * @brief Clear the events in an event object
    2541              :  *
    2542              :  * This routine clears (resets) the specified events stored in an event object.
    2543              :  *
    2544              :  * @funcprops \isr_ok
    2545              :  *
    2546              :  * @param event Address of the event object
    2547              :  * @param events Set of events to clear in @a event
    2548              :  *
    2549              :  * @retval Previous value of the events in @a event
    2550              :  */
    2551            1 : __syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
    2552              : 
    2553              : /**
    2554              :  * @brief Wait for any of the specified events
    2555              :  *
    2556              :  * This routine waits on event object @a event until any of the specified
    2557              :  * events have been delivered to the event object, or the maximum wait time
    2558              :  * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
    2559              :  * events that are expressed as bits in a single 32-bit word.
    2560              :  *
    2561              :  * @note The caller must be careful when resetting if there are multiple threads
    2562              :  * waiting for the event object @a event.
    2563              :  *
    2564              :  * @note This function may be called from ISR context only when @a timeout is
    2565              :  * set to K_NO_WAIT.
    2566              :  *
    2567              :  * @param event Address of the event object
    2568              :  * @param events Set of desired events on which to wait
    2569              :  * @param reset If true, clear the set of events tracked by the event object
    2570              :  *              before waiting. If false, do not clear the events.
    2571              :  * @param timeout Waiting period for the desired set of events or one of the
    2572              :  *                special values K_NO_WAIT and K_FOREVER.
    2573              :  *
    2574              :  * @retval set of matching events upon success
    2575              :  * @retval 0 if matching events were not received within the specified time
    2576              :  */
    2577            1 : __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
    2578              :                                 bool reset, k_timeout_t timeout);
    2579              : 
    2580              : /**
    2581              :  * @brief Wait for all of the specified events
    2582              :  *
    2583              :  * This routine waits on event object @a event until all of the specified
    2584              :  * events have been delivered to the event object, or the maximum wait time
    2585              :  * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
    2586              :  * events that are expressed as bits in a single 32-bit word.
    2587              :  *
    2588              :  * @note The caller must be careful when resetting if there are multiple threads
    2589              :  * waiting for the event object @a event.
    2590              :  *
    2591              :  * @note This function may be called from ISR context only when @a timeout is
    2592              :  * set to K_NO_WAIT.
    2593              :  *
    2594              :  * @param event Address of the event object
    2595              :  * @param events Set of desired events on which to wait
    2596              :  * @param reset If true, clear the set of events tracked by the event object
    2597              :  *              before waiting. If false, do not clear the events.
    2598              :  * @param timeout Waiting period for the desired set of events or one of the
    2599              :  *                special values K_NO_WAIT and K_FOREVER.
    2600              :  *
    2601              :  * @retval set of matching events upon success
    2602              :  * @retval 0 if matching events were not received within the specified time
    2603              :  */
    2604            1 : __syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
    2605              :                                     bool reset, k_timeout_t timeout);
    2606              : 
    2607              : /**
    2608              :  * @brief Wait for any of the specified events (safe version)
    2609              :  *
    2610              :  * This call is nearly identical to @ref k_event_wait with the main difference
    2611              :  * being that the safe version atomically clears received events from the
    2612              :  * event object. This mitigates the need for calling @ref k_event_clear, or
    2613              :  * passing a "reset" argument, since doing so may result in lost event
    2614              :  * information.
    2615              :  *
    2616              :  * @param event Address of the event object
    2617              :  * @param events Set of desired events on which to wait
    2618              :  * @param reset If true, clear the set of events tracked by the event object
    2619              :  *              before waiting. If false, do not clear the events.
    2620              :  * @param timeout Waiting period for the desired set of events or one of the
    2621              :  *                special values K_NO_WAIT and K_FOREVER.
    2622              :  *
    2623              :  * @retval set of matching events upon success
    2624              :  * @retval 0 if no matching event was received within the specified time
    2625              :  */
    2626            1 : __syscall uint32_t k_event_wait_safe(struct k_event *event, uint32_t events,
    2627              :                                      bool reset, k_timeout_t timeout);
    2628              : 
    2629              : /**
    2630              :  * @brief Wait for all of the specified events (safe version)
    2631              :  *
    2632              :  * This call is nearly identical to @ref k_event_wait_all with the main
    2633              :  * difference being that the safe version atomically clears received events
    2634              :  * from the event object. This mitigates the need for calling
    2635              :  * @ref k_event_clear, or passing a "reset" argument, since doing so may
    2636              :  * result in lost event information.
    2637              :  *
    2638              :  * @param event Address of the event object
    2639              :  * @param events Set of desired events on which to wait
    2640              :  * @param reset If true, clear the set of events tracked by the event object
    2641              :  *              before waiting. If false, do not clear the events.
    2642              :  * @param timeout Waiting period for the desired set of events or one of the
    2643              :  *                special values K_NO_WAIT and K_FOREVER.
    2644              :  *
    2645              :  * @retval set of matching events upon success
    2646              :  * @retval 0 if all matching events were not received within the specified time
    2647              :  */
    2648            1 : __syscall uint32_t k_event_wait_all_safe(struct k_event *event, uint32_t events,
    2649              :                                          bool reset, k_timeout_t timeout);
    2650              : 
    2651              : 
    2652              : 
    2653              : /**
    2654              :  * @brief Test the events currently tracked in the event object
    2655              :  *
    2656              :  * @funcprops \isr_ok
    2657              :  *
    2658              :  * @param event Address of the event object
    2659              :  * @param events_mask Set of desired events to test
    2660              :  *
    2661              :  * @retval Current value of events in @a events_mask
    2662              :  */
    2663            1 : static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
    2664              : {
    2665              :         return k_event_wait(event, events_mask, false, K_NO_WAIT);
    2666              : }
    2667              : 
    2668              : /**
    2669              :  * @brief Statically define and initialize an event object
    2670              :  *
    2671              :  * The event can be accessed outside the module where it is defined using:
    2672              :  *
    2673              :  * @code extern struct k_event <name>; @endcode
    2674              :  *
    2675              :  * @param name Name of the event object.
    2676              :  */
    2677            1 : #define K_EVENT_DEFINE(name)                                   \
    2678              :         STRUCT_SECTION_ITERABLE(k_event, name) =               \
    2679              :                 Z_EVENT_INITIALIZER(name);
    2680              : 
    2681              : /** @} */
    2682              : 
    2683            0 : struct k_fifo {
    2684              :         struct k_queue _queue;
    2685              : #ifdef CONFIG_OBJ_CORE_FIFO
    2686              :         struct k_obj_core  obj_core;
    2687              : #endif
    2688              : };
    2689              : 
    2690              : /**
    2691              :  * @cond INTERNAL_HIDDEN
    2692              :  */
    2693              : #define Z_FIFO_INITIALIZER(obj) \
    2694              :         { \
    2695              :         ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
    2696              :         }
    2697              : 
    2698              : /**
    2699              :  * INTERNAL_HIDDEN @endcond
    2700              :  */
    2701              : 
    2702              : /**
    2703              :  * @defgroup fifo_apis FIFO APIs
    2704              :  * @ingroup kernel_apis
    2705              :  * @{
    2706              :  */
    2707              : 
    2708              : /**
    2709              :  * @brief Initialize a FIFO queue.
    2710              :  *
    2711              :  * This routine initializes a FIFO queue, prior to its first use.
    2712              :  *
    2713              :  * @param fifo Address of the FIFO queue.
    2714              :  */
    2715            1 : #define k_fifo_init(fifo)                                    \
    2716              :         ({                                                   \
    2717              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
    2718              :         k_queue_init(&(fifo)->_queue);                       \
    2719              :         K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo);   \
    2720              :         K_OBJ_CORE_LINK(K_OBJ_CORE(fifo));                   \
    2721              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo);  \
    2722              :         })
    2723              : 
    2724              : /**
    2725              :  * @brief Cancel waiting on a FIFO queue.
    2726              :  *
    2727              :  * This routine causes first thread pending on @a fifo, if any, to
    2728              :  * return from k_fifo_get() call with NULL value (as if timeout
    2729              :  * expired).
    2730              :  *
    2731              :  * @funcprops \isr_ok
    2732              :  *
    2733              :  * @param fifo Address of the FIFO queue.
    2734              :  */
    2735            1 : #define k_fifo_cancel_wait(fifo) \
    2736              :         ({ \
    2737              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
    2738              :         k_queue_cancel_wait(&(fifo)->_queue); \
    2739              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
    2740              :         })
    2741              : 
    2742              : /**
    2743              :  * @brief Add an element to a FIFO queue.
    2744              :  *
    2745              :  * This routine adds a data item to @a fifo. A FIFO data item must be
    2746              :  * aligned on a word boundary, and the first word of the item is reserved
    2747              :  * for the kernel's use.
    2748              :  *
    2749              :  * @funcprops \isr_ok
    2750              :  *
    2751              :  * @param fifo Address of the FIFO.
    2752              :  * @param data Address of the data item.
    2753              :  */
    2754            1 : #define k_fifo_put(fifo, data) \
    2755              :         ({ \
    2756              :         void *_data = data; \
    2757              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, _data); \
    2758              :         k_queue_append(&(fifo)->_queue, _data); \
    2759              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, _data); \
    2760              :         })
    2761              : 
    2762              : /**
    2763              :  * @brief Add an element to a FIFO queue.
    2764              :  *
    2765              :  * This routine adds a data item to @a fifo. There is an implicit memory
    2766              :  * allocation to create an additional temporary bookkeeping data structure from
    2767              :  * the calling thread's resource pool, which is automatically freed when the
    2768              :  * item is removed. The data itself is not copied.
    2769              :  *
    2770              :  * @funcprops \isr_ok
    2771              :  *
    2772              :  * @param fifo Address of the FIFO.
    2773              :  * @param data Address of the data item.
    2774              :  *
    2775              :  * @retval 0 on success
    2776              :  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
    2777              :  */
    2778            1 : #define k_fifo_alloc_put(fifo, data) \
    2779              :         ({ \
    2780              :         void *_data = data; \
    2781              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, _data); \
    2782              :         int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
    2783              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, _data, fap_ret); \
    2784              :         fap_ret; \
    2785              :         })
    2786              : 
    2787              : /**
    2788              :  * @brief Atomically add a list of elements to a FIFO.
    2789              :  *
    2790              :  * This routine adds a list of data items to @a fifo in one operation.
    2791              :  * The data items must be in a singly-linked list, with the first word of
    2792              :  * each data item pointing to the next data item; the list must be
    2793              :  * NULL-terminated.
    2794              :  *
    2795              :  * @funcprops \isr_ok
    2796              :  *
    2797              :  * @param fifo Address of the FIFO queue.
    2798              :  * @param head Pointer to first node in singly-linked list.
    2799              :  * @param tail Pointer to last node in singly-linked list.
    2800              :  */
    2801            1 : #define k_fifo_put_list(fifo, head, tail) \
    2802              :         ({ \
    2803              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
    2804              :         k_queue_append_list(&(fifo)->_queue, head, tail); \
    2805              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
    2806              :         })
    2807              : 
    2808              : /**
    2809              :  * @brief Atomically add a list of elements to a FIFO queue.
    2810              :  *
    2811              :  * This routine adds a list of data items to @a fifo in one operation.
    2812              :  * The data items must be in a singly-linked list implemented using a
    2813              :  * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
    2814              :  * and must be re-initialized via sys_slist_init().
    2815              :  *
    2816              :  * @funcprops \isr_ok
    2817              :  *
    2818              :  * @param fifo Address of the FIFO queue.
    2819              :  * @param list Pointer to sys_slist_t object.
    2820              :  */
    2821            1 : #define k_fifo_put_slist(fifo, list) \
    2822              :         ({ \
    2823              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
    2824              :         k_queue_merge_slist(&(fifo)->_queue, list); \
    2825              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
    2826              :         })
    2827              : 
    2828              : /**
    2829              :  * @brief Get an element from a FIFO queue.
    2830              :  *
    2831              :  * This routine removes a data item from @a fifo in a "first in, first out"
    2832              :  * manner. The first word of the data item is reserved for the kernel's use.
    2833              :  *
    2834              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    2835              :  *
    2836              :  * @funcprops \isr_ok
    2837              :  *
    2838              :  * @param fifo Address of the FIFO queue.
    2839              :  * @param timeout Waiting period to obtain a data item,
    2840              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    2841              :  *
    2842              :  * @return Address of the data item if successful; NULL if returned
    2843              :  * without waiting, or waiting period timed out.
    2844              :  */
    2845            1 : #define k_fifo_get(fifo, timeout) \
    2846              :         ({ \
    2847              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
    2848              :         void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
    2849              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
    2850              :         fg_ret; \
    2851              :         })
    2852              : 
    2853              : /**
    2854              :  * @brief Query a FIFO queue to see if it has data available.
    2855              :  *
    2856              :  * Note that the data might be already gone by the time this function returns
    2857              :  * if other threads is also trying to read from the FIFO.
    2858              :  *
    2859              :  * @funcprops \isr_ok
    2860              :  *
    2861              :  * @param fifo Address of the FIFO queue.
    2862              :  *
    2863              :  * @return Non-zero if the FIFO queue is empty.
    2864              :  * @return 0 if data is available.
    2865              :  */
    2866            1 : #define k_fifo_is_empty(fifo) \
    2867              :         k_queue_is_empty(&(fifo)->_queue)
    2868              : 
    2869              : /**
    2870              :  * @brief Peek element at the head of a FIFO queue.
    2871              :  *
    2872              :  * Return element from the head of FIFO queue without removing it. A usecase
    2873              :  * for this is if elements of the FIFO object are themselves containers. Then
    2874              :  * on each iteration of processing, a head container will be peeked,
    2875              :  * and some data processed out of it, and only if the container is empty,
    2876              :  * it will be completely remove from the FIFO queue.
    2877              :  *
    2878              :  * @param fifo Address of the FIFO queue.
    2879              :  *
    2880              :  * @return Head element, or NULL if the FIFO queue is empty.
    2881              :  */
    2882            1 : #define k_fifo_peek_head(fifo) \
    2883              :         ({ \
    2884              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
    2885              :         void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
    2886              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
    2887              :         fph_ret; \
    2888              :         })
    2889              : 
    2890              : /**
    2891              :  * @brief Peek element at the tail of FIFO queue.
    2892              :  *
    2893              :  * Return element from the tail of FIFO queue (without removing it). A usecase
    2894              :  * for this is if elements of the FIFO queue are themselves containers. Then
    2895              :  * it may be useful to add more data to the last container in a FIFO queue.
    2896              :  *
    2897              :  * @param fifo Address of the FIFO queue.
    2898              :  *
    2899              :  * @return Tail element, or NULL if a FIFO queue is empty.
    2900              :  */
    2901            1 : #define k_fifo_peek_tail(fifo) \
    2902              :         ({ \
    2903              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
    2904              :         void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
    2905              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
    2906              :         fpt_ret; \
    2907              :         })
    2908              : 
    2909              : /**
    2910              :  * @brief Statically define and initialize a FIFO queue.
    2911              :  *
    2912              :  * The FIFO queue can be accessed outside the module where it is defined using:
    2913              :  *
    2914              :  * @code extern struct k_fifo <name>; @endcode
    2915              :  *
    2916              :  * @param name Name of the FIFO queue.
    2917              :  */
    2918            1 : #define K_FIFO_DEFINE(name) \
    2919              :         STRUCT_SECTION_ITERABLE(k_fifo, name) = \
    2920              :                 Z_FIFO_INITIALIZER(name)
    2921              : 
    2922              : /** @} */
    2923              : 
    2924            0 : struct k_lifo {
    2925              :         struct k_queue _queue;
    2926              : #ifdef CONFIG_OBJ_CORE_LIFO
    2927              :         struct k_obj_core  obj_core;
    2928              : #endif
    2929              : };
    2930              : 
    2931              : /**
    2932              :  * @cond INTERNAL_HIDDEN
    2933              :  */
    2934              : 
    2935              : #define Z_LIFO_INITIALIZER(obj) \
    2936              :         { \
    2937              :         ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
    2938              :         }
    2939              : 
    2940              : /**
    2941              :  * INTERNAL_HIDDEN @endcond
    2942              :  */
    2943              : 
    2944              : /**
    2945              :  * @defgroup lifo_apis LIFO APIs
    2946              :  * @ingroup kernel_apis
    2947              :  * @{
    2948              :  */
    2949              : 
    2950              : /**
    2951              :  * @brief Initialize a LIFO queue.
    2952              :  *
    2953              :  * This routine initializes a LIFO queue object, prior to its first use.
    2954              :  *
    2955              :  * @param lifo Address of the LIFO queue.
    2956              :  */
    2957            1 : #define k_lifo_init(lifo)                                    \
    2958              :         ({                                                   \
    2959              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
    2960              :         k_queue_init(&(lifo)->_queue);                       \
    2961              :         K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo);   \
    2962              :         K_OBJ_CORE_LINK(K_OBJ_CORE(lifo));                   \
    2963              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo);  \
    2964              :         })
    2965              : 
    2966              : /**
    2967              :  * @brief Add an element to a LIFO queue.
    2968              :  *
    2969              :  * This routine adds a data item to @a lifo. A LIFO queue data item must be
    2970              :  * aligned on a word boundary, and the first word of the item is
    2971              :  * reserved for the kernel's use.
    2972              :  *
    2973              :  * @funcprops \isr_ok
    2974              :  *
    2975              :  * @param lifo Address of the LIFO queue.
    2976              :  * @param data Address of the data item.
    2977              :  */
    2978            1 : #define k_lifo_put(lifo, data) \
    2979              :         ({ \
    2980              :         void *_data = data; \
    2981              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, _data); \
    2982              :         k_queue_prepend(&(lifo)->_queue, _data); \
    2983              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, _data); \
    2984              :         })
    2985              : 
    2986              : /**
    2987              :  * @brief Add an element to a LIFO queue.
    2988              :  *
    2989              :  * This routine adds a data item to @a lifo. There is an implicit memory
    2990              :  * allocation to create an additional temporary bookkeeping data structure from
    2991              :  * the calling thread's resource pool, which is automatically freed when the
    2992              :  * item is removed. The data itself is not copied.
    2993              :  *
    2994              :  * @funcprops \isr_ok
    2995              :  *
    2996              :  * @param lifo Address of the LIFO.
    2997              :  * @param data Address of the data item.
    2998              :  *
    2999              :  * @retval 0 on success
    3000              :  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
    3001              :  */
    3002            1 : #define k_lifo_alloc_put(lifo, data) \
    3003              :         ({ \
    3004              :         void *_data = data; \
    3005              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, _data); \
    3006              :         int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
    3007              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, _data, lap_ret); \
    3008              :         lap_ret; \
    3009              :         })
    3010              : 
    3011              : /**
    3012              :  * @brief Get an element from a LIFO queue.
    3013              :  *
    3014              :  * This routine removes a data item from @a LIFO in a "last in, first out"
    3015              :  * manner. The first word of the data item is reserved for the kernel's use.
    3016              :  *
    3017              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    3018              :  *
    3019              :  * @funcprops \isr_ok
    3020              :  *
    3021              :  * @param lifo Address of the LIFO queue.
    3022              :  * @param timeout Waiting period to obtain a data item,
    3023              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    3024              :  *
    3025              :  * @return Address of the data item if successful; NULL if returned
    3026              :  * without waiting, or waiting period timed out.
    3027              :  */
    3028            1 : #define k_lifo_get(lifo, timeout) \
    3029              :         ({ \
    3030              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
    3031              :         void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
    3032              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
    3033              :         lg_ret; \
    3034              :         })
    3035              : 
    3036              : /**
    3037              :  * @brief Statically define and initialize a LIFO queue.
    3038              :  *
    3039              :  * The LIFO queue can be accessed outside the module where it is defined using:
    3040              :  *
    3041              :  * @code extern struct k_lifo <name>; @endcode
    3042              :  *
    3043              :  * @param name Name of the fifo.
    3044              :  */
    3045            1 : #define K_LIFO_DEFINE(name) \
    3046              :         STRUCT_SECTION_ITERABLE(k_lifo, name) = \
    3047              :                 Z_LIFO_INITIALIZER(name)
    3048              : 
    3049              : /** @} */
    3050              : 
    3051              : /**
    3052              :  * @cond INTERNAL_HIDDEN
    3053              :  */
    3054              : #define K_STACK_FLAG_ALLOC      ((uint8_t)1)    /* Buffer was allocated */
    3055              : 
    3056              : typedef uintptr_t stack_data_t;
    3057              : 
    3058              : struct k_stack {
    3059              :         _wait_q_t wait_q;
    3060              :         struct k_spinlock lock;
    3061              :         stack_data_t *base, *next, *top;
    3062              : 
    3063              :         uint8_t flags;
    3064              : 
    3065              :         SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
    3066              : 
    3067              : #ifdef CONFIG_OBJ_CORE_STACK
    3068              :         struct k_obj_core  obj_core;
    3069              : #endif
    3070              : };
    3071              : 
    3072              : #define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
    3073              :         { \
    3074              :         .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q),     \
    3075              :         .base = (stack_buffer), \
    3076              :         .next = (stack_buffer), \
    3077              :         .top = (stack_buffer) + (stack_num_entries), \
    3078              :         }
    3079              : 
    3080              : /**
    3081              :  * INTERNAL_HIDDEN @endcond
    3082              :  */
    3083              : 
    3084              : /**
    3085              :  * @defgroup stack_apis Stack APIs
    3086              :  * @ingroup kernel_apis
    3087              :  * @{
    3088              :  */
    3089              : 
    3090              : /**
    3091              :  * @brief Initialize a stack.
    3092              :  *
    3093              :  * This routine initializes a stack object, prior to its first use.
    3094              :  *
    3095              :  * @param stack Address of the stack.
    3096              :  * @param buffer Address of array used to hold stacked values.
    3097              :  * @param num_entries Maximum number of values that can be stacked.
    3098              :  */
    3099            1 : void k_stack_init(struct k_stack *stack,
    3100              :                   stack_data_t *buffer, uint32_t num_entries);
    3101              : 
    3102              : 
    3103              : /**
    3104              :  * @brief Initialize a stack.
    3105              :  *
    3106              :  * This routine initializes a stack object, prior to its first use. Internal
    3107              :  * buffers will be allocated from the calling thread's resource pool.
    3108              :  * This memory will be released if k_stack_cleanup() is called, or
    3109              :  * userspace is enabled and the stack object loses all references to it.
    3110              :  *
    3111              :  * @param stack Address of the stack.
    3112              :  * @param num_entries Maximum number of values that can be stacked.
    3113              :  *
    3114              :  * @return -ENOMEM if memory couldn't be allocated
    3115              :  */
    3116              : 
    3117            1 : __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
    3118              :                                    uint32_t num_entries);
    3119              : 
    3120              : /**
    3121              :  * @brief Release a stack's allocated buffer
    3122              :  *
    3123              :  * If a stack object was given a dynamically allocated buffer via
    3124              :  * k_stack_alloc_init(), this will free it. This function does nothing
    3125              :  * if the buffer wasn't dynamically allocated.
    3126              :  *
    3127              :  * @param stack Address of the stack.
    3128              :  * @retval 0 on success
    3129              :  * @retval -EAGAIN when object is still in use
    3130              :  */
    3131            1 : int k_stack_cleanup(struct k_stack *stack);
    3132              : 
    3133              : /**
    3134              :  * @brief Push an element onto a stack.
    3135              :  *
    3136              :  * This routine adds a stack_data_t value @a data to @a stack.
    3137              :  *
    3138              :  * @funcprops \isr_ok
    3139              :  *
    3140              :  * @param stack Address of the stack.
    3141              :  * @param data Value to push onto the stack.
    3142              :  *
    3143              :  * @retval 0 on success
    3144              :  * @retval -ENOMEM if stack is full
    3145              :  */
    3146            1 : __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
    3147              : 
    3148              : /**
    3149              :  * @brief Pop an element from a stack.
    3150              :  *
    3151              :  * This routine removes a stack_data_t value from @a stack in a "last in,
    3152              :  * first out" manner and stores the value in @a data.
    3153              :  *
    3154              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    3155              :  *
    3156              :  * @funcprops \isr_ok
    3157              :  *
    3158              :  * @param stack Address of the stack.
    3159              :  * @param data Address of area to hold the value popped from the stack.
    3160              :  * @param timeout Waiting period to obtain a value,
    3161              :  *                or one of the special values K_NO_WAIT and
    3162              :  *                K_FOREVER.
    3163              :  *
    3164              :  * @retval 0 Element popped from stack.
    3165              :  * @retval -EBUSY Returned without waiting.
    3166              :  * @retval -EAGAIN Waiting period timed out.
    3167              :  */
    3168            1 : __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
    3169              :                           k_timeout_t timeout);
    3170              : 
    3171              : /**
    3172              :  * @brief Statically define and initialize a stack
    3173              :  *
    3174              :  * The stack can be accessed outside the module where it is defined using:
    3175              :  *
    3176              :  * @code extern struct k_stack <name>; @endcode
    3177              :  *
    3178              :  * @param name Name of the stack.
    3179              :  * @param stack_num_entries Maximum number of values that can be stacked.
    3180              :  */
    3181            1 : #define K_STACK_DEFINE(name, stack_num_entries)                \
    3182              :         stack_data_t __noinit                                  \
    3183              :                 _k_stack_buf_##name[stack_num_entries];        \
    3184              :         STRUCT_SECTION_ITERABLE(k_stack, name) =               \
    3185              :                 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
    3186              :                                     stack_num_entries)
    3187              : 
    3188              : /** @} */
    3189              : 
    3190              : /**
    3191              :  * @cond INTERNAL_HIDDEN
    3192              :  */
    3193              : 
    3194              : struct k_work;
    3195              : struct k_work_q;
    3196              : struct k_work_queue_config;
    3197              : extern struct k_work_q k_sys_work_q;
    3198              : 
    3199              : /**
    3200              :  * INTERNAL_HIDDEN @endcond
    3201              :  */
    3202              : 
    3203              : /**
    3204              :  * @defgroup mutex_apis Mutex APIs
    3205              :  * @ingroup kernel_apis
    3206              :  * @{
    3207              :  */
    3208              : 
    3209              : /**
    3210              :  * Mutex Structure
    3211              :  * @ingroup mutex_apis
    3212              :  */
    3213            1 : struct k_mutex {
    3214              :         /** Mutex wait queue */
    3215            1 :         _wait_q_t wait_q;
    3216              :         /** Mutex owner */
    3217            1 :         struct k_thread *owner;
    3218              : 
    3219              :         /** Current lock count */
    3220            1 :         uint32_t lock_count;
    3221              : 
    3222              :         /** Original thread priority */
    3223            1 :         int owner_orig_prio;
    3224              : 
    3225              :         SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
    3226              : 
    3227              : #ifdef CONFIG_OBJ_CORE_MUTEX
    3228              :         struct k_obj_core obj_core;
    3229              : #endif
    3230              : };
    3231              : 
    3232              : /**
    3233              :  * @cond INTERNAL_HIDDEN
    3234              :  */
    3235              : #define Z_MUTEX_INITIALIZER(obj) \
    3236              :         { \
    3237              :         .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
    3238              :         .owner = NULL, \
    3239              :         .lock_count = 0, \
    3240              :         .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
    3241              :         }
    3242              : 
    3243              : /**
    3244              :  * INTERNAL_HIDDEN @endcond
    3245              :  */
    3246              : 
    3247              : /**
    3248              :  * @brief Statically define and initialize a mutex.
    3249              :  *
    3250              :  * The mutex can be accessed outside the module where it is defined using:
    3251              :  *
    3252              :  * @code extern struct k_mutex <name>; @endcode
    3253              :  *
    3254              :  * @param name Name of the mutex.
    3255              :  */
    3256            1 : #define K_MUTEX_DEFINE(name) \
    3257              :         STRUCT_SECTION_ITERABLE(k_mutex, name) = \
    3258              :                 Z_MUTEX_INITIALIZER(name)
    3259              : 
    3260              : /**
    3261              :  * @brief Initialize a mutex.
    3262              :  *
    3263              :  * This routine initializes a mutex object, prior to its first use.
    3264              :  *
    3265              :  * Upon completion, the mutex is available and does not have an owner.
    3266              :  *
    3267              :  * @param mutex Address of the mutex.
    3268              :  *
    3269              :  * @retval 0 Mutex object created
    3270              :  *
    3271              :  */
    3272            1 : __syscall int k_mutex_init(struct k_mutex *mutex);
    3273              : 
    3274              : 
    3275              : /**
    3276              :  * @brief Lock a mutex.
    3277              :  *
    3278              :  * This routine locks @a mutex. If the mutex is locked by another thread,
    3279              :  * the calling thread waits until the mutex becomes available or until
    3280              :  * a timeout occurs.
    3281              :  *
    3282              :  * A thread is permitted to lock a mutex it has already locked. The operation
    3283              :  * completes immediately and the lock count is increased by 1.
    3284              :  *
    3285              :  * Mutexes may not be locked in ISRs.
    3286              :  *
    3287              :  * @param mutex Address of the mutex.
    3288              :  * @param timeout Waiting period to lock the mutex,
    3289              :  *                or one of the special values K_NO_WAIT and
    3290              :  *                K_FOREVER.
    3291              :  *
    3292              :  * @retval 0 Mutex locked.
    3293              :  * @retval -EBUSY Returned without waiting.
    3294              :  * @retval -EAGAIN Waiting period timed out.
    3295              :  */
    3296            1 : __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
    3297              : 
    3298              : /**
    3299              :  * @brief Unlock a mutex.
    3300              :  *
    3301              :  * This routine unlocks @a mutex. The mutex must already be locked by the
    3302              :  * calling thread.
    3303              :  *
    3304              :  * The mutex cannot be claimed by another thread until it has been unlocked by
    3305              :  * the calling thread as many times as it was previously locked by that
    3306              :  * thread.
    3307              :  *
    3308              :  * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
    3309              :  * in thread context due to ownership and priority inheritance semantics.
    3310              :  *
    3311              :  * @param mutex Address of the mutex.
    3312              :  *
    3313              :  * @retval 0 Mutex unlocked.
    3314              :  * @retval -EPERM The current thread does not own the mutex
    3315              :  * @retval -EINVAL The mutex is not locked
    3316              :  *
    3317              :  */
    3318            1 : __syscall int k_mutex_unlock(struct k_mutex *mutex);
    3319              : 
    3320              : /**
    3321              :  * @}
    3322              :  */
    3323              : 
    3324              : 
    3325            0 : struct k_condvar {
    3326            0 :         _wait_q_t wait_q;
    3327              : 
    3328              : #ifdef CONFIG_OBJ_CORE_CONDVAR
    3329              :         struct k_obj_core  obj_core;
    3330              : #endif
    3331              : };
    3332              : 
    3333              : #define Z_CONDVAR_INITIALIZER(obj)                                             \
    3334              :         {                                                                      \
    3335              :                 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q),                          \
    3336              :         }
    3337              : 
    3338              : /**
    3339              :  * @defgroup condvar_apis Condition Variables APIs
    3340              :  * @ingroup kernel_apis
    3341              :  * @{
    3342              :  */
    3343              : 
    3344              : /**
    3345              :  * @brief Initialize a condition variable
    3346              :  *
    3347              :  * @param condvar pointer to a @p k_condvar structure
    3348              :  * @retval 0 Condition variable created successfully
    3349              :  */
    3350            1 : __syscall int k_condvar_init(struct k_condvar *condvar);
    3351              : 
    3352              : /**
    3353              :  * @brief Signals one thread that is pending on the condition variable
    3354              :  *
    3355              :  * @param condvar pointer to a @p k_condvar structure
    3356              :  * @retval 0 On success
    3357              :  */
    3358            1 : __syscall int k_condvar_signal(struct k_condvar *condvar);
    3359              : 
    3360              : /**
    3361              :  * @brief Unblock all threads that are pending on the condition
    3362              :  * variable
    3363              :  *
    3364              :  * @param condvar pointer to a @p k_condvar structure
    3365              :  * @return An integer with number of woken threads on success
    3366              :  */
    3367            1 : __syscall int k_condvar_broadcast(struct k_condvar *condvar);
    3368              : 
    3369              : /**
    3370              :  * @brief Waits on the condition variable releasing the mutex lock
    3371              :  *
    3372              :  * Atomically releases the currently owned mutex, blocks the current thread
    3373              :  * waiting on the condition variable specified by @a condvar,
    3374              :  * and finally acquires the mutex again.
    3375              :  *
    3376              :  * The waiting thread unblocks only after another thread calls
    3377              :  * k_condvar_signal, or k_condvar_broadcast with the same condition variable.
    3378              :  *
    3379              :  * @param condvar pointer to a @p k_condvar structure
    3380              :  * @param mutex Address of the mutex.
    3381              :  * @param timeout Waiting period for the condition variable
    3382              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    3383              :  * @retval 0 On success
    3384              :  * @retval -EAGAIN Waiting period timed out.
    3385              :  */
    3386            1 : __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
    3387              :                              k_timeout_t timeout);
    3388              : 
    3389              : /**
    3390              :  * @brief Statically define and initialize a condition variable.
    3391              :  *
    3392              :  * The condition variable can be accessed outside the module where it is
    3393              :  * defined using:
    3394              :  *
    3395              :  * @code extern struct k_condvar <name>; @endcode
    3396              :  *
    3397              :  * @param name Name of the condition variable.
    3398              :  */
    3399            1 : #define K_CONDVAR_DEFINE(name)                                                 \
    3400              :         STRUCT_SECTION_ITERABLE(k_condvar, name) =                             \
    3401              :                 Z_CONDVAR_INITIALIZER(name)
    3402              : /**
    3403              :  * @}
    3404              :  */
    3405              : 
    3406              : /**
    3407              :  * @defgroup semaphore_apis Semaphore APIs
    3408              :  * @ingroup kernel_apis
    3409              :  * @{
    3410              :  */
    3411              : 
    3412              : /**
    3413              :  * @brief Semaphore structure
    3414              :  *
    3415              :  * This structure is used to represent a semaphore.
    3416              :  * All the members are internal and should not be accessed directly.
    3417              :  */
    3418            1 : struct k_sem {
    3419              :         /**
    3420              :          * @cond INTERNAL_HIDDEN
    3421              :          */
    3422              :         _wait_q_t wait_q;
    3423              :         unsigned int count;
    3424              :         unsigned int limit;
    3425              : 
    3426              :         Z_DECL_POLL_EVENT
    3427              : 
    3428              :         SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
    3429              : 
    3430              : #ifdef CONFIG_OBJ_CORE_SEM
    3431              :         struct k_obj_core  obj_core;
    3432              : #endif
    3433              :         /** @endcond */
    3434              : };
    3435              : 
    3436              : /**
    3437              :  * @cond INTERNAL_HIDDEN
    3438              :  */
    3439              : 
    3440              : #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
    3441              :         { \
    3442              :         .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
    3443              :         .count = (initial_count), \
    3444              :         .limit = (count_limit), \
    3445              :         Z_POLL_EVENT_OBJ_INIT(obj) \
    3446              :         }
    3447              : 
    3448              : /**
    3449              :  * @endcond
    3450              :  */
    3451              : 
    3452              : /**
    3453              :  * @brief Maximum limit value allowed for a semaphore.
    3454              :  *
    3455              :  * This is intended for use when a semaphore does not have
    3456              :  * an explicit maximum limit, and instead is just used for
    3457              :  * counting purposes.
    3458              :  *
    3459              :  */
    3460            1 : #define K_SEM_MAX_LIMIT UINT_MAX
    3461              : 
    3462              : /**
    3463              :  * @brief Initialize a semaphore.
    3464              :  *
    3465              :  * This routine initializes a semaphore object, prior to its first use.
    3466              :  *
    3467              :  * @param sem Address of the semaphore.
    3468              :  * @param initial_count Initial semaphore count.
    3469              :  * @param limit Maximum permitted semaphore count.
    3470              :  *
    3471              :  * @see K_SEM_MAX_LIMIT
    3472              :  *
    3473              :  * @retval 0 Semaphore created successfully
    3474              :  * @retval -EINVAL Invalid values
    3475              :  *
    3476              :  */
    3477            1 : __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
    3478              :                           unsigned int limit);
    3479              : 
    3480              : /**
    3481              :  * @brief Take a semaphore.
    3482              :  *
    3483              :  * This routine takes @a sem.
    3484              :  *
    3485              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    3486              :  *
    3487              :  * @funcprops \isr_ok
    3488              :  *
    3489              :  * @param sem Address of the semaphore.
    3490              :  * @param timeout Waiting period to take the semaphore,
    3491              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    3492              :  *
    3493              :  * @retval 0 Semaphore taken.
    3494              :  * @retval -EBUSY Returned without waiting.
    3495              :  * @retval -EAGAIN Waiting period timed out,
    3496              :  *                      or the semaphore was reset during the waiting period.
    3497              :  */
    3498            1 : __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
    3499              : 
    3500              : /**
    3501              :  * @brief Give a semaphore.
    3502              :  *
    3503              :  * This routine gives @a sem, unless the semaphore is already at its maximum
    3504              :  * permitted count.
    3505              :  *
    3506              :  * @funcprops \isr_ok
    3507              :  *
    3508              :  * @param sem Address of the semaphore.
    3509              :  */
    3510            1 : __syscall void k_sem_give(struct k_sem *sem);
    3511              : 
    3512              : /**
    3513              :  * @brief Resets a semaphore's count to zero.
    3514              :  *
    3515              :  * This routine sets the count of @a sem to zero.
    3516              :  * Any outstanding semaphore takes will be aborted
    3517              :  * with -EAGAIN.
    3518              :  *
    3519              :  * @param sem Address of the semaphore.
    3520              :  */
    3521            1 : __syscall void k_sem_reset(struct k_sem *sem);
    3522              : 
    3523              : /**
    3524              :  * @brief Get a semaphore's count.
    3525              :  *
    3526              :  * This routine returns the current count of @a sem.
    3527              :  *
    3528              :  * @param sem Address of the semaphore.
    3529              :  *
    3530              :  * @return Current semaphore count.
    3531              :  */
    3532            1 : __syscall unsigned int k_sem_count_get(struct k_sem *sem);
    3533              : 
    3534              : /**
    3535              :  * @internal
    3536              :  */
    3537              : static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
    3538              : {
    3539              :         return sem->count;
    3540              : }
    3541              : 
    3542              : /**
    3543              :  * @brief Statically define and initialize a semaphore.
    3544              :  *
    3545              :  * The semaphore can be accessed outside the module where it is defined using:
    3546              :  *
    3547              :  * @code extern struct k_sem <name>; @endcode
    3548              :  *
    3549              :  * @param name Name of the semaphore.
    3550              :  * @param initial_count Initial semaphore count.
    3551              :  * @param count_limit Maximum permitted semaphore count.
    3552              :  */
    3553            1 : #define K_SEM_DEFINE(name, initial_count, count_limit)                                             \
    3554              :         STRUCT_SECTION_ITERABLE(k_sem, name) =                                                     \
    3555              :                 Z_SEM_INITIALIZER(name, initial_count, count_limit);                               \
    3556              :         BUILD_ASSERT(((count_limit) != 0) &&                                                       \
    3557              :                      (((initial_count) < (count_limit)) || ((initial_count) == (count_limit))) &&  \
    3558              :                      ((count_limit) <= K_SEM_MAX_LIMIT));
    3559              : 
    3560              : /** @} */
    3561              : 
    3562              : #if defined(CONFIG_SCHED_IPI_SUPPORTED) || defined(__DOXYGEN__)
    3563              : struct k_ipi_work;
    3564              : 
    3565              : 
    3566            0 : typedef void (*k_ipi_func_t)(struct k_ipi_work *work);
    3567              : 
    3568              : /**
    3569              :  * @brief IPI work item structure
    3570              :  *
    3571              :  * This structure is used to represent an IPI work item.
    3572              :  * All the members are internal and should not be accessed directly.
    3573              :  */
    3574            1 : struct k_ipi_work {
    3575              : /**
    3576              :  * @cond INTERNAL_HIDDEN
    3577              :  */
    3578              :         sys_dnode_t    node[CONFIG_MP_MAX_NUM_CPUS];   /* Node in IPI work queue */
    3579              :         k_ipi_func_t   func;     /* Function to execute on target CPU */
    3580              :         struct k_event event;    /* Event to signal when processed */
    3581              :         uint32_t       bitmask;  /* Bitmask of targeted CPUs */
    3582              : /** INTERNAL_HIDDEN @endcond */
    3583              : };
    3584              : 
    3585              : 
    3586              : /**
    3587              :  * @brief Initialize the specified IPI work item
    3588              :  *
    3589              :  * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
    3590              :  *
    3591              :  * @param work Pointer to the IPI work item to be initialized
    3592              :  */
    3593            1 : static inline void k_ipi_work_init(struct k_ipi_work *work)
    3594              : {
    3595              :         k_event_init(&work->event);
    3596              :         for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
    3597              :                 sys_dnode_init(&work->node[i]);
    3598              :         }
    3599              :         work->bitmask = 0;
    3600              : }
    3601              : 
    3602              : /**
    3603              :  * @brief Add an IPI work item to the IPI work queue
    3604              :  *
    3605              :  * Adds the specified IPI work item to the IPI work queues of each CPU
    3606              :  * identified by @a cpu_bitmask. The specified IPI work item will subsequently
    3607              :  * execute at ISR level as those CPUs process their received IPIs. Do not
    3608              :  * re-use the specified IPI work item until it has been processed by all of
    3609              :  * the identified CPUs.
    3610              :  *
    3611              :  * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
    3612              :  *
    3613              :  * @param work Pointer to the IPI work item
    3614              :  * @param cpu_bitmask Set of CPUs to which the IPI work item will be sent
    3615              :  * @param func Function to execute on the targeted CPU(s)
    3616              :  *
    3617              :  * @retval 0 on success
    3618              :  * @retval -EBUSY if the specified IPI work item is still being processed
    3619              :  */
    3620            1 : int k_ipi_work_add(struct k_ipi_work *work, uint32_t cpu_bitmask,
    3621              :                    k_ipi_func_t func);
    3622              : 
    3623              : /**
    3624              :  * @brief Wait until the IPI work item has been processed by all targeted CPUs
    3625              :  *
    3626              :  * This routine waits until the IPI work item has been processed by all CPUs
    3627              :  * to which it was sent. If called from an ISR, then @a timeout must be set to
    3628              :  * K_NO_WAIT. To prevent deadlocks the caller must not have IRQs locked when
    3629              :  * calling this function.
    3630              :  *
    3631              :  * @note It is not in general possible to poll safely for completion of this
    3632              :  * function in ISR or locked contexts where the calling CPU cannot service IPIs
    3633              :  * (because the targeted CPUs may themselves be waiting on the calling CPU).
    3634              :  * Application code must be prepared for failure or to poll from a thread
    3635              :  * context.
    3636              :  *
    3637              :  * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
    3638              :  *
    3639              :  * @param work Pointer to the IPI work item
    3640              :  * @param timeout Maximum time to wait for IPI work to be processed
    3641              :  *
    3642              :  * @retval -EAGAIN Waiting period timed out.
    3643              :  * @retval 0 if processed by all targeted CPUs
    3644              :  */
    3645            1 : int k_ipi_work_wait(struct k_ipi_work *work, k_timeout_t timeout);
    3646              : 
    3647              : /**
    3648              :  * @brief Signal that there is one or more IPI work items to process
    3649              :  *
    3650              :  * This routine sends an IPI to the set of CPUs identified by calls to
    3651              :  * k_ipi_work_add() since this CPU sent its last set of IPIs.
    3652              :  *
    3653              :  * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
    3654              :  */
    3655            1 : void k_ipi_work_signal(void);
    3656              : 
    3657              : #endif /* CONFIG_SCHED_IPI_SUPPORTED */
    3658              : 
    3659              : /**
    3660              :  * @cond INTERNAL_HIDDEN
    3661              :  */
    3662              : 
    3663              : struct k_work_delayable;
    3664              : struct k_work_sync;
    3665              : 
    3666              : /**
    3667              :  * INTERNAL_HIDDEN @endcond
    3668              :  */
    3669              : 
    3670              : /**
    3671              :  * @defgroup workqueue_apis Work Queue APIs
    3672              :  * @ingroup kernel_apis
    3673              :  * @{
    3674              :  */
    3675              : 
    3676              : /** @brief The signature for a work item handler function.
    3677              :  *
    3678              :  * The function will be invoked by the thread animating a work queue.
    3679              :  *
    3680              :  * @param work the work item that provided the handler.
    3681              :  */
    3682            1 : typedef void (*k_work_handler_t)(struct k_work *work);
    3683              : 
    3684              : /** @brief Initialize a (non-delayable) work structure.
    3685              :  *
    3686              :  * This must be invoked before submitting a work structure for the first time.
    3687              :  * It need not be invoked again on the same work structure.  It can be
    3688              :  * re-invoked to change the associated handler, but this must be done when the
    3689              :  * work item is idle.
    3690              :  *
    3691              :  * @funcprops \isr_ok
    3692              :  *
    3693              :  * @param work the work structure to be initialized.
    3694              :  *
    3695              :  * @param handler the handler to be invoked by the work item.
    3696              :  */
    3697            1 : void k_work_init(struct k_work *work,
    3698              :                   k_work_handler_t handler);
    3699              : 
    3700              : /** @brief Busy state flags from the work item.
    3701              :  *
    3702              :  * A zero return value indicates the work item appears to be idle.
    3703              :  *
    3704              :  * @note This is a live snapshot of state, which may change before the result
    3705              :  * is checked.  Use locks where appropriate.
    3706              :  *
    3707              :  * @funcprops \isr_ok
    3708              :  *
    3709              :  * @param work pointer to the work item.
    3710              :  *
    3711              :  * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
    3712              :  * K_WORK_RUNNING, K_WORK_CANCELING, and K_WORK_FLUSHING.
    3713              :  */
    3714            1 : int k_work_busy_get(const struct k_work *work);
    3715              : 
    3716              : /** @brief Test whether a work item is currently pending.
    3717              :  *
    3718              :  * Wrapper to determine whether a work item is in a non-idle state.
    3719              :  *
    3720              :  * @note This is a live snapshot of state, which may change before the result
    3721              :  * is checked.  Use locks where appropriate.
    3722              :  *
    3723              :  * @funcprops \isr_ok
    3724              :  *
    3725              :  * @param work pointer to the work item.
    3726              :  *
    3727              :  * @return true if and only if k_work_busy_get() returns a non-zero value.
    3728              :  */
    3729              : static inline bool k_work_is_pending(const struct k_work *work);
    3730              : 
    3731              : /** @brief Submit a work item to a queue.
    3732              :  *
    3733              :  * @param queue pointer to the work queue on which the item should run.  If
    3734              :  * NULL the queue from the most recent submission will be used.
    3735              :  *
    3736              :  * @funcprops \isr_ok
    3737              :  *
    3738              :  * @param work pointer to the work item.
    3739              :  *
    3740              :  * @retval 0 if work was already submitted to a queue
    3741              :  * @retval 1 if work was not submitted and has been queued to @p queue
    3742              :  * @retval 2 if work was running and has been queued to the queue that was
    3743              :  * running it
    3744              :  * @retval -EBUSY
    3745              :  * * if work submission was rejected because the work item is cancelling; or
    3746              :  * * @p queue is draining; or
    3747              :  * * @p queue is plugged.
    3748              :  * @retval -EINVAL if @p queue is null and the work item has never been run.
    3749              :  * @retval -ENODEV if @p queue has not been started.
    3750              :  */
    3751            1 : int k_work_submit_to_queue(struct k_work_q *queue,
    3752              :                            struct k_work *work);
    3753              : 
    3754              : /** @brief Submit a work item to the system queue.
    3755              :  *
    3756              :  * @funcprops \isr_ok
    3757              :  *
    3758              :  * @param work pointer to the work item.
    3759              :  *
    3760              :  * @return as with k_work_submit_to_queue().
    3761              :  */
    3762            1 : int k_work_submit(struct k_work *work);
    3763              : 
    3764              : /** @brief Wait for last-submitted instance to complete.
    3765              :  *
    3766              :  * Resubmissions may occur while waiting, including chained submissions (from
    3767              :  * within the handler).
    3768              :  *
    3769              :  * @note Be careful of caller and work queue thread relative priority.  If
    3770              :  * this function sleeps it will not return until the work queue thread
    3771              :  * completes the tasks that allow this thread to resume.
    3772              :  *
    3773              :  * @note Behavior is undefined if this function is invoked on @p work from a
    3774              :  * work queue running @p work.
    3775              :  *
    3776              :  * @param work pointer to the work item.
    3777              :  *
    3778              :  * @param sync pointer to an opaque item containing state related to the
    3779              :  * pending cancellation.  The object must persist until the call returns, and
    3780              :  * be accessible from both the caller thread and the work queue thread.  The
    3781              :  * object must not be used for any other flush or cancel operation until this
    3782              :  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
    3783              :  * must be allocated in coherent memory.
    3784              :  *
    3785              :  * @retval true if call had to wait for completion
    3786              :  * @retval false if work was already idle
    3787              :  */
    3788            1 : bool k_work_flush(struct k_work *work,
    3789              :                   struct k_work_sync *sync);
    3790              : 
    3791              : /** @brief Cancel a work item.
    3792              :  *
    3793              :  * This attempts to prevent a pending (non-delayable) work item from being
    3794              :  * processed by removing it from the work queue.  If the item is being
    3795              :  * processed, the work item will continue to be processed, but resubmissions
    3796              :  * are rejected until cancellation completes.
    3797              :  *
    3798              :  * If this returns zero cancellation is complete, otherwise something
    3799              :  * (probably a work queue thread) is still referencing the item.
    3800              :  *
    3801              :  * See also k_work_cancel_sync().
    3802              :  *
    3803              :  * @funcprops \isr_ok
    3804              :  *
    3805              :  * @param work pointer to the work item.
    3806              :  *
    3807              :  * @return the k_work_busy_get() status indicating the state of the item after all
    3808              :  * cancellation steps performed by this call are completed.
    3809              :  */
    3810            1 : int k_work_cancel(struct k_work *work);
    3811              : 
    3812              : /** @brief Cancel a work item and wait for it to complete.
    3813              :  *
    3814              :  * Same as k_work_cancel() but does not return until cancellation is complete.
    3815              :  * This can be invoked by a thread after k_work_cancel() to synchronize with a
    3816              :  * previous cancellation.
    3817              :  *
    3818              :  * On return the work structure will be idle unless something submits it after
    3819              :  * the cancellation was complete.
    3820              :  *
    3821              :  * @note Be careful of caller and work queue thread relative priority.  If
    3822              :  * this function sleeps it will not return until the work queue thread
    3823              :  * completes the tasks that allow this thread to resume.
    3824              :  *
    3825              :  * @note Behavior is undefined if this function is invoked on @p work from a
    3826              :  * work queue running @p work.
    3827              :  *
    3828              :  * @param work pointer to the work item.
    3829              :  *
    3830              :  * @param sync pointer to an opaque item containing state related to the
    3831              :  * pending cancellation.  The object must persist until the call returns, and
    3832              :  * be accessible from both the caller thread and the work queue thread.  The
    3833              :  * object must not be used for any other flush or cancel operation until this
    3834              :  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
    3835              :  * must be allocated in coherent memory.
    3836              :  *
    3837              :  * @retval true if work was pending (call had to wait for cancellation of a
    3838              :  * running handler to complete, or scheduled or submitted operations were
    3839              :  * cancelled);
    3840              :  * @retval false otherwise
    3841              :  */
    3842            1 : bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
    3843              : 
    3844              : /** @brief Initialize a work queue structure.
    3845              :  *
    3846              :  * This must be invoked before starting a work queue structure for the first time.
    3847              :  * It need not be invoked again on the same work queue structure.
    3848              :  *
    3849              :  * @funcprops \isr_ok
    3850              :  *
    3851              :  * @param queue the queue structure to be initialized.
    3852              :  */
    3853            1 : void k_work_queue_init(struct k_work_q *queue);
    3854              : 
    3855              : /** @brief Initialize a work queue.
    3856              :  *
    3857              :  * This configures the work queue thread and starts it running.  The function
    3858              :  * should not be re-invoked on a queue.
    3859              :  *
    3860              :  * @param queue pointer to the queue structure. It must be initialized
    3861              :  *        in zeroed/bss memory or with @ref k_work_queue_init before
    3862              :  *        use.
    3863              :  *
    3864              :  * @param stack pointer to the work thread stack area.
    3865              :  *
    3866              :  * @param stack_size size of the work thread stack area, in bytes.
    3867              :  *
    3868              :  * @param prio initial thread priority
    3869              :  *
    3870              :  * @param cfg optional additional configuration parameters.  Pass @c
    3871              :  * NULL if not required, to use the defaults documented in
    3872              :  * k_work_queue_config.
    3873              :  */
    3874            1 : void k_work_queue_start(struct k_work_q *queue,
    3875              :                         k_thread_stack_t *stack, size_t stack_size,
    3876              :                         int prio, const struct k_work_queue_config *cfg);
    3877              : 
    3878              : /** @brief Run work queue using calling thread
    3879              :  *
    3880              :  * This will run the work queue forever unless stopped by @ref k_work_queue_stop.
    3881              :  *
    3882              :  * @param queue the queue to run
    3883              :  *
    3884              :  * @param cfg optional additional configuration parameters.  Pass @c
    3885              :  * NULL if not required, to use the defaults documented in
    3886              :  * k_work_queue_config.
    3887              :  */
    3888            1 : void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *cfg);
    3889              : 
    3890              : /** @brief Access the thread that animates a work queue.
    3891              :  *
    3892              :  * This is necessary to grant a work queue thread access to things the work
    3893              :  * items it will process are expected to use.
    3894              :  *
    3895              :  * @param queue pointer to the queue structure.
    3896              :  *
    3897              :  * @return the thread associated with the work queue.
    3898              :  */
    3899              : static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
    3900              : 
    3901              : /** @brief Wait until the work queue has drained, optionally plugging it.
    3902              :  *
    3903              :  * This blocks submission to the work queue except when coming from queue
    3904              :  * thread, and blocks the caller until no more work items are available in the
    3905              :  * queue.
    3906              :  *
    3907              :  * If @p plug is true then submission will continue to be blocked after the
    3908              :  * drain operation completes until k_work_queue_unplug() is invoked.
    3909              :  *
    3910              :  * Note that work items that are delayed are not yet associated with their
    3911              :  * work queue.  They must be cancelled externally if a goal is to ensure the
    3912              :  * work queue remains empty.  The @p plug feature can be used to prevent
    3913              :  * delayed items from being submitted after the drain completes.
    3914              :  *
    3915              :  * @param queue pointer to the queue structure.
    3916              :  *
    3917              :  * @param plug if true the work queue will continue to block new submissions
    3918              :  * after all items have drained.
    3919              :  *
    3920              :  * @retval 1 if call had to wait for the drain to complete
    3921              :  * @retval 0 if call did not have to wait
    3922              :  * @retval negative if wait was interrupted or failed
    3923              :  */
    3924            1 : int k_work_queue_drain(struct k_work_q *queue, bool plug);
    3925              : 
    3926              : /** @brief Release a work queue to accept new submissions.
    3927              :  *
    3928              :  * This releases the block on new submissions placed when k_work_queue_drain()
    3929              :  * is invoked with the @p plug option enabled.  If this is invoked before the
    3930              :  * drain completes new items may be submitted as soon as the drain completes.
    3931              :  *
    3932              :  * @funcprops \isr_ok
    3933              :  *
    3934              :  * @param queue pointer to the queue structure.
    3935              :  *
    3936              :  * @retval 0 if successfully unplugged
    3937              :  * @retval -EALREADY if the work queue was not plugged.
    3938              :  */
    3939            1 : int k_work_queue_unplug(struct k_work_q *queue);
    3940              : 
    3941              : /** @brief Stop a work queue.
    3942              :  *
    3943              :  * Stops the work queue thread and ensures that no further work will be processed.
    3944              :  * This call is blocking and guarantees that the work queue thread has terminated
    3945              :  * cleanly if successful, no work will be processed past this point.
    3946              :  *
    3947              :  * @param queue Pointer to the queue structure.
    3948              :  * @param timeout Maximum time to wait for the work queue to stop.
    3949              :  *
    3950              :  * @retval 0 if the work queue was stopped
    3951              :  * @retval -EALREADY if the work queue was not started (or already stopped)
    3952              :  * @retval -EBUSY if the work queue is actively processing work items
    3953              :  * @retval -ETIMEDOUT if the work queue did not stop within the stipulated timeout
    3954              :  * @retval -ENOSUP if the work queue is essential
    3955              :  */
    3956            1 : int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout);
    3957              : 
    3958              : /** @brief Initialize a delayable work structure.
    3959              :  *
    3960              :  * This must be invoked before scheduling a delayable work structure for the
    3961              :  * first time.  It need not be invoked again on the same work structure.  It
    3962              :  * can be re-invoked to change the associated handler, but this must be done
    3963              :  * when the work item is idle.
    3964              :  *
    3965              :  * @funcprops \isr_ok
    3966              :  *
    3967              :  * @param dwork the delayable work structure to be initialized.
    3968              :  *
    3969              :  * @param handler the handler to be invoked by the work item.
    3970              :  */
    3971            1 : void k_work_init_delayable(struct k_work_delayable *dwork,
    3972              :                            k_work_handler_t handler);
    3973              : 
    3974              : /**
    3975              :  * @brief Get the parent delayable work structure from a work pointer.
    3976              :  *
    3977              :  * This function is necessary when a @c k_work_handler_t function is passed to
    3978              :  * k_work_schedule_for_queue() and the handler needs to access data from the
    3979              :  * container of the containing `k_work_delayable`.
    3980              :  *
    3981              :  * @param work Address passed to the work handler
    3982              :  *
    3983              :  * @return Address of the containing @c k_work_delayable structure.
    3984              :  */
    3985              : static inline struct k_work_delayable *
    3986              : k_work_delayable_from_work(struct k_work *work);
    3987              : 
    3988              : /** @brief Busy state flags from the delayable work item.
    3989              :  *
    3990              :  * @funcprops \isr_ok
    3991              :  *
    3992              :  * @note This is a live snapshot of state, which may change before the result
    3993              :  * can be inspected.  Use locks where appropriate.
    3994              :  *
    3995              :  * @param dwork pointer to the delayable work item.
    3996              :  *
    3997              :  * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING,
    3998              :  * K_WORK_CANCELING, and K_WORK_FLUSHING.  A zero return value indicates the
    3999              :  * work item appears to be idle.
    4000              :  */
    4001            1 : int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
    4002              : 
    4003              : /** @brief Test whether a delayed work item is currently pending.
    4004              :  *
    4005              :  * Wrapper to determine whether a delayed work item is in a non-idle state.
    4006              :  *
    4007              :  * @note This is a live snapshot of state, which may change before the result
    4008              :  * can be inspected.  Use locks where appropriate.
    4009              :  *
    4010              :  * @funcprops \isr_ok
    4011              :  *
    4012              :  * @param dwork pointer to the delayable work item.
    4013              :  *
    4014              :  * @return true if and only if k_work_delayable_busy_get() returns a non-zero
    4015              :  * value.
    4016              :  */
    4017              : static inline bool k_work_delayable_is_pending(
    4018              :         const struct k_work_delayable *dwork);
    4019              : 
    4020              : /** @brief Get the absolute tick count at which a scheduled delayable work
    4021              :  * will be submitted.
    4022              :  *
    4023              :  * @note This is a live snapshot of state, which may change before the result
    4024              :  * can be inspected.  Use locks where appropriate.
    4025              :  *
    4026              :  * @funcprops \isr_ok
    4027              :  *
    4028              :  * @param dwork pointer to the delayable work item.
    4029              :  *
    4030              :  * @return the tick count when the timer that will schedule the work item will
    4031              :  * expire, or the current tick count if the work is not scheduled.
    4032              :  */
    4033              : static inline k_ticks_t k_work_delayable_expires_get(
    4034              :         const struct k_work_delayable *dwork);
    4035              : 
    4036              : /** @brief Get the number of ticks until a scheduled delayable work will be
    4037              :  * submitted.
    4038              :  *
    4039              :  * @note This is a live snapshot of state, which may change before the result
    4040              :  * can be inspected.  Use locks where appropriate.
    4041              :  *
    4042              :  * @funcprops \isr_ok
    4043              :  *
    4044              :  * @param dwork pointer to the delayable work item.
    4045              :  *
    4046              :  * @return the number of ticks until the timer that will schedule the work
    4047              :  * item will expire, or zero if the item is not scheduled.
    4048              :  */
    4049              : static inline k_ticks_t k_work_delayable_remaining_get(
    4050              :         const struct k_work_delayable *dwork);
    4051              : 
    4052              : /** @brief Submit an idle work item to a queue after a delay.
    4053              :  *
    4054              :  * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
    4055              :  * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
    4056              :  *
    4057              :  * @funcprops \isr_ok
    4058              :  *
    4059              :  * @param queue the queue on which the work item should be submitted after the
    4060              :  * delay.
    4061              :  *
    4062              :  * @param dwork pointer to the delayable work item.
    4063              :  *
    4064              :  * @param delay the time to wait before submitting the work item.  If @c
    4065              :  * K_NO_WAIT and the work is not pending this is equivalent to
    4066              :  * k_work_submit_to_queue().
    4067              :  *
    4068              :  * @retval 0 if work was already scheduled or submitted.
    4069              :  * @retval 1 if work has been scheduled.
    4070              :  * @retval 2 if @p delay is @c K_NO_WAIT and work
    4071              :  *         was running and has been queued to the queue that was running it.
    4072              :  * @retval -EBUSY if @p delay is @c K_NO_WAIT and
    4073              :  *         k_work_submit_to_queue() fails with this code.
    4074              :  * @retval -EINVAL if @p delay is @c K_NO_WAIT and
    4075              :  *         k_work_submit_to_queue() fails with this code.
    4076              :  * @retval -ENODEV if @p delay is @c K_NO_WAIT and
    4077              :  *         k_work_submit_to_queue() fails with this code.
    4078              :  */
    4079            1 : int k_work_schedule_for_queue(struct k_work_q *queue,
    4080              :                                struct k_work_delayable *dwork,
    4081              :                                k_timeout_t delay);
    4082              : 
    4083              : /** @brief Submit an idle work item to the system work queue after a
    4084              :  * delay.
    4085              :  *
    4086              :  * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
    4087              :  * characteristics of that function.
    4088              :  *
    4089              :  * @param dwork pointer to the delayable work item.
    4090              :  *
    4091              :  * @param delay the time to wait before submitting the work item.  If @c
    4092              :  * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
    4093              :  *
    4094              :  * @return as with k_work_schedule_for_queue().
    4095              :  */
    4096            1 : int k_work_schedule(struct k_work_delayable *dwork,
    4097              :                                    k_timeout_t delay);
    4098              : 
    4099              : /** @brief Reschedule a work item to a queue after a delay.
    4100              :  *
    4101              :  * Unlike k_work_schedule_for_queue() this function can change the deadline of
    4102              :  * a scheduled work item, and will schedule a work item that is in any state
    4103              :  * (e.g. is idle, submitted, or running).  This function does not affect
    4104              :  * ("unsubmit") a work item that has been submitted to a queue.
    4105              :  *
    4106              :  * @funcprops \isr_ok
    4107              :  *
    4108              :  * @param queue the queue on which the work item should be submitted after the
    4109              :  * delay.
    4110              :  *
    4111              :  * @param dwork pointer to the delayable work item.
    4112              :  *
    4113              :  * @param delay the time to wait before submitting the work item.  If @c
    4114              :  * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
    4115              :  * any previous scheduled submission.
    4116              :  *
    4117              :  * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
    4118              :  * k_work_submit_to_queue().
    4119              :  *
    4120              :  * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
    4121              :  * @retval 1 if
    4122              :  * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
    4123              :  *   to @p queue; or
    4124              :  * * delay not @c K_NO_WAIT and work has been scheduled
    4125              :  * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
    4126              :  * to the queue that was running it
    4127              :  * @retval -EBUSY if @p delay is @c K_NO_WAIT and
    4128              :  *         k_work_submit_to_queue() fails with this code.
    4129              :  * @retval -EINVAL if @p delay is @c K_NO_WAIT and
    4130              :  *         k_work_submit_to_queue() fails with this code.
    4131              :  * @retval -ENODEV if @p delay is @c K_NO_WAIT and
    4132              :  *         k_work_submit_to_queue() fails with this code.
    4133              :  */
    4134            1 : int k_work_reschedule_for_queue(struct k_work_q *queue,
    4135              :                                  struct k_work_delayable *dwork,
    4136              :                                  k_timeout_t delay);
    4137              : 
    4138              : /** @brief Reschedule a work item to the system work queue after a
    4139              :  * delay.
    4140              :  *
    4141              :  * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
    4142              :  * API characteristics of that function.
    4143              :  *
    4144              :  * @param dwork pointer to the delayable work item.
    4145              :  *
    4146              :  * @param delay the time to wait before submitting the work item.
    4147              :  *
    4148              :  * @return as with k_work_reschedule_for_queue().
    4149              :  */
    4150            1 : int k_work_reschedule(struct k_work_delayable *dwork,
    4151              :                                      k_timeout_t delay);
    4152              : 
    4153              : /** @brief Flush delayable work.
    4154              :  *
    4155              :  * If the work is scheduled, it is immediately submitted.  Then the caller
    4156              :  * blocks until the work completes, as with k_work_flush().
    4157              :  *
    4158              :  * @note Be careful of caller and work queue thread relative priority.  If
    4159              :  * this function sleeps it will not return until the work queue thread
    4160              :  * completes the tasks that allow this thread to resume.
    4161              :  *
    4162              :  * @note Behavior is undefined if this function is invoked on @p dwork from a
    4163              :  * work queue running @p dwork.
    4164              :  *
    4165              :  * @param dwork pointer to the delayable work item.
    4166              :  *
    4167              :  * @param sync pointer to an opaque item containing state related to the
    4168              :  * pending cancellation.  The object must persist until the call returns, and
    4169              :  * be accessible from both the caller thread and the work queue thread.  The
    4170              :  * object must not be used for any other flush or cancel operation until this
    4171              :  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
    4172              :  * must be allocated in coherent memory.
    4173              :  *
    4174              :  * @retval true if call had to wait for completion
    4175              :  * @retval false if work was already idle
    4176              :  */
    4177            1 : bool k_work_flush_delayable(struct k_work_delayable *dwork,
    4178              :                             struct k_work_sync *sync);
    4179              : 
    4180              : /** @brief Cancel delayable work.
    4181              :  *
    4182              :  * Similar to k_work_cancel() but for delayable work.  If the work is
    4183              :  * scheduled or submitted it is canceled.  This function does not wait for the
    4184              :  * cancellation to complete.
    4185              :  *
    4186              :  * @note The work may still be running when this returns.  Use
    4187              :  * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
    4188              :  * not running.
    4189              :  *
    4190              :  * @note Canceling delayable work does not prevent rescheduling it.  It does
    4191              :  * prevent submitting it until the cancellation completes.
    4192              :  *
    4193              :  * @funcprops \isr_ok
    4194              :  *
    4195              :  * @param dwork pointer to the delayable work item.
    4196              :  *
    4197              :  * @return the k_work_delayable_busy_get() status indicating the state of the
    4198              :  * item after all cancellation steps performed by this call are completed.
    4199              :  */
    4200            1 : int k_work_cancel_delayable(struct k_work_delayable *dwork);
    4201              : 
    4202              : /** @brief Cancel delayable work and wait.
    4203              :  *
    4204              :  * Like k_work_cancel_delayable() but waits until the work becomes idle.
    4205              :  *
    4206              :  * @note Canceling delayable work does not prevent rescheduling it.  It does
    4207              :  * prevent submitting it until the cancellation completes.
    4208              :  *
    4209              :  * @note Be careful of caller and work queue thread relative priority.  If
    4210              :  * this function sleeps it will not return until the work queue thread
    4211              :  * completes the tasks that allow this thread to resume.
    4212              :  *
    4213              :  * @note Behavior is undefined if this function is invoked on @p dwork from a
    4214              :  * work queue running @p dwork.
    4215              :  *
    4216              :  * @param dwork pointer to the delayable work item.
    4217              :  *
    4218              :  * @param sync pointer to an opaque item containing state related to the
    4219              :  * pending cancellation.  The object must persist until the call returns, and
    4220              :  * be accessible from both the caller thread and the work queue thread.  The
    4221              :  * object must not be used for any other flush or cancel operation until this
    4222              :  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
    4223              :  * must be allocated in coherent memory.
    4224              :  *
    4225              :  * @retval true if work was not idle (call had to wait for cancellation of a
    4226              :  * running handler to complete, or scheduled or submitted operations were
    4227              :  * cancelled);
    4228              :  * @retval false otherwise
    4229              :  */
    4230            1 : bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
    4231              :                                   struct k_work_sync *sync);
    4232              : 
    4233            0 : enum {
    4234              : /**
    4235              :  * @cond INTERNAL_HIDDEN
    4236              :  */
    4237              : 
    4238              :         /* The atomic API is used for all work and queue flags fields to
    4239              :          * enforce sequential consistency in SMP environments.
    4240              :          */
    4241              : 
    4242              :         /* Bits that represent the work item states.  At least nine of the
    4243              :          * combinations are distinct valid stable states.
    4244              :          */
    4245              :         K_WORK_RUNNING_BIT = 0,
    4246              :         K_WORK_CANCELING_BIT = 1,
    4247              :         K_WORK_QUEUED_BIT = 2,
    4248              :         K_WORK_DELAYED_BIT = 3,
    4249              :         K_WORK_FLUSHING_BIT = 4,
    4250              : 
    4251              :         K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
    4252              :                 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),
    4253              : 
    4254              :         /* Static work flags */
    4255              :         K_WORK_DELAYABLE_BIT = 8,
    4256              :         K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
    4257              : 
    4258              :         /* Dynamic work queue flags */
    4259              :         K_WORK_QUEUE_STARTED_BIT = 0,
    4260              :         K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
    4261              :         K_WORK_QUEUE_BUSY_BIT = 1,
    4262              :         K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
    4263              :         K_WORK_QUEUE_DRAIN_BIT = 2,
    4264              :         K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
    4265              :         K_WORK_QUEUE_PLUGGED_BIT = 3,
    4266              :         K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
    4267              :         K_WORK_QUEUE_STOP_BIT = 4,
    4268              :         K_WORK_QUEUE_STOP = BIT(K_WORK_QUEUE_STOP_BIT),
    4269              : 
    4270              :         /* Static work queue flags */
    4271              :         K_WORK_QUEUE_NO_YIELD_BIT = 8,
    4272              :         K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
    4273              : 
    4274              : /**
    4275              :  * INTERNAL_HIDDEN @endcond
    4276              :  */
    4277              :         /* Transient work flags */
    4278              : 
    4279              :         /** @brief Flag indicating a work item that is running under a work
    4280              :          * queue thread.
    4281              :          *
    4282              :          * Accessed via k_work_busy_get().  May co-occur with other flags.
    4283              :          */
    4284              :         K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
    4285              : 
    4286              :         /** @brief Flag indicating a work item that is being canceled.
    4287              :          *
    4288              :          * Accessed via k_work_busy_get().  May co-occur with other flags.
    4289              :          */
    4290              :         K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
    4291              : 
    4292              :         /** @brief Flag indicating a work item that has been submitted to a
    4293              :          * queue but has not started running.
    4294              :          *
    4295              :          * Accessed via k_work_busy_get().  May co-occur with other flags.
    4296              :          */
    4297              :         K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
    4298              : 
    4299              :         /** @brief Flag indicating a delayed work item that is scheduled for
    4300              :          * submission to a queue.
    4301              :          *
    4302              :          * Accessed via k_work_busy_get().  May co-occur with other flags.
    4303              :          */
    4304              :         K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
    4305              : 
    4306              :         /** @brief Flag indicating a synced work item that is being flushed.
    4307              :          *
    4308              :          * Accessed via k_work_busy_get().  May co-occur with other flags.
    4309              :          */
    4310              :         K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
    4311              : };
    4312              : 
    4313              : /** @brief A structure used to submit work. */
    4314            1 : struct k_work {
    4315              :         /* All fields are protected by the work module spinlock.  No fields
    4316              :          * are to be accessed except through kernel API.
    4317              :          */
    4318              : 
    4319              :         /* Node to link into k_work_q pending list. */
    4320            0 :         sys_snode_t node;
    4321              : 
    4322              :         /* The function to be invoked by the work queue thread. */
    4323            0 :         k_work_handler_t handler;
    4324              : 
    4325              :         /* The queue on which the work item was last submitted. */
    4326            0 :         struct k_work_q *queue;
    4327              : 
    4328              :         /* State of the work item.
    4329              :          *
    4330              :          * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
    4331              :          *
    4332              :          * It can be RUNNING and CANCELING simultaneously.
    4333              :          */
    4334            0 :         uint32_t flags;
    4335              : };
    4336              : 
    4337              : #define Z_WORK_INITIALIZER(work_handler) { \
    4338              :         .handler = (work_handler), \
    4339              : }
    4340              : 
    4341              : /** @brief A structure used to submit work after a delay. */
    4342            1 : struct k_work_delayable {
    4343              :         /* The work item. */
    4344            0 :         struct k_work work;
    4345              : 
    4346              :         /* Timeout used to submit work after a delay. */
    4347            0 :         struct _timeout timeout;
    4348              : 
    4349              :         /* The queue to which the work should be submitted. */
    4350            0 :         struct k_work_q *queue;
    4351              : };
    4352              : 
    4353              : #define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
    4354              :         .work = { \
    4355              :                 .handler = (work_handler), \
    4356              :                 .flags = K_WORK_DELAYABLE, \
    4357              :         }, \
    4358              : }
    4359              : 
    4360              : /**
    4361              :  * @brief Initialize a statically-defined delayable work item.
    4362              :  *
    4363              :  * This macro can be used to initialize a statically-defined delayable
    4364              :  * work item, prior to its first use. For example,
    4365              :  *
    4366              :  * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
    4367              :  *
    4368              :  * Note that if the runtime dependencies support initialization with
    4369              :  * k_work_init_delayable() using that will eliminate the initialized
    4370              :  * object in ROM that is produced by this macro and copied in at
    4371              :  * system startup.
    4372              :  *
    4373              :  * @param work Symbol name for delayable work item object
    4374              :  * @param work_handler Function to invoke each time work item is processed.
    4375              :  */
    4376            1 : #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
    4377              :         struct k_work_delayable work \
    4378              :           = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
    4379              : 
    4380              : /**
    4381              :  * @cond INTERNAL_HIDDEN
    4382              :  */
    4383              : 
    4384              : /* Record used to wait for work to flush.
    4385              :  *
    4386              :  * The work item is inserted into the queue that will process (or is
    4387              :  * processing) the item, and will be processed as soon as the item
    4388              :  * completes.  When the flusher is processed the semaphore will be
    4389              :  * signaled, releasing the thread waiting for the flush.
    4390              :  */
    4391              : struct z_work_flusher {
    4392              :         struct k_work work;
    4393              :         struct k_sem sem;
    4394              : };
    4395              : 
    4396              : /* Record used to wait for work to complete a cancellation.
    4397              :  *
    4398              :  * The work item is inserted into a global queue of pending cancels.
    4399              :  * When a cancelling work item goes idle any matching waiters are
    4400              :  * removed from pending_cancels and are woken.
    4401              :  */
    4402              : struct z_work_canceller {
    4403              :         sys_snode_t node;
    4404              :         struct k_work *work;
    4405              :         struct k_sem sem;
    4406              : };
    4407              : 
    4408              : /**
    4409              :  * INTERNAL_HIDDEN @endcond
    4410              :  */
    4411              : 
    4412              : /** @brief A structure holding internal state for a pending synchronous
    4413              :  * operation on a work item or queue.
    4414              :  *
    4415              :  * Instances of this type are provided by the caller for invocation of
    4416              :  * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs.  A
    4417              :  * referenced object must persist until the call returns, and be accessible
    4418              :  * from both the caller thread and the work queue thread.
    4419              :  *
    4420              :  * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
    4421              :  * coherent memory; see arch_mem_coherent().  The stack on these architectures
    4422              :  * is generally not coherent.  be stack-allocated.  Violations are detected by
    4423              :  * runtime assertion.
    4424              :  */
    4425            1 : struct k_work_sync {
    4426              :         union {
    4427            0 :                 struct z_work_flusher flusher;
    4428            0 :                 struct z_work_canceller canceller;
    4429            0 :         };
    4430              : };
    4431              : 
    4432              : /** @brief A structure holding optional configuration items for a work
    4433              :  * queue.
    4434              :  *
    4435              :  * This structure, and values it references, are not retained by
    4436              :  * k_work_queue_start().
    4437              :  */
    4438            1 : struct k_work_queue_config {
    4439              :         /** The name to be given to the work queue thread.
    4440              :          *
    4441              :          * If left null the thread will not have a name.
    4442              :          */
    4443            1 :         const char *name;
    4444              : 
    4445              :         /** Control whether the work queue thread should yield between
    4446              :          * items.
    4447              :          *
    4448              :          * Yielding between items helps guarantee the work queue
    4449              :          * thread does not starve other threads, including cooperative
    4450              :          * ones released by a work item.  This is the default behavior.
    4451              :          *
    4452              :          * Set this to @c true to prevent the work queue thread from
    4453              :          * yielding between items.  This may be appropriate when a
    4454              :          * sequence of items should complete without yielding
    4455              :          * control.
    4456              :          */
    4457            1 :         bool no_yield;
    4458              : 
    4459              :         /** Control whether the work queue thread should be marked as
    4460              :          * essential thread.
    4461              :          */
    4462            1 :         bool essential;
    4463              : 
    4464              :         /** Controls whether work queue monitors work timeouts.
    4465              :          *
    4466              :          * If non-zero, and CONFIG_WORKQUEUE_WORK_TIMEOUT is enabled,
    4467              :          * the work queue will monitor the duration of each work item.
    4468              :          * If the work item handler takes longer than the specified
    4469              :          * time to execute, the work queue thread will be aborted, and
    4470              :          * an error will be logged if CONFIG_LOG is enabled.
    4471              :          */
    4472            1 :         uint32_t work_timeout_ms;
    4473              : };
    4474              : 
    4475              : /** @brief A structure used to hold work until it can be processed. */
    4476            1 : struct k_work_q {
    4477              :         /* The thread that animates the work. */
    4478            0 :         struct k_thread thread;
    4479              : 
    4480              :         /* The thread ID that animates the work. This may be an external thread
    4481              :          * if k_work_queue_run() is used.
    4482              :          */
    4483            0 :         k_tid_t thread_id;
    4484              : 
    4485              :         /* All the following fields must be accessed only while the
    4486              :          * work module spinlock is held.
    4487              :          */
    4488              : 
    4489              :         /* List of k_work items to be worked. */
    4490            0 :         sys_slist_t pending;
    4491              : 
    4492              :         /* Wait queue for idle work thread. */
    4493            0 :         _wait_q_t notifyq;
    4494              : 
    4495              :         /* Wait queue for threads waiting for the queue to drain. */
    4496            0 :         _wait_q_t drainq;
    4497              : 
    4498              :         /* Flags describing queue state. */
    4499            0 :         uint32_t flags;
    4500              : 
    4501              : #if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
    4502              :         struct _timeout work_timeout_record;
    4503              :         struct k_work *work;
    4504              :         k_timeout_t work_timeout;
    4505              : #endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
    4506              : };
    4507              : 
    4508              : /* Provide the implementation for inline functions declared above */
    4509              : 
    4510            1 : static inline bool k_work_is_pending(const struct k_work *work)
    4511              : {
    4512              :         return k_work_busy_get(work) != 0;
    4513              : }
    4514              : 
    4515              : static inline struct k_work_delayable *
    4516            1 : k_work_delayable_from_work(struct k_work *work)
    4517              : {
    4518              :         return CONTAINER_OF(work, struct k_work_delayable, work);
    4519              : }
    4520              : 
    4521            1 : static inline bool k_work_delayable_is_pending(
    4522              :         const struct k_work_delayable *dwork)
    4523              : {
    4524              :         return k_work_delayable_busy_get(dwork) != 0;
    4525              : }
    4526              : 
    4527            1 : static inline k_ticks_t k_work_delayable_expires_get(
    4528              :         const struct k_work_delayable *dwork)
    4529              : {
    4530              :         return z_timeout_expires(&dwork->timeout);
    4531              : }
    4532              : 
    4533            1 : static inline k_ticks_t k_work_delayable_remaining_get(
    4534              :         const struct k_work_delayable *dwork)
    4535              : {
    4536              :         return z_timeout_remaining(&dwork->timeout);
    4537              : }
    4538              : 
    4539            1 : static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
    4540              : {
    4541              :         return queue->thread_id;
    4542              : }
    4543              : 
    4544              : /** @} */
    4545              : 
    4546              : struct k_work_user;
    4547              : 
    4548              : /**
    4549              :  * @addtogroup workqueue_apis
    4550              :  * @{
    4551              :  */
    4552              : 
    4553              : /**
    4554              :  * @typedef k_work_user_handler_t
    4555              :  * @brief Work item handler function type for user work queues.
    4556              :  *
    4557              :  * A work item's handler function is executed by a user workqueue's thread
    4558              :  * when the work item is processed by the workqueue.
    4559              :  *
    4560              :  * @param work Address of the work item.
    4561              :  */
    4562            1 : typedef void (*k_work_user_handler_t)(struct k_work_user *work);
    4563              : 
    4564              : /**
    4565              :  * @cond INTERNAL_HIDDEN
    4566              :  */
    4567              : 
    4568              : struct k_work_user_q {
    4569              :         struct k_queue queue;
    4570              :         struct k_thread thread;
    4571              : };
    4572              : 
    4573              : enum {
    4574              :         K_WORK_USER_STATE_PENDING,      /* Work item pending state */
    4575              : };
    4576              : 
    4577              : struct k_work_user {
    4578              :         void *_reserved;                /* Used by k_queue implementation. */
    4579              :         k_work_user_handler_t handler;
    4580              :         atomic_t flags;
    4581              : };
    4582              : 
    4583              : /**
    4584              :  * INTERNAL_HIDDEN @endcond
    4585              :  */
    4586              : 
    4587              : #if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
    4588              : #define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
    4589              : #else
    4590              : #define Z_WORK_USER_INITIALIZER(work_handler) \
    4591              :         { \
    4592              :         ._reserved = NULL, \
    4593              :         .handler = (work_handler), \
    4594              :         .flags = 0 \
    4595              :         }
    4596              : #endif
    4597              : 
    4598              : /**
    4599              :  * @brief Initialize a statically-defined user work item.
    4600              :  *
    4601              :  * This macro can be used to initialize a statically-defined user work
    4602              :  * item, prior to its first use. For example,
    4603              :  *
    4604              :  * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
    4605              :  *
    4606              :  * @param work Symbol name for work item object
    4607              :  * @param work_handler Function to invoke each time work item is processed.
    4608              :  */
    4609            1 : #define K_WORK_USER_DEFINE(work, work_handler) \
    4610              :         struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
    4611              : 
    4612              : /**
    4613              :  * @brief Initialize a userspace work item.
    4614              :  *
    4615              :  * This routine initializes a user workqueue work item, prior to its
    4616              :  * first use.
    4617              :  *
    4618              :  * @param work Address of work item.
    4619              :  * @param handler Function to invoke each time work item is processed.
    4620              :  */
    4621            1 : static inline void k_work_user_init(struct k_work_user *work,
    4622              :                                     k_work_user_handler_t handler)
    4623              : {
    4624              :         *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
    4625              : }
    4626              : 
    4627              : /**
    4628              :  * @brief Check if a userspace work item is pending.
    4629              :  *
    4630              :  * This routine indicates if user work item @a work is pending in a workqueue's
    4631              :  * queue.
    4632              :  *
    4633              :  * @note Checking if the work is pending gives no guarantee that the
    4634              :  *       work will still be pending when this information is used. It is up to
    4635              :  *       the caller to make sure that this information is used in a safe manner.
    4636              :  *
    4637              :  * @funcprops \isr_ok
    4638              :  *
    4639              :  * @param work Address of work item.
    4640              :  *
    4641              :  * @return true if work item is pending, or false if it is not pending.
    4642              :  */
    4643            1 : static inline bool k_work_user_is_pending(struct k_work_user *work)
    4644              : {
    4645              :         return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
    4646              : }
    4647              : 
    4648              : /**
    4649              :  * @brief Submit a work item to a user mode workqueue
    4650              :  *
    4651              :  * Submits a work item to a workqueue that runs in user mode. A temporary
    4652              :  * memory allocation is made from the caller's resource pool which is freed
    4653              :  * once the worker thread consumes the k_work item. The workqueue
    4654              :  * thread must have memory access to the k_work item being submitted. The caller
    4655              :  * must have permission granted on the work_q parameter's queue object.
    4656              :  *
    4657              :  * @funcprops \isr_ok
    4658              :  *
    4659              :  * @param work_q Address of workqueue.
    4660              :  * @param work Address of work item.
    4661              :  *
    4662              :  * @retval -EBUSY if the work item was already in some workqueue
    4663              :  * @retval -ENOMEM if no memory for thread resource pool allocation
    4664              :  * @retval 0 Success
    4665              :  */
    4666            1 : static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
    4667              :                                               struct k_work_user *work)
    4668              : {
    4669              :         int ret = -EBUSY;
    4670              : 
    4671              :         if (!atomic_test_and_set_bit(&work->flags,
    4672              :                                      K_WORK_USER_STATE_PENDING)) {
    4673              :                 ret = k_queue_alloc_append(&work_q->queue, work);
    4674              : 
    4675              :                 /* Couldn't insert into the queue. Clear the pending bit
    4676              :                  * so the work item can be submitted again
    4677              :                  */
    4678              :                 if (ret != 0) {
    4679              :                         atomic_clear_bit(&work->flags,
    4680              :                                          K_WORK_USER_STATE_PENDING);
    4681              :                 }
    4682              :         }
    4683              : 
    4684              :         return ret;
    4685              : }
    4686              : 
    4687              : /**
    4688              :  * @brief Start a workqueue in user mode
    4689              :  *
    4690              :  * This works identically to k_work_queue_start() except it is callable from
    4691              :  * user mode, and the worker thread created will run in user mode.  The caller
    4692              :  * must have permissions granted on both the work_q parameter's thread and
    4693              :  * queue objects, and the same restrictions on priority apply as
    4694              :  * k_thread_create().
    4695              :  *
    4696              :  * @param work_q Address of workqueue.
    4697              :  * @param stack Pointer to work queue thread's stack space, as defined by
    4698              :  *              K_THREAD_STACK_DEFINE()
    4699              :  * @param stack_size Size of the work queue thread's stack (in bytes), which
    4700              :  *              should either be the same constant passed to
    4701              :  *              K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
    4702              :  * @param prio Priority of the work queue's thread.
    4703              :  * @param name optional thread name.  If not null a copy is made into the
    4704              :  *              thread's name buffer.
    4705              :  */
    4706            1 : void k_work_user_queue_start(struct k_work_user_q *work_q,
    4707              :                                     k_thread_stack_t *stack,
    4708              :                                     size_t stack_size, int prio,
    4709              :                                     const char *name);
    4710              : 
    4711              : /**
    4712              :  * @brief Access the user mode thread that animates a work queue.
    4713              :  *
    4714              :  * This is necessary to grant a user mode work queue thread access to things
    4715              :  * the work items it will process are expected to use.
    4716              :  *
    4717              :  * @param work_q pointer to the user mode queue structure.
    4718              :  *
    4719              :  * @return the user mode thread associated with the work queue.
    4720              :  */
    4721            1 : static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
    4722              : {
    4723              :         return &work_q->thread;
    4724              : }
    4725              : 
    4726              : /** @} */
    4727              : 
    4728              : /**
    4729              :  * @cond INTERNAL_HIDDEN
    4730              :  */
    4731              : 
    4732              : struct k_work_poll {
    4733              :         struct k_work work;
    4734              :         struct k_work_q *workq;
    4735              :         struct z_poller poller;
    4736              :         struct k_poll_event *events;
    4737              :         int num_events;
    4738              :         k_work_handler_t real_handler;
    4739              :         struct _timeout timeout;
    4740              :         int poll_result;
    4741              : };
    4742              : 
    4743              : /**
    4744              :  * INTERNAL_HIDDEN @endcond
    4745              :  */
    4746              : 
    4747              : /**
    4748              :  * @addtogroup workqueue_apis
    4749              :  * @{
    4750              :  */
    4751              : 
    4752              : /**
    4753              :  * @brief Initialize a statically-defined work item.
    4754              :  *
    4755              :  * This macro can be used to initialize a statically-defined workqueue work
    4756              :  * item, prior to its first use. For example,
    4757              :  *
    4758              :  * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
    4759              :  *
    4760              :  * @param work Symbol name for work item object
    4761              :  * @param work_handler Function to invoke each time work item is processed.
    4762              :  */
    4763            1 : #define K_WORK_DEFINE(work, work_handler) \
    4764              :         struct k_work work = Z_WORK_INITIALIZER(work_handler)
    4765              : 
    4766              : /**
    4767              :  * @brief Initialize a triggered work item.
    4768              :  *
    4769              :  * This routine initializes a workqueue triggered work item, prior to
    4770              :  * its first use.
    4771              :  *
    4772              :  * @param work Address of triggered work item.
    4773              :  * @param handler Function to invoke each time work item is processed.
    4774              :  */
    4775            1 : void k_work_poll_init(struct k_work_poll *work,
    4776              :                              k_work_handler_t handler);
    4777              : 
    4778              : /**
    4779              :  * @brief Submit a triggered work item.
    4780              :  *
    4781              :  * This routine schedules work item @a work to be processed by workqueue
    4782              :  * @a work_q when one of the given @a events is signaled. The routine
    4783              :  * initiates internal poller for the work item and then returns to the caller.
    4784              :  * Only when one of the watched events happen the work item is actually
    4785              :  * submitted to the workqueue and becomes pending.
    4786              :  *
    4787              :  * Submitting a previously submitted triggered work item that is still
    4788              :  * waiting for the event cancels the existing submission and reschedules it
    4789              :  * the using the new event list. Note that this behavior is inherently subject
    4790              :  * to race conditions with the pre-existing triggered work item and work queue,
    4791              :  * so care must be taken to synchronize such resubmissions externally.
    4792              :  *
    4793              :  * @funcprops \isr_ok
    4794              :  *
    4795              :  * @warning
    4796              :  * Provided array of events as well as a triggered work item must be placed
    4797              :  * in persistent memory (valid until work handler execution or work
    4798              :  * cancellation) and cannot be modified after submission.
    4799              :  *
    4800              :  * @param work_q Address of workqueue.
    4801              :  * @param work Address of delayed work item.
    4802              :  * @param events An array of events which trigger the work.
    4803              :  * @param num_events The number of events in the array.
    4804              :  * @param timeout Timeout after which the work will be scheduled
    4805              :  *                for execution even if not triggered.
    4806              :  *
    4807              :  *
    4808              :  * @retval 0 Work item started watching for events.
    4809              :  * @retval -EINVAL Work item is being processed or has completed its work.
    4810              :  * @retval -EADDRINUSE Work item is pending on a different workqueue.
    4811              :  */
    4812            1 : int k_work_poll_submit_to_queue(struct k_work_q *work_q,
    4813              :                                        struct k_work_poll *work,
    4814              :                                        struct k_poll_event *events,
    4815              :                                        int num_events,
    4816              :                                        k_timeout_t timeout);
    4817              : 
    4818              : /**
    4819              :  * @brief Submit a triggered work item to the system workqueue.
    4820              :  *
    4821              :  * This routine schedules work item @a work to be processed by system
    4822              :  * workqueue when one of the given @a events is signaled. The routine
    4823              :  * initiates internal poller for the work item and then returns to the caller.
    4824              :  * Only when one of the watched events happen the work item is actually
    4825              :  * submitted to the workqueue and becomes pending.
    4826              :  *
    4827              :  * Submitting a previously submitted triggered work item that is still
    4828              :  * waiting for the event cancels the existing submission and reschedules it
    4829              :  * the using the new event list. Note that this behavior is inherently subject
    4830              :  * to race conditions with the pre-existing triggered work item and work queue,
    4831              :  * so care must be taken to synchronize such resubmissions externally.
    4832              :  *
    4833              :  * @funcprops \isr_ok
    4834              :  *
    4835              :  * @warning
    4836              :  * Provided array of events as well as a triggered work item must not be
    4837              :  * modified until the item has been processed by the workqueue.
    4838              :  *
    4839              :  * @param work Address of delayed work item.
    4840              :  * @param events An array of events which trigger the work.
    4841              :  * @param num_events The number of events in the array.
    4842              :  * @param timeout Timeout after which the work will be scheduled
    4843              :  *                for execution even if not triggered.
    4844              :  *
    4845              :  * @retval 0 Work item started watching for events.
    4846              :  * @retval -EINVAL Work item is being processed or has completed its work.
    4847              :  * @retval -EADDRINUSE Work item is pending on a different workqueue.
    4848              :  */
    4849            1 : int k_work_poll_submit(struct k_work_poll *work,
    4850              :                                      struct k_poll_event *events,
    4851              :                                      int num_events,
    4852              :                                      k_timeout_t timeout);
    4853              : 
    4854              : /**
    4855              :  * @brief Cancel a triggered work item.
    4856              :  *
    4857              :  * This routine cancels the submission of triggered work item @a work.
    4858              :  * A triggered work item can only be canceled if no event triggered work
    4859              :  * submission.
    4860              :  *
    4861              :  * @funcprops \isr_ok
    4862              :  *
    4863              :  * @param work Address of delayed work item.
    4864              :  *
    4865              :  * @retval 0 Work item canceled.
    4866              :  * @retval -EINVAL Work item is being processed or has completed its work.
    4867              :  */
    4868            1 : int k_work_poll_cancel(struct k_work_poll *work);
    4869              : 
    4870              : /** @} */
    4871              : 
    4872              : /**
    4873              :  * @defgroup msgq_apis Message Queue APIs
    4874              :  * @ingroup kernel_apis
    4875              :  * @{
    4876              :  */
    4877              : 
    4878              : /**
    4879              :  * @brief Message Queue Structure
    4880              :  */
    4881            1 : struct k_msgq {
    4882              :         /** Message queue wait queue */
    4883            1 :         _wait_q_t wait_q;
    4884              :         /** Lock */
    4885            1 :         struct k_spinlock lock;
    4886              :         /** Message size */
    4887            1 :         size_t msg_size;
    4888              :         /** Maximal number of messages */
    4889            1 :         uint32_t max_msgs;
    4890              :         /** Start of message buffer */
    4891            1 :         char *buffer_start;
    4892              :         /** End of message buffer */
    4893            1 :         char *buffer_end;
    4894              :         /** Read pointer */
    4895            1 :         char *read_ptr;
    4896              :         /** Write pointer */
    4897            1 :         char *write_ptr;
    4898              :         /** Number of used messages */
    4899            1 :         uint32_t used_msgs;
    4900              : 
    4901              :         Z_DECL_POLL_EVENT
    4902              : 
    4903              :         /** Message queue */
    4904            1 :         uint8_t flags;
    4905              : 
    4906              :         SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)
    4907              : 
    4908              : #ifdef CONFIG_OBJ_CORE_MSGQ
    4909              :         struct k_obj_core  obj_core;
    4910              : #endif
    4911              : };
    4912              : /**
    4913              :  * @cond INTERNAL_HIDDEN
    4914              :  */
    4915              : 
    4916              : 
    4917              : #define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
    4918              :         { \
    4919              :         .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    4920              :         .lock = {}, \
    4921              :         .msg_size = q_msg_size, \
    4922              :         .max_msgs = q_max_msgs, \
    4923              :         .buffer_start = q_buffer, \
    4924              :         .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
    4925              :         .read_ptr = q_buffer, \
    4926              :         .write_ptr = q_buffer, \
    4927              :         .used_msgs = 0, \
    4928              :         Z_POLL_EVENT_OBJ_INIT(obj) \
    4929              :         .flags = 0, \
    4930              :         }
    4931              : 
    4932              : /**
    4933              :  * INTERNAL_HIDDEN @endcond
    4934              :  */
    4935              : 
    4936              : 
    4937            0 : #define K_MSGQ_FLAG_ALLOC       BIT(0)
    4938              : 
    4939              : /**
    4940              :  * @brief Message Queue Attributes
    4941              :  */
    4942            1 : struct k_msgq_attrs {
    4943              :         /** Message Size */
    4944            1 :         size_t msg_size;
    4945              :         /** Maximal number of messages */
    4946            1 :         uint32_t max_msgs;
    4947              :         /** Used messages */
    4948            1 :         uint32_t used_msgs;
    4949              : };
    4950              : 
    4951              : 
    4952              : /**
    4953              :  * @brief Statically define and initialize a message queue.
    4954              :  *
    4955              :  * The message queue's ring buffer contains space for @a q_max_msgs messages,
    4956              :  * each of which is @a q_msg_size bytes long. Alignment of the message queue's
    4957              :  * ring buffer is not necessary, setting @a q_align to 1 is sufficient.
    4958              :  *
    4959              :  * The message queue can be accessed outside the module where it is defined
    4960              :  * using:
    4961              :  *
    4962              :  * @code extern struct k_msgq <name>; @endcode
    4963              :  *
    4964              :  * @param q_name Name of the message queue.
    4965              :  * @param q_msg_size Message size (in bytes).
    4966              :  * @param q_max_msgs Maximum number of messages that can be queued.
    4967              :  * @param q_align Alignment of the message queue's ring buffer (power of 2).
    4968              :  *
    4969              :  */
    4970            1 : #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align)          \
    4971              :         static char __noinit __aligned(q_align)                         \
    4972              :                 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)];      \
    4973              :         STRUCT_SECTION_ITERABLE(k_msgq, q_name) =                       \
    4974              :                Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
    4975              :                                   (q_msg_size), (q_max_msgs))
    4976              : 
    4977              : /**
    4978              :  * @brief Initialize a message queue.
    4979              :  *
    4980              :  * This routine initializes a message queue object, prior to its first use.
    4981              :  *
    4982              :  * The message queue's ring buffer must contain space for @a max_msgs messages,
    4983              :  * each of which is @a msg_size bytes long. Alignment of the message queue's
    4984              :  * ring buffer is not necessary.
    4985              :  *
    4986              :  * @param msgq Address of the message queue.
    4987              :  * @param buffer Pointer to ring buffer that holds queued messages.
    4988              :  * @param msg_size Message size (in bytes).
    4989              :  * @param max_msgs Maximum number of messages that can be queued.
    4990              :  */
    4991            1 : void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
    4992              :                  uint32_t max_msgs);
    4993              : 
    4994              : /**
    4995              :  * @brief Initialize a message queue.
    4996              :  *
    4997              :  * This routine initializes a message queue object, prior to its first use,
    4998              :  * allocating its internal ring buffer from the calling thread's resource
    4999              :  * pool.
    5000              :  *
    5001              :  * Memory allocated for the ring buffer can be released by calling
    5002              :  * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
    5003              :  * all of its references.
    5004              :  *
    5005              :  * @param msgq Address of the message queue.
    5006              :  * @param msg_size Message size (in bytes).
    5007              :  * @param max_msgs Maximum number of messages that can be queued.
    5008              :  *
    5009              :  * @return 0 on success, -ENOMEM if there was insufficient memory in the
    5010              :  *      thread's resource pool, or -EINVAL if the size parameters cause
    5011              :  *      an integer overflow.
    5012              :  */
    5013            1 : __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
    5014              :                                 uint32_t max_msgs);
    5015              : 
    5016              : /**
    5017              :  * @brief Release allocated buffer for a queue
    5018              :  *
    5019              :  * Releases memory allocated for the ring buffer.
    5020              :  *
    5021              :  * @param msgq message queue to cleanup
    5022              :  *
    5023              :  * @retval 0 on success
    5024              :  * @retval -EBUSY Queue not empty
    5025              :  */
    5026            1 : int k_msgq_cleanup(struct k_msgq *msgq);
    5027              : 
    5028              : /**
    5029              :  * @brief Send a message to the end of a message queue.
    5030              :  *
    5031              :  * This routine sends a message to message queue @a q.
    5032              :  *
    5033              :  * @note The message content is copied from @a data into @a msgq and the @a data
    5034              :  * pointer is not retained, so the message content will not be modified
    5035              :  * by this function.
    5036              :  *
    5037              :  * @funcprops \isr_ok
    5038              :  *
    5039              :  * @param msgq Address of the message queue.
    5040              :  * @param data Pointer to the message.
    5041              :  * @param timeout Waiting period to add the message, or one of the special
    5042              :  *                values K_NO_WAIT and K_FOREVER.
    5043              :  *
    5044              :  * @retval 0 Message sent.
    5045              :  * @retval -ENOMSG Returned without waiting or queue purged.
    5046              :  * @retval -EAGAIN Waiting period timed out.
    5047              :  */
    5048            1 : __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
    5049              : 
    5050              : /**
    5051              :  * @brief Send a message to the front of a message queue.
    5052              :  *
    5053              :  * This routine sends a message to the beginning (head) of message queue @a q.
    5054              :  * Messages sent with this method will be retrieved before any pre-existing
    5055              :  * messages in the queue.
    5056              :  *
    5057              :  * @note if there is no space in the message queue, this function will
    5058              :  * behave the same as k_msgq_put.
    5059              :  *
    5060              :  * @note The message content is copied from @a data into @a msgq and the @a data
    5061              :  * pointer is not retained, so the message content will not be modified
    5062              :  * by this function.
    5063              :  *
    5064              :  * @note k_msgq_put_front() does not block.
    5065              :  *
    5066              :  * @funcprops \isr_ok
    5067              :  *
    5068              :  * @param msgq Address of the message queue.
    5069              :  * @param data Pointer to the message.
    5070              :  *
    5071              :  * @retval 0 Message sent.
    5072              :  * @retval -ENOMSG Returned without waiting or queue purged.
    5073              :  */
    5074            1 : __syscall int k_msgq_put_front(struct k_msgq *msgq, const void *data);
    5075              : 
    5076              : /**
    5077              :  * @brief Receive a message from a message queue.
    5078              :  *
    5079              :  * This routine receives a message from message queue @a q in a "first in,
    5080              :  * first out" manner.
    5081              :  *
    5082              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5083              :  *
    5084              :  * @funcprops \isr_ok
    5085              :  *
    5086              :  * @param msgq Address of the message queue.
    5087              :  * @param data Address of area to hold the received message.
    5088              :  * @param timeout Waiting period to receive the message,
    5089              :  *                or one of the special values K_NO_WAIT and
    5090              :  *                K_FOREVER.
    5091              :  *
    5092              :  * @retval 0 Message received.
    5093              :  * @retval -ENOMSG Returned without waiting or queue purged.
    5094              :  * @retval -EAGAIN Waiting period timed out.
    5095              :  */
    5096            1 : __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
    5097              : 
    5098              : /**
    5099              :  * @brief Peek/read a message from a message queue.
    5100              :  *
    5101              :  * This routine reads a message from message queue @a q in a "first in,
    5102              :  * first out" manner and leaves the message in the queue.
    5103              :  *
    5104              :  * @funcprops \isr_ok
    5105              :  *
    5106              :  * @param msgq Address of the message queue.
    5107              :  * @param data Address of area to hold the message read from the queue.
    5108              :  *
    5109              :  * @retval 0 Message read.
    5110              :  * @retval -ENOMSG Returned when the queue has no message.
    5111              :  */
    5112            1 : __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
    5113              : 
    5114              : /**
    5115              :  * @brief Peek/read a message from a message queue at the specified index
    5116              :  *
    5117              :  * This routine reads a message from message queue at the specified index
    5118              :  * and leaves the message in the queue.
    5119              :  * k_msgq_peek_at(msgq, data, 0) is equivalent to k_msgq_peek(msgq, data)
    5120              :  *
    5121              :  * @funcprops \isr_ok
    5122              :  *
    5123              :  * @param msgq Address of the message queue.
    5124              :  * @param data Address of area to hold the message read from the queue.
    5125              :  * @param idx Message queue index at which to peek
    5126              :  *
    5127              :  * @retval 0 Message read.
    5128              :  * @retval -ENOMSG Returned when the queue has no message at index.
    5129              :  */
    5130            1 : __syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
    5131              : 
    5132              : /**
    5133              :  * @brief Purge a message queue.
    5134              :  *
    5135              :  * This routine discards all unreceived messages in a message queue's ring
    5136              :  * buffer. Any threads that are blocked waiting to send a message to the
    5137              :  * message queue are unblocked and see an -ENOMSG error code.
    5138              :  *
    5139              :  * @param msgq Address of the message queue.
    5140              :  */
    5141            1 : __syscall void k_msgq_purge(struct k_msgq *msgq);
    5142              : 
    5143              : /**
    5144              :  * @brief Get the amount of free space in a message queue.
    5145              :  *
    5146              :  * This routine returns the number of unused entries in a message queue's
    5147              :  * ring buffer.
    5148              :  *
    5149              :  * @param msgq Address of the message queue.
    5150              :  *
    5151              :  * @return Number of unused ring buffer entries.
    5152              :  */
    5153            1 : __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
    5154              : 
    5155              : /**
    5156              :  * @brief Get basic attributes of a message queue.
    5157              :  *
    5158              :  * This routine fetches basic attributes of message queue into attr argument.
    5159              :  *
    5160              :  * @param msgq Address of the message queue.
    5161              :  * @param attrs pointer to message queue attribute structure.
    5162              :  */
    5163            1 : __syscall void  k_msgq_get_attrs(struct k_msgq *msgq,
    5164              :                                  struct k_msgq_attrs *attrs);
    5165              : 
    5166              : 
    5167              : static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
    5168              : {
    5169              :         return msgq->max_msgs - msgq->used_msgs;
    5170              : }
    5171              : 
    5172              : /**
    5173              :  * @brief Get the number of messages in a message queue.
    5174              :  *
    5175              :  * This routine returns the number of messages in a message queue's ring buffer.
    5176              :  *
    5177              :  * @param msgq Address of the message queue.
    5178              :  *
    5179              :  * @return Number of messages.
    5180              :  */
    5181            1 : __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
    5182              : 
    5183              : static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
    5184              : {
    5185              :         return msgq->used_msgs;
    5186              : }
    5187              : 
    5188              : /** @} */
    5189              : 
    5190              : /**
    5191              :  * @defgroup mailbox_apis Mailbox APIs
    5192              :  * @ingroup kernel_apis
    5193              :  * @{
    5194              :  */
    5195              : 
    5196              : /**
    5197              :  * @brief Mailbox Message Structure
    5198              :  *
    5199              :  */
    5200            1 : struct k_mbox_msg {
    5201              :         /** size of message (in bytes) */
    5202            1 :         size_t size;
    5203              :         /** application-defined information value */
    5204            1 :         uint32_t info;
    5205              :         /** sender's message data buffer */
    5206            1 :         void *tx_data;
    5207              :         /** source thread id */
    5208            1 :         k_tid_t rx_source_thread;
    5209              :         /** target thread id */
    5210            1 :         k_tid_t tx_target_thread;
    5211              :         /** internal use only - thread waiting on send (may be a dummy) */
    5212              :         k_tid_t _syncing_thread;
    5213              : #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
    5214              :         /** internal use only - semaphore used during asynchronous send */
    5215              :         struct k_sem *_async_sem;
    5216              : #endif
    5217              : };
    5218              : /**
    5219              :  * @brief Mailbox Structure
    5220              :  *
    5221              :  */
    5222            1 : struct k_mbox {
    5223              :         /** Transmit messages queue */
    5224            1 :         _wait_q_t tx_msg_queue;
    5225              :         /** Receive message queue */
    5226            1 :         _wait_q_t rx_msg_queue;
    5227            0 :         struct k_spinlock lock;
    5228              : 
    5229              :         SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
    5230              : 
    5231              : #ifdef CONFIG_OBJ_CORE_MAILBOX
    5232              :         struct k_obj_core  obj_core;
    5233              : #endif
    5234              : };
    5235              : /**
    5236              :  * @cond INTERNAL_HIDDEN
    5237              :  */
    5238              : 
    5239              : #define Z_MBOX_INITIALIZER(obj) \
    5240              :         { \
    5241              :         .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
    5242              :         .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
    5243              :         }
    5244              : 
    5245              : /**
    5246              :  * INTERNAL_HIDDEN @endcond
    5247              :  */
    5248              : 
    5249              : /**
    5250              :  * @brief Statically define and initialize a mailbox.
    5251              :  *
    5252              :  * The mailbox is to be accessed outside the module where it is defined using:
    5253              :  *
    5254              :  * @code extern struct k_mbox <name>; @endcode
    5255              :  *
    5256              :  * @param name Name of the mailbox.
    5257              :  */
    5258            1 : #define K_MBOX_DEFINE(name) \
    5259              :         STRUCT_SECTION_ITERABLE(k_mbox, name) = \
    5260              :                 Z_MBOX_INITIALIZER(name) \
    5261              : 
    5262              : /**
    5263              :  * @brief Initialize a mailbox.
    5264              :  *
    5265              :  * This routine initializes a mailbox object, prior to its first use.
    5266              :  *
    5267              :  * @param mbox Address of the mailbox.
    5268              :  */
    5269            1 : void k_mbox_init(struct k_mbox *mbox);
    5270              : 
    5271              : /**
    5272              :  * @brief Send a mailbox message in a synchronous manner.
    5273              :  *
    5274              :  * This routine sends a message to @a mbox and waits for a receiver to both
    5275              :  * receive and process it. The message data may be in a buffer or non-existent
    5276              :  * (i.e. an empty message).
    5277              :  *
    5278              :  * @param mbox Address of the mailbox.
    5279              :  * @param tx_msg Address of the transmit message descriptor.
    5280              :  * @param timeout Waiting period for the message to be received,
    5281              :  *                or one of the special values K_NO_WAIT
    5282              :  *                and K_FOREVER. Once the message has been received,
    5283              :  *                this routine waits as long as necessary for the message
    5284              :  *                to be completely processed.
    5285              :  *
    5286              :  * @retval 0 Message sent.
    5287              :  * @retval -ENOMSG Returned without waiting.
    5288              :  * @retval -EAGAIN Waiting period timed out.
    5289              :  */
    5290            1 : int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
    5291              :                       k_timeout_t timeout);
    5292              : 
    5293              : /**
    5294              :  * @brief Send a mailbox message in an asynchronous manner.
    5295              :  *
    5296              :  * This routine sends a message to @a mbox without waiting for a receiver
    5297              :  * to process it. The message data may be in a buffer or non-existent
    5298              :  * (i.e. an empty message). Optionally, the semaphore @a sem will be given
    5299              :  * when the message has been both received and completely processed by
    5300              :  * the receiver.
    5301              :  *
    5302              :  * @param mbox Address of the mailbox.
    5303              :  * @param tx_msg Address of the transmit message descriptor.
    5304              :  * @param sem Address of a semaphore, or NULL if none is needed.
    5305              :  */
    5306            1 : void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
    5307              :                              struct k_sem *sem);
    5308              : 
    5309              : /**
    5310              :  * @brief Receive a mailbox message.
    5311              :  *
    5312              :  * This routine receives a message from @a mbox, then optionally retrieves
    5313              :  * its data and disposes of the message.
    5314              :  *
    5315              :  * @param mbox Address of the mailbox.
    5316              :  * @param rx_msg Address of the receive message descriptor.
    5317              :  * @param buffer Address of the buffer to receive data, or NULL to defer data
    5318              :  *               retrieval and message disposal until later.
    5319              :  * @param timeout Waiting period for a message to be received,
    5320              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    5321              :  *
    5322              :  * @retval 0 Message received.
    5323              :  * @retval -ENOMSG Returned without waiting.
    5324              :  * @retval -EAGAIN Waiting period timed out.
    5325              :  */
    5326            1 : int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
    5327              :                       void *buffer, k_timeout_t timeout);
    5328              : 
    5329              : /**
    5330              :  * @brief Retrieve mailbox message data into a buffer.
    5331              :  *
    5332              :  * This routine completes the processing of a received message by retrieving
    5333              :  * its data into a buffer, then disposing of the message.
    5334              :  *
    5335              :  * Alternatively, this routine can be used to dispose of a received message
    5336              :  * without retrieving its data.
    5337              :  *
    5338              :  * @param rx_msg Address of the receive message descriptor.
    5339              :  * @param buffer Address of the buffer to receive data, or NULL to discard
    5340              :  *               the data.
    5341              :  */
    5342            1 : void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
    5343              : 
    5344              : /** @} */
    5345              : 
    5346              : /**
    5347              :  * @defgroup pipe_apis Pipe APIs
    5348              :  * @ingroup kernel_apis
    5349              :  * @{
    5350              :  */
    5351              : 
    5352              : /**
    5353              :  * @brief initialize a pipe
    5354              :  *
    5355              :  * This routine initializes a pipe object, prior to its first use.
    5356              :  *
    5357              :  * @param pipe Address of the pipe.
    5358              :  * @param buffer Address of the pipe's buffer, or NULL if no ring buffer is used.
    5359              :  * @param buffer_size Size of the pipe's buffer, or zero if no ring buffer is used.
    5360              :  */
    5361            1 : __syscall void k_pipe_init(struct k_pipe *pipe, uint8_t *buffer, size_t buffer_size);
    5362              : 
    5363            0 : enum pipe_flags {
    5364              :         PIPE_FLAG_OPEN = BIT(0),
    5365              :         PIPE_FLAG_RESET = BIT(1),
    5366              : };
    5367              : 
    5368            0 : struct k_pipe {
    5369            0 :         size_t waiting;
    5370            0 :         struct ring_buf buf;
    5371            0 :         struct k_spinlock lock;
    5372            0 :         _wait_q_t data;
    5373            0 :         _wait_q_t space;
    5374            0 :         uint8_t flags;
    5375              : 
    5376              :         Z_DECL_POLL_EVENT
    5377              : #ifdef CONFIG_OBJ_CORE_PIPE
    5378              :         struct k_obj_core  obj_core;
    5379              : #endif
    5380              :         SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
    5381              : };
    5382              : 
    5383              : /**
    5384              :  * @cond INTERNAL_HIDDEN
    5385              :  */
    5386              : #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size)  \
    5387              : {                                                               \
    5388              :         .waiting = 0,                                           \
    5389              :         .buf = RING_BUF_INIT(pipe_buffer, pipe_buffer_size),    \
    5390              :         .data = Z_WAIT_Q_INIT(&obj.data),                   \
    5391              :         .space = Z_WAIT_Q_INIT(&obj.space),                 \
    5392              :         .flags = PIPE_FLAG_OPEN,                                \
    5393              :         Z_POLL_EVENT_OBJ_INIT(obj)                              \
    5394              : }
    5395              : /**
    5396              :  * INTERNAL_HIDDEN @endcond
    5397              :  */
    5398              : 
    5399              : /**
    5400              :  * @brief Statically define and initialize a pipe.
    5401              :  *
    5402              :  * The pipe can be accessed outside the module where it is defined using:
    5403              :  *
    5404              :  * @code extern struct k_pipe <name>; @endcode
    5405              :  *
    5406              :  * @param name Name of the pipe.
    5407              :  * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes)
    5408              :  *                         or zero if no ring buffer is used.
    5409              :  * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
    5410              :  *
    5411              :  */
    5412            1 : #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align)               \
    5413              :         static unsigned char __noinit __aligned(pipe_align)             \
    5414              :                 _k_pipe_buf_##name[pipe_buffer_size];                   \
    5415              :         STRUCT_SECTION_ITERABLE(k_pipe, name) =                         \
    5416              :                 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
    5417              : 
    5418              : 
    5419              : /**
    5420              :  * @brief Write data to a pipe
    5421              :  *
    5422              :  * This routine writes up to @a len bytes of data to @a pipe.
    5423              :  * If the pipe is full, the routine will block until the data can be written or the timeout expires.
    5424              :  *
    5425              :  * @param pipe Address of the pipe.
    5426              :  * @param data Address of data to write.
    5427              :  * @param len Size of data (in bytes).
    5428              :  * @param timeout Waiting period to wait for the data to be written.
    5429              :  *
    5430              :  * @retval number of bytes written on success
    5431              :  * @retval -EAGAIN if no data could be written before the timeout expired
    5432              :  * @retval -ECANCELED if the write was interrupted by k_pipe_reset(..)
    5433              :  * @retval -EPIPE if the pipe was closed
    5434              :  */
    5435            1 : __syscall int k_pipe_write(struct k_pipe *pipe, const uint8_t *data, size_t len,
    5436              :                            k_timeout_t timeout);
    5437              : 
    5438              : /**
    5439              :  * @brief Read data from a pipe
    5440              :  * This routine reads up to @a len bytes of data from @a pipe.
    5441              :  * If the pipe is empty, the routine will block until the data can be read or the timeout expires.
    5442              :  *
    5443              :  * @param pipe Address of the pipe.
    5444              :  * @param data Address to place the data read from pipe.
    5445              :  * @param len Requested number of bytes to read.
    5446              :  * @param timeout Waiting period to wait for the data to be read.
    5447              :  *
    5448              :  * @retval number of bytes read on success
    5449              :  * @retval -EAGAIN if no data could be read before the timeout expired
    5450              :  * @retval -ECANCELED if the read was interrupted by k_pipe_reset(..)
    5451              :  * @retval -EPIPE if the pipe was closed
    5452              :  */
    5453            1 : __syscall int k_pipe_read(struct k_pipe *pipe, uint8_t *data, size_t len,
    5454              :                           k_timeout_t timeout);
    5455              : 
    5456              : /**
    5457              :  * @brief Reset a pipe
    5458              :  * This routine resets the pipe, discarding any unread data and unblocking any threads waiting to
    5459              :  * write or read, causing the waiting threads to return with -ECANCELED. Calling k_pipe_read(..) or
    5460              :  * k_pipe_write(..) when the pipe is resetting but not yet reset will return -ECANCELED.
    5461              :  * The pipe is left open after a reset and can be used as normal.
    5462              :  *
    5463              :  * @param pipe Address of the pipe.
    5464              :  */
    5465            1 : __syscall void k_pipe_reset(struct k_pipe *pipe);
    5466              : 
    5467              : /**
    5468              :  * @brief Close a pipe
    5469              :  *
    5470              :  * This routine closes a pipe. Any threads that were blocked on the pipe
    5471              :  * will be unblocked and receive an error code.
    5472              :  *
    5473              :  * @param pipe Address of the pipe.
    5474              :  */
    5475            1 : __syscall void k_pipe_close(struct k_pipe *pipe);
    5476              : /** @} */
    5477              : 
    5478              : /**
    5479              :  * @cond INTERNAL_HIDDEN
    5480              :  */
    5481              : struct k_mem_slab_info {
    5482              :         uint32_t num_blocks;
    5483              :         size_t   block_size;
    5484              :         uint32_t num_used;
    5485              : #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
    5486              :         uint32_t max_used;
    5487              : #endif
    5488              : };
    5489              : 
    5490              : struct k_mem_slab {
    5491              :         _wait_q_t wait_q;
    5492              :         struct k_spinlock lock;
    5493              :         char *buffer;
    5494              :         char *free_list;
    5495              :         struct k_mem_slab_info info;
    5496              : 
    5497              :         SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
    5498              : 
    5499              : #ifdef CONFIG_OBJ_CORE_MEM_SLAB
    5500              :         struct k_obj_core  obj_core;
    5501              : #endif
    5502              : };
    5503              : 
    5504              : #define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
    5505              :                                _slab_num_blocks)                      \
    5506              :         {                                                             \
    5507              :         .wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q),                     \
    5508              :         .lock = {},                                                   \
    5509              :         .buffer = _slab_buffer,                                       \
    5510              :         .free_list = NULL,                                            \
    5511              :         .info = {_slab_num_blocks, _slab_block_size, 0}               \
    5512              :         }
    5513              : 
    5514              : 
    5515              : /**
    5516              :  * INTERNAL_HIDDEN @endcond
    5517              :  */
    5518              : 
    5519              : /**
    5520              :  * @defgroup mem_slab_apis Memory Slab APIs
    5521              :  * @ingroup kernel_apis
    5522              :  * @{
    5523              :  */
    5524              : 
    5525              : /**
    5526              :  * @brief Statically define and initialize a memory slab in a user-provided memory section with
    5527              :  * public (non-static) scope.
    5528              :  *
    5529              :  * The memory slab's buffer contains @a slab_num_blocks memory blocks
    5530              :  * that are @a slab_block_size bytes long. The buffer is aligned to a
    5531              :  * @a slab_align -byte boundary. To ensure that each memory block is similarly
    5532              :  * aligned to this boundary, @a slab_block_size must also be a multiple of
    5533              :  * @a slab_align.
    5534              :  *
    5535              :  * The memory slab can be accessed outside the module where it is defined
    5536              :  * using:
    5537              :  *
    5538              :  * @code extern struct k_mem_slab <name>; @endcode
    5539              :  *
    5540              :  * @note This macro cannot be used together with a static keyword.
    5541              :  *       If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_IN_SECT_STATIC
    5542              :  *       instead.
    5543              :  *
    5544              :  * @param name Name of the memory slab.
    5545              :  * @param in_section Section attribute specifier such as Z_GENERIC_SECTION.
    5546              :  * @param slab_block_size Size of each memory block (in bytes).
    5547              :  * @param slab_num_blocks Number memory blocks.
    5548              :  * @param slab_align Alignment of the memory slab's buffer (power of 2).
    5549              :  */
    5550            1 : #define K_MEM_SLAB_DEFINE_IN_SECT(name, in_section, slab_block_size, slab_num_blocks, slab_align)  \
    5551              :         BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0,                                      \
    5552              :                      "slab_block_size must be a multiple of slab_align");                          \
    5553              :         BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0),                                   \
    5554              :                      "slab_align must be a power of 2");                                           \
    5555              :         char in_section __aligned(WB_UP(                                                           \
    5556              :                 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)];   \
    5557              :         STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER(                        \
    5558              :                 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
    5559              : 
    5560              : /**
    5561              :  * @brief Statically define and initialize a memory slab in a public (non-static) scope.
    5562              :  *
    5563              :  * The memory slab's buffer contains @a slab_num_blocks memory blocks
    5564              :  * that are @a slab_block_size bytes long. The buffer is aligned to a
    5565              :  * @a slab_align -byte boundary. To ensure that each memory block is similarly
    5566              :  * aligned to this boundary, @a slab_block_size must also be a multiple of
    5567              :  * @a slab_align.
    5568              :  *
    5569              :  * The memory slab can be accessed outside the module where it is defined
    5570              :  * using:
    5571              :  *
    5572              :  * @code extern struct k_mem_slab <name>; @endcode
    5573              :  *
    5574              :  * @note This macro cannot be used together with a static keyword.
    5575              :  *       If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
    5576              :  *       instead.
    5577              :  *
    5578              :  * @param name Name of the memory slab.
    5579              :  * @param slab_block_size Size of each memory block (in bytes).
    5580              :  * @param slab_num_blocks Number memory blocks.
    5581              :  * @param slab_align Alignment of the memory slab's buffer (power of 2).
    5582              :  */
    5583            1 : #define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align)                      \
    5584              :         K_MEM_SLAB_DEFINE_IN_SECT(name, __noinit_named(k_mem_slab_buf_##name), slab_block_size,    \
    5585              :                                   slab_num_blocks, slab_align)
    5586              : 
    5587              : /**
    5588              :  * @brief Statically define and initialize a memory slab in a user-provided memory section with
    5589              :  * private (static) scope.
    5590              :  *
    5591              :  * The memory slab's buffer contains @a slab_num_blocks memory blocks
    5592              :  * that are @a slab_block_size bytes long. The buffer is aligned to a
    5593              :  * @a slab_align -byte boundary. To ensure that each memory block is similarly
    5594              :  * aligned to this boundary, @a slab_block_size must also be a multiple of
    5595              :  * @a slab_align.
    5596              :  *
    5597              :  * @param name Name of the memory slab.
    5598              :  * @param in_section Section attribute specifier such as Z_GENERIC_SECTION.
    5599              :  * @param slab_block_size Size of each memory block (in bytes).
    5600              :  * @param slab_num_blocks Number memory blocks.
    5601              :  * @param slab_align Alignment of the memory slab's buffer (power of 2).
    5602              :  */
    5603              : #define K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, in_section, slab_block_size, slab_num_blocks,       \
    5604            1 :                                          slab_align)                                               \
    5605              :         BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0,                                      \
    5606              :                      "slab_block_size must be a multiple of slab_align");                          \
    5607              :         BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0),                                   \
    5608              :                      "slab_align must be a power of 2");                                           \
    5609              :         static char in_section __aligned(WB_UP(                                                    \
    5610              :                 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)];   \
    5611              :         static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER(                 \
    5612              :                 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
    5613              : 
    5614              : /**
    5615              :  * @brief Statically define and initialize a memory slab in a private (static) scope.
    5616              :  *
    5617              :  * The memory slab's buffer contains @a slab_num_blocks memory blocks
    5618              :  * that are @a slab_block_size bytes long. The buffer is aligned to a
    5619              :  * @a slab_align -byte boundary. To ensure that each memory block is similarly
    5620              :  * aligned to this boundary, @a slab_block_size must also be a multiple of
    5621              :  * @a slab_align.
    5622              :  *
    5623              :  * @param name Name of the memory slab.
    5624              :  * @param slab_block_size Size of each memory block (in bytes).
    5625              :  * @param slab_num_blocks Number memory blocks.
    5626              :  * @param slab_align Alignment of the memory slab's buffer (power of 2).
    5627              :  */
    5628            1 : #define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align)               \
    5629              :         K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, __noinit_named(k_mem_slab_buf_##name),              \
    5630              :                                          slab_block_size, slab_num_blocks, slab_align)
    5631              : 
    5632              : /**
    5633              :  * @brief Initialize a memory slab.
    5634              :  *
    5635              :  * Initializes a memory slab, prior to its first use.
    5636              :  *
    5637              :  * The memory slab's buffer contains @a slab_num_blocks memory blocks
    5638              :  * that are @a slab_block_size bytes long. The buffer must be aligned to an
    5639              :  * N-byte boundary matching a word boundary, where N is a power of 2
    5640              :  * (i.e. 4 on 32-bit systems, 8, 16, ...).
    5641              :  * To ensure that each memory block is similarly aligned to this boundary,
    5642              :  * @a slab_block_size must also be a multiple of N.
    5643              :  *
    5644              :  * @param slab Address of the memory slab.
    5645              :  * @param buffer Pointer to buffer used for the memory blocks.
    5646              :  * @param block_size Size of each memory block (in bytes).
    5647              :  * @param num_blocks Number of memory blocks.
    5648              :  *
    5649              :  * @retval 0 on success
    5650              :  * @retval -EINVAL invalid data supplied
    5651              :  *
    5652              :  */
    5653            1 : int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
    5654              :                            size_t block_size, uint32_t num_blocks);
    5655              : 
    5656              : /**
    5657              :  * @brief Allocate memory from a memory slab.
    5658              :  *
    5659              :  * This routine allocates a memory block from a memory slab.
    5660              :  *
    5661              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5662              :  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
    5663              :  *
    5664              :  * @funcprops \isr_ok
    5665              :  *
    5666              :  * @param slab Address of the memory slab.
    5667              :  * @param mem Pointer to block address area.
    5668              :  * @param timeout Waiting period to wait for operation to complete.
    5669              :  *        Use K_NO_WAIT to return without waiting,
    5670              :  *        or K_FOREVER to wait as long as necessary.
    5671              :  *
    5672              :  * @retval 0 Memory allocated. The block address area pointed at by @a mem
    5673              :  *         is set to the starting address of the memory block.
    5674              :  * @retval -ENOMEM Returned without waiting.
    5675              :  * @retval -EAGAIN Waiting period timed out.
    5676              :  * @retval -EINVAL Invalid data supplied
    5677              :  */
    5678            1 : int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
    5679              :                             k_timeout_t timeout);
    5680              : 
    5681              : /**
    5682              :  * @brief Free memory allocated from a memory slab.
    5683              :  *
    5684              :  * This routine releases a previously allocated memory block back to its
    5685              :  * associated memory slab.
    5686              :  *
    5687              :  * @funcprops \isr_ok
    5688              :  *
    5689              :  * @param slab Address of the memory slab.
    5690              :  * @param mem Pointer to the memory block (as returned by k_mem_slab_alloc()).
    5691              :  */
    5692            1 : void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
    5693              : 
    5694              : /**
    5695              :  * @brief Get the number of used blocks in a memory slab.
    5696              :  *
    5697              :  * This routine gets the number of memory blocks that are currently
    5698              :  * allocated in @a slab.
    5699              :  *
    5700              :  * @funcprops \isr_ok
    5701              :  *
    5702              :  * @param slab Address of the memory slab.
    5703              :  *
    5704              :  * @return Number of allocated memory blocks.
    5705              :  */
    5706            1 : static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
    5707              : {
    5708              :         return slab->info.num_used;
    5709              : }
    5710              : 
    5711              : /**
    5712              :  * @brief Get the number of maximum used blocks so far in a memory slab.
    5713              :  *
    5714              :  * This routine gets the maximum number of memory blocks that were
    5715              :  * allocated in @a slab.
    5716              :  *
    5717              :  * @funcprops \isr_ok
    5718              :  *
    5719              :  * @param slab Address of the memory slab.
    5720              :  *
    5721              :  * @return Maximum number of allocated memory blocks.
    5722              :  */
    5723            1 : static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
    5724              : {
    5725              : #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
    5726              :         return slab->info.max_used;
    5727              : #else
    5728              :         ARG_UNUSED(slab);
    5729              :         return 0;
    5730              : #endif
    5731              : }
    5732              : 
    5733              : /**
    5734              :  * @brief Get the number of unused blocks in a memory slab.
    5735              :  *
    5736              :  * This routine gets the number of memory blocks that are currently
    5737              :  * unallocated in @a slab.
    5738              :  *
    5739              :  * @funcprops \isr_ok
    5740              :  *
    5741              :  * @param slab Address of the memory slab.
    5742              :  *
    5743              :  * @return Number of unallocated memory blocks.
    5744              :  */
    5745            1 : static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
    5746              : {
    5747              :         return slab->info.num_blocks - slab->info.num_used;
    5748              : }
    5749              : 
    5750              : /**
    5751              :  * @brief Get the memory stats for a memory slab
    5752              :  *
    5753              :  * This routine gets the runtime memory usage stats for the slab @a slab.
    5754              :  *
    5755              :  * @funcprops \isr_ok
    5756              :  *
    5757              :  * @param slab Address of the memory slab
    5758              :  * @param stats Pointer to memory into which to copy memory usage statistics
    5759              :  *
    5760              :  * @retval 0 Success
    5761              :  * @retval -EINVAL Any parameter points to NULL
    5762              :  */
    5763              : 
    5764            1 : int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
    5765              : 
    5766              : /**
    5767              :  * @brief Reset the maximum memory usage for a slab
    5768              :  *
    5769              :  * This routine resets the maximum memory usage for the slab @a slab to its
    5770              :  * current usage.
    5771              :  *
    5772              :  * @funcprops \isr_ok
    5773              :  *
    5774              :  * @param slab Address of the memory slab
    5775              :  *
    5776              :  * @retval 0 Success
    5777              :  * @retval -EINVAL Memory slab is NULL
    5778              :  */
    5779            1 : int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
    5780              : 
    5781              : /** @} */
    5782              : 
    5783              : /**
    5784              :  * @addtogroup heap_apis
    5785              :  * @{
    5786              :  */
    5787              : 
    5788              : /* kernel synchronized heap struct */
    5789              : 
    5790            0 : struct k_heap {
    5791            0 :         struct sys_heap heap;
    5792            0 :         _wait_q_t wait_q;
    5793            0 :         struct k_spinlock lock;
    5794              : };
    5795              : 
    5796              : /**
    5797              :  * @brief Initialize a k_heap
    5798              :  *
    5799              :  * This constructs a synchronized k_heap object over a memory region
    5800              :  * specified by the user.  Note that while any alignment and size can
    5801              :  * be passed as valid parameters, internal alignment restrictions
    5802              :  * inside the inner sys_heap mean that not all bytes may be usable as
    5803              :  * allocated memory.
    5804              :  *
    5805              :  * @param h Heap struct to initialize
    5806              :  * @param mem Pointer to memory.
    5807              :  * @param bytes Size of memory region, in bytes
    5808              :  */
    5809            1 : void k_heap_init(struct k_heap *h, void *mem,
    5810              :                 size_t bytes) __attribute_nonnull(1);
    5811              : 
    5812              : /**
    5813              :  * @brief Allocate aligned memory from a k_heap
    5814              :  *
    5815              :  * Behaves in all ways like k_heap_alloc(), except that the returned
    5816              :  * memory (if available) will have a starting address in memory which
    5817              :  * is a multiple of the specified power-of-two alignment value in
    5818              :  * bytes.  The resulting memory can be returned to the heap using
    5819              :  * k_heap_free().
    5820              :  *
    5821              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5822              :  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
    5823              :  *
    5824              :  * @funcprops \isr_ok
    5825              :  *
    5826              :  * @param h Heap from which to allocate
    5827              :  * @param align Alignment in bytes, must be a power of two
    5828              :  * @param bytes Number of bytes requested
    5829              :  * @param timeout How long to wait, or K_NO_WAIT
    5830              :  * @return Pointer to memory the caller can now use
    5831              :  */
    5832            1 : void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
    5833              :                         k_timeout_t timeout) __attribute_nonnull(1);
    5834              : 
    5835              : /**
    5836              :  * @brief Allocate memory from a k_heap
    5837              :  *
    5838              :  * Allocates and returns a memory buffer from the memory region owned
    5839              :  * by the heap.  If no memory is available immediately, the call will
    5840              :  * block for the specified timeout (constructed via the standard
    5841              :  * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
    5842              :  * freed.  If the allocation cannot be performed by the expiration of
    5843              :  * the timeout, NULL will be returned.
    5844              :  * Allocated memory is aligned on a multiple of pointer sizes.
    5845              :  *
    5846              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5847              :  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
    5848              :  *
    5849              :  * @funcprops \isr_ok
    5850              :  *
    5851              :  * @param h Heap from which to allocate
    5852              :  * @param bytes Desired size of block to allocate
    5853              :  * @param timeout How long to wait, or K_NO_WAIT
    5854              :  * @return A pointer to valid heap memory, or NULL
    5855              :  */
    5856            1 : void *k_heap_alloc(struct k_heap *h, size_t bytes,
    5857              :                 k_timeout_t timeout) __attribute_nonnull(1);
    5858              : 
    5859              : /**
    5860              :  * @brief Allocate and initialize memory for an array of objects from a k_heap
    5861              :  *
    5862              :  * Allocates memory for an array of num objects of size and initializes all
    5863              :  * bytes in the allocated storage to zero.  If no memory is available
    5864              :  * immediately, the call will block for the specified timeout (constructed
    5865              :  * via the standard timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory
    5866              :  * to be freed.  If the allocation cannot be performed by the expiration of
    5867              :  * the timeout, NULL will be returned.
    5868              :  * Allocated memory is aligned on a multiple of pointer sizes.
    5869              :  *
    5870              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5871              :  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
    5872              :  *
    5873              :  * @funcprops \isr_ok
    5874              :  *
    5875              :  * @param h Heap from which to allocate
    5876              :  * @param num Number of objects to allocate
    5877              :  * @param size Desired size of each object to allocate
    5878              :  * @param timeout How long to wait, or K_NO_WAIT
    5879              :  * @return A pointer to valid heap memory, or NULL
    5880              :  */
    5881            1 : void *k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
    5882              :         __attribute_nonnull(1);
    5883              : 
    5884              : /**
    5885              :  * @brief Reallocate memory from a k_heap
    5886              :  *
    5887              :  * Reallocates and returns a memory buffer from the memory region owned
    5888              :  * by the heap.  If no memory is available immediately, the call will
    5889              :  * block for the specified timeout (constructed via the standard
    5890              :  * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
    5891              :  * freed.  If the allocation cannot be performed by the expiration of
    5892              :  * the timeout, NULL will be returned.
    5893              :  * Reallocated memory is aligned on a multiple of pointer sizes.
    5894              :  *
    5895              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5896              :  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
    5897              :  *
    5898              :  * @funcprops \isr_ok
    5899              :  *
    5900              :  * @param h Heap from which to allocate
    5901              :  * @param ptr Original pointer returned from a previous allocation
    5902              :  * @param bytes Desired size of block to allocate
    5903              :  * @param timeout How long to wait, or K_NO_WAIT
    5904              :  *
    5905              :  * @return Pointer to memory the caller can now use, or NULL
    5906              :  */
    5907            1 : void *k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
    5908              :         __attribute_nonnull(1);
    5909              : 
    5910              : /**
    5911              :  * @brief Free memory allocated by k_heap_alloc()
    5912              :  *
    5913              :  * Returns the specified memory block, which must have been returned
    5914              :  * from k_heap_alloc(), to the heap for use by other callers.  Passing
    5915              :  * a NULL block is legal, and has no effect.
    5916              :  *
    5917              :  * @param h Heap to which to return the memory
    5918              :  * @param mem A valid memory block, or NULL
    5919              :  */
    5920            1 : void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
    5921              : 
    5922              : /* Hand-calculated minimum heap sizes needed to return a successful
    5923              :  * 1-byte allocation.  See details in lib/os/heap.[ch]
    5924              :  */
    5925              : #define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 56 : 44)
    5926              : 
    5927              : /**
    5928              :  * @brief Define a static k_heap in the specified linker section
    5929              :  *
    5930              :  * This macro defines and initializes a static memory region and
    5931              :  * k_heap of the requested size in the specified linker section.
    5932              :  * After kernel start, &name can be used as if k_heap_init() had
    5933              :  * been called.
    5934              :  *
    5935              :  * Note that this macro enforces a minimum size on the memory region
    5936              :  * to accommodate metadata requirements.  Very small heaps will be
    5937              :  * padded to fit.
    5938              :  *
    5939              :  * @param name Symbol name for the struct k_heap object
    5940              :  * @param bytes Size of memory region, in bytes
    5941              :  * @param in_section Section attribute specifier such as Z_GENERIC_SECTION.
    5942              :  */
    5943              : #define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section)          \
    5944              :         char in_section                                         \
    5945              :              __aligned(8) /* CHUNK_UNIT */                      \
    5946              :              kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)];         \
    5947              :         STRUCT_SECTION_ITERABLE(k_heap, name) = {               \
    5948              :                 .heap = {                                       \
    5949              :                         .init_mem = kheap_##name,               \
    5950              :                         .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
    5951              :                  },                                             \
    5952              :         }
    5953              : 
    5954              : /**
    5955              :  * @brief Define a static k_heap
    5956              :  *
    5957              :  * This macro defines and initializes a static memory region and
    5958              :  * k_heap of the requested size.  After kernel start, &name can be
    5959              :  * used as if k_heap_init() had been called.
    5960              :  *
    5961              :  * Note that this macro enforces a minimum size on the memory region
    5962              :  * to accommodate metadata requirements.  Very small heaps will be
    5963              :  * padded to fit.
    5964              :  *
    5965              :  * @param name Symbol name for the struct k_heap object
    5966              :  * @param bytes Size of memory region, in bytes
    5967              :  */
    5968            1 : #define K_HEAP_DEFINE(name, bytes)                              \
    5969              :         Z_HEAP_DEFINE_IN_SECT(name, bytes,                      \
    5970              :                               __noinit_named(kheap_buf_##name))
    5971              : 
    5972              : /**
    5973              :  * @brief Define a static k_heap in uncached memory
    5974              :  *
    5975              :  * This macro defines and initializes a static memory region and
    5976              :  * k_heap of the requested size in uncached memory.  After kernel
    5977              :  * start, &name can be used as if k_heap_init() had been called.
    5978              :  *
    5979              :  * Note that this macro enforces a minimum size on the memory region
    5980              :  * to accommodate metadata requirements.  Very small heaps will be
    5981              :  * padded to fit.
    5982              :  *
    5983              :  * @param name Symbol name for the struct k_heap object
    5984              :  * @param bytes Size of memory region, in bytes
    5985              :  */
    5986            1 : #define K_HEAP_DEFINE_NOCACHE(name, bytes)                      \
    5987              :         Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
    5988              : 
    5989              : /** @brief Get the array of statically defined heaps
    5990              :  *
    5991              :  * Returns the pointer to the start of the static heap array.
    5992              :  * Static heaps are those declared through one of the `K_HEAP_DEFINE`
    5993              :  * macros.
    5994              :  *
    5995              :  * @param heap Pointer to location where heap array address is written
    5996              :  * @return Number of static heaps
    5997              :  */
    5998            1 : int k_heap_array_get(struct k_heap **heap);
    5999              : 
    6000              : /**
    6001              :  * @}
    6002              :  */
    6003              : 
    6004              : /**
    6005              :  * @defgroup heap_apis Heap APIs
    6006              :  * @brief Memory allocation from the Heap
    6007              :  * @ingroup kernel_apis
    6008              :  * @{
    6009              :  */
    6010              : 
    6011              : /**
    6012              :  * @brief Allocate memory from the heap with a specified alignment.
    6013              :  *
    6014              :  * This routine provides semantics similar to aligned_alloc(); memory is
    6015              :  * allocated from the heap with a specified alignment. However, one minor
    6016              :  * difference is that k_aligned_alloc() accepts any non-zero @p size,
    6017              :  * whereas aligned_alloc() only accepts a @p size that is an integral
    6018              :  * multiple of @p align.
    6019              :  *
    6020              :  * Above, aligned_alloc() refers to:
    6021              :  * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
    6022              :  * The aligned_alloc function (p: 347-348)
    6023              :  *
    6024              :  * @param align Alignment of memory requested (in bytes).
    6025              :  * @param size Amount of memory requested (in bytes).
    6026              :  *
    6027              :  * @return Address of the allocated memory if successful; otherwise NULL.
    6028              :  */
    6029            1 : void *k_aligned_alloc(size_t align, size_t size);
    6030              : 
    6031              : /**
    6032              :  * @brief Allocate memory from the heap.
    6033              :  *
    6034              :  * This routine provides traditional malloc() semantics. Memory is
    6035              :  * allocated from the heap memory pool.
    6036              :  * Allocated memory is aligned on a multiple of pointer sizes.
    6037              :  *
    6038              :  * @param size Amount of memory requested (in bytes).
    6039              :  *
    6040              :  * @return Address of the allocated memory if successful; otherwise NULL.
    6041              :  */
    6042            1 : void *k_malloc(size_t size);
    6043              : 
    6044              : /**
    6045              :  * @brief Free memory allocated from heap.
    6046              :  *
    6047              :  * This routine provides traditional free() semantics. The memory being
    6048              :  * returned must have been allocated from the heap memory pool.
    6049              :  *
    6050              :  * If @a ptr is NULL, no operation is performed.
    6051              :  *
    6052              :  * @param ptr Pointer to previously allocated memory.
    6053              :  */
    6054            1 : void k_free(void *ptr);
    6055              : 
    6056              : /**
    6057              :  * @brief Allocate memory from heap, array style
    6058              :  *
    6059              :  * This routine provides traditional calloc() semantics. Memory is
    6060              :  * allocated from the heap memory pool and zeroed.
    6061              :  *
    6062              :  * @param nmemb Number of elements in the requested array
    6063              :  * @param size Size of each array element (in bytes).
    6064              :  *
    6065              :  * @return Address of the allocated memory if successful; otherwise NULL.
    6066              :  */
    6067            1 : void *k_calloc(size_t nmemb, size_t size);
    6068              : 
    6069              : /** @brief Expand the size of an existing allocation
    6070              :  *
    6071              :  * Returns a pointer to a new memory region with the same contents,
    6072              :  * but a different allocated size.  If the new allocation can be
    6073              :  * expanded in place, the pointer returned will be identical.
    6074              :  * Otherwise the data will be copies to a new block and the old one
    6075              :  * will be freed as per sys_heap_free().  If the specified size is
    6076              :  * smaller than the original, the block will be truncated in place and
    6077              :  * the remaining memory returned to the heap.  If the allocation of a
    6078              :  * new block fails, then NULL will be returned and the old block will
    6079              :  * not be freed or modified.
    6080              :  *
    6081              :  * @param ptr Original pointer returned from a previous allocation
    6082              :  * @param size Amount of memory requested (in bytes).
    6083              :  *
    6084              :  * @return Pointer to memory the caller can now use, or NULL.
    6085              :  */
    6086            1 : void *k_realloc(void *ptr, size_t size);
    6087              : 
    6088              : /** @} */
    6089              : 
    6090              : /* polling API - PRIVATE */
    6091              : 
    6092              : #ifdef CONFIG_POLL
    6093              : #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
    6094              : #else
    6095              : #define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
    6096              : #endif
    6097              : 
    6098              : /* private - types bit positions */
    6099              : enum _poll_types_bits {
    6100              :         /* can be used to ignore an event */
    6101              :         _POLL_TYPE_IGNORE,
    6102              : 
    6103              :         /* to be signaled by k_poll_signal_raise() */
    6104              :         _POLL_TYPE_SIGNAL,
    6105              : 
    6106              :         /* semaphore availability */
    6107              :         _POLL_TYPE_SEM_AVAILABLE,
    6108              : 
    6109              :         /* queue/FIFO/LIFO data availability */
    6110              :         _POLL_TYPE_DATA_AVAILABLE,
    6111              : 
    6112              :         /* msgq data availability */
    6113              :         _POLL_TYPE_MSGQ_DATA_AVAILABLE,
    6114              : 
    6115              :         /* pipe data availability */
    6116              :         _POLL_TYPE_PIPE_DATA_AVAILABLE,
    6117              : 
    6118              :         _POLL_NUM_TYPES
    6119              : };
    6120              : 
    6121              : #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
    6122              : 
    6123              : /* private - states bit positions */
    6124              : enum _poll_states_bits {
    6125              :         /* default state when creating event */
    6126              :         _POLL_STATE_NOT_READY,
    6127              : 
    6128              :         /* signaled by k_poll_signal_raise() */
    6129              :         _POLL_STATE_SIGNALED,
    6130              : 
    6131              :         /* semaphore is available */
    6132              :         _POLL_STATE_SEM_AVAILABLE,
    6133              : 
    6134              :         /* data is available to read on queue/FIFO/LIFO */
    6135              :         _POLL_STATE_DATA_AVAILABLE,
    6136              : 
    6137              :         /* queue/FIFO/LIFO wait was cancelled */
    6138              :         _POLL_STATE_CANCELLED,
    6139              : 
    6140              :         /* data is available to read on a message queue */
    6141              :         _POLL_STATE_MSGQ_DATA_AVAILABLE,
    6142              : 
    6143              :         /* data is available to read from a pipe */
    6144              :         _POLL_STATE_PIPE_DATA_AVAILABLE,
    6145              : 
    6146              :         _POLL_NUM_STATES
    6147              : };
    6148              : 
    6149              : #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
    6150              : 
    6151              : #define _POLL_EVENT_NUM_UNUSED_BITS \
    6152              :         (32 - (0 \
    6153              :                + 8 /* tag */ \
    6154              :                + _POLL_NUM_TYPES \
    6155              :                + _POLL_NUM_STATES \
    6156              :                + 1 /* modes */ \
    6157              :               ))
    6158              : 
    6159              : /* end of polling API - PRIVATE */
    6160              : 
    6161              : 
    6162              : /**
    6163              :  * @defgroup poll_apis Async polling APIs
    6164              :  * @brief An API to wait concurrently for any one of multiple conditions to be
    6165              :  *        fulfilled
    6166              :  * @ingroup kernel_apis
    6167              :  * @{
    6168              :  */
    6169              : 
    6170              : /* Public polling API */
    6171              : 
    6172              : /* public - values for k_poll_event.type bitfield */
    6173            0 : #define K_POLL_TYPE_IGNORE 0
    6174            0 : #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
    6175            0 : #define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
    6176            0 : #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
    6177            0 : #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
    6178            0 : #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
    6179            0 : #define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
    6180              : 
    6181              : /* public - polling modes */
    6182            0 : enum k_poll_modes {
    6183              :         /* polling thread does not take ownership of objects when available */
    6184              :         K_POLL_MODE_NOTIFY_ONLY = 0,
    6185              : 
    6186              :         K_POLL_NUM_MODES
    6187              : };
    6188              : 
    6189              : /* public - values for k_poll_event.state bitfield */
    6190            0 : #define K_POLL_STATE_NOT_READY 0
    6191            0 : #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
    6192            0 : #define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
    6193            0 : #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
    6194            0 : #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
    6195            0 : #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
    6196            0 : #define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
    6197            0 : #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
    6198              : 
    6199              : /* public - poll signal object */
    6200            0 : struct k_poll_signal {
    6201              :         /** PRIVATE - DO NOT TOUCH */
    6202            1 :         sys_dlist_t poll_events;
    6203              : 
    6204              :         /**
    6205              :          * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
    6206              :          * user resets it to 0.
    6207              :          */
    6208            1 :         unsigned int signaled;
    6209              : 
    6210              :         /** custom result value passed to k_poll_signal_raise() if needed */
    6211            1 :         int result;
    6212              : };
    6213              : 
    6214            0 : #define K_POLL_SIGNAL_INITIALIZER(obj) \
    6215              :         { \
    6216              :         .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
    6217              :         .signaled = 0, \
    6218              :         .result = 0, \
    6219              :         }
    6220              : /**
    6221              :  * @brief Poll Event
    6222              :  *
    6223              :  */
    6224            1 : struct k_poll_event {
    6225              :         /** PRIVATE - DO NOT TOUCH */
    6226              :         sys_dnode_t _node;
    6227              : 
    6228              :         /** PRIVATE - DO NOT TOUCH */
    6229            1 :         struct z_poller *poller;
    6230              : 
    6231              :         /** optional user-specified tag, opaque, untouched by the API */
    6232            1 :         uint32_t tag:8;
    6233              : 
    6234              :         /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
    6235            1 :         uint32_t type:_POLL_NUM_TYPES;
    6236              : 
    6237              :         /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
    6238            1 :         uint32_t state:_POLL_NUM_STATES;
    6239              : 
    6240              :         /** mode of operation, from enum k_poll_modes */
    6241            1 :         uint32_t mode:1;
    6242              : 
    6243              :         /** unused bits in 32-bit word */
    6244            1 :         uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
    6245              : 
    6246              :         /** per-type data */
    6247              :         union {
    6248              :                 /* The typed_* fields below are used by K_POLL_EVENT_*INITIALIZER() macros to ensure
    6249              :                  * type safety of polled objects.
    6250              :                  */
    6251            0 :                 void *obj, *typed_K_POLL_TYPE_IGNORE;
    6252            0 :                 struct k_poll_signal *signal, *typed_K_POLL_TYPE_SIGNAL;
    6253            0 :                 struct k_sem *sem, *typed_K_POLL_TYPE_SEM_AVAILABLE;
    6254            0 :                 struct k_fifo *fifo, *typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE;
    6255            0 :                 struct k_queue *queue, *typed_K_POLL_TYPE_DATA_AVAILABLE;
    6256            0 :                 struct k_msgq *msgq, *typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE;
    6257            0 :                 struct k_pipe *pipe, *typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE;
    6258            1 :         };
    6259              : };
    6260              : 
    6261            0 : #define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
    6262              :         { \
    6263              :         .poller = NULL, \
    6264              :         .type = _event_type, \
    6265              :         .state = K_POLL_STATE_NOT_READY, \
    6266              :         .mode = _event_mode, \
    6267              :         .unused = 0, \
    6268              :         { \
    6269              :                 .typed_##_event_type = _event_obj, \
    6270              :         }, \
    6271              :         }
    6272              : 
    6273              : #define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
    6274            0 :                                         event_tag) \
    6275              :         { \
    6276              :         .tag = event_tag, \
    6277              :         .type = _event_type, \
    6278              :         .state = K_POLL_STATE_NOT_READY, \
    6279              :         .mode = _event_mode, \
    6280              :         .unused = 0, \
    6281              :         { \
    6282              :                 .typed_##_event_type = _event_obj, \
    6283              :         }, \
    6284              :         }
    6285              : 
    6286              : /**
    6287              :  * @brief Initialize one struct k_poll_event instance
    6288              :  *
    6289              :  * After this routine is called on a poll event, the event it ready to be
    6290              :  * placed in an event array to be passed to k_poll().
    6291              :  *
    6292              :  * @param event The event to initialize.
    6293              :  * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
    6294              :  *             values. Only values that apply to the same object being polled
    6295              :  *             can be used together. Choosing K_POLL_TYPE_IGNORE disables the
    6296              :  *             event.
    6297              :  * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
    6298              :  * @param obj Kernel object or poll signal.
    6299              :  */
    6300              : 
    6301            1 : void k_poll_event_init(struct k_poll_event *event, uint32_t type,
    6302              :                               int mode, void *obj);
    6303              : 
    6304              : /**
    6305              :  * @brief Wait for one or many of multiple poll events to occur
    6306              :  *
    6307              :  * This routine allows a thread to wait concurrently for one or many of
    6308              :  * multiple poll events to have occurred. Such events can be a kernel object
    6309              :  * being available, like a semaphore, or a poll signal event.
    6310              :  *
    6311              :  * When an event notifies that a kernel object is available, the kernel object
    6312              :  * is not "given" to the thread calling k_poll(): it merely signals the fact
    6313              :  * that the object was available when the k_poll() call was in effect. Also,
    6314              :  * all threads trying to acquire an object the regular way, i.e. by pending on
    6315              :  * the object, have precedence over the thread polling on the object. This
    6316              :  * means that the polling thread will never get the poll event on an object
    6317              :  * until the object becomes available and its pend queue is empty. For this
    6318              :  * reason, the k_poll() call is more effective when the objects being polled
    6319              :  * only have one thread, the polling thread, trying to acquire them.
    6320              :  *
    6321              :  * When k_poll() returns 0, the caller should loop on all the events that were
    6322              :  * passed to k_poll() and check the state field for the values that were
    6323              :  * expected and take the associated actions.
    6324              :  *
    6325              :  * Before being reused for another call to k_poll(), the user has to reset the
    6326              :  * state field to K_POLL_STATE_NOT_READY.
    6327              :  *
    6328              :  * When called from user mode, a temporary memory allocation is required from
    6329              :  * the caller's resource pool.
    6330              :  *
    6331              :  * @param events An array of events to be polled for.
    6332              :  * @param num_events The number of events in the array.
    6333              :  * @param timeout Waiting period for an event to be ready,
    6334              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    6335              :  *
    6336              :  * @retval 0 One or more events are ready.
    6337              :  * @retval -EAGAIN Waiting period timed out.
    6338              :  * @retval -EINTR Polling has been interrupted, e.g. with
    6339              :  *         k_queue_cancel_wait(). All output events are still set and valid,
    6340              :  *         cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
    6341              :  *         words, -EINTR status means that at least one of output events is
    6342              :  *         K_POLL_STATE_CANCELLED.
    6343              :  * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
    6344              :  * @retval -EINVAL Bad parameters (user mode only)
    6345              :  */
    6346              : 
    6347            1 : __syscall int k_poll(struct k_poll_event *events, int num_events,
    6348              :                      k_timeout_t timeout);
    6349              : 
    6350              : /**
    6351              :  * @brief Initialize a poll signal object.
    6352              :  *
    6353              :  * Ready a poll signal object to be signaled via k_poll_signal_raise().
    6354              :  *
    6355              :  * @param sig A poll signal.
    6356              :  */
    6357              : 
    6358            1 : __syscall void k_poll_signal_init(struct k_poll_signal *sig);
    6359              : 
    6360              : /**
    6361              :  * @brief Reset a poll signal object's state to unsignaled.
    6362              :  *
    6363              :  * @param sig A poll signal object
    6364              :  */
    6365            1 : __syscall void k_poll_signal_reset(struct k_poll_signal *sig);
    6366              : 
    6367              : /**
    6368              :  * @brief Fetch the signaled state and result value of a poll signal
    6369              :  *
    6370              :  * @param sig A poll signal object
    6371              :  * @param signaled An integer buffer which will be written nonzero if the
    6372              :  *                 object was signaled
    6373              :  * @param result An integer destination buffer which will be written with the
    6374              :  *                 result value if the object was signaled, or an undefined
    6375              :  *                 value if it was not.
    6376              :  */
    6377            1 : __syscall void k_poll_signal_check(struct k_poll_signal *sig,
    6378              :                                    unsigned int *signaled, int *result);
    6379              : 
    6380              : /**
    6381              :  * @brief Signal a poll signal object.
    6382              :  *
    6383              :  * This routine makes ready a poll signal, which is basically a poll event of
    6384              :  * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
    6385              :  * made ready to run. A @a result value can be specified.
    6386              :  *
    6387              :  * The poll signal contains a 'signaled' field that, when set by
    6388              :  * k_poll_signal_raise(), stays set until the user sets it back to 0 with
    6389              :  * k_poll_signal_reset(). It thus has to be reset by the user before being
    6390              :  * passed again to k_poll() or k_poll() will consider it being signaled, and
    6391              :  * will return immediately.
    6392              :  *
    6393              :  * @note The result is stored and the 'signaled' field is set even if
    6394              :  * this function returns an error indicating that an expiring poll was
    6395              :  * not notified.  The next k_poll() will detect the missed raise.
    6396              :  *
    6397              :  * @param sig A poll signal.
    6398              :  * @param result The value to store in the result field of the signal.
    6399              :  *
    6400              :  * @retval 0 The signal was delivered successfully.
    6401              :  * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
    6402              :  */
    6403              : 
    6404            1 : __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
    6405              : 
    6406              : /** @} */
    6407              : 
    6408              : /**
    6409              :  * @defgroup cpu_idle_apis CPU Idling APIs
    6410              :  * @ingroup kernel_apis
    6411              :  * @{
    6412              :  */
    6413              : /**
    6414              :  * @brief Make the CPU idle.
    6415              :  *
    6416              :  * This function makes the CPU idle until an event wakes it up.
    6417              :  *
    6418              :  * In a regular system, the idle thread should be the only thread responsible
    6419              :  * for making the CPU idle and triggering any type of power management.
    6420              :  * However, in some more constrained systems, such as a single-threaded system,
    6421              :  * the only thread would be responsible for this if needed.
    6422              :  *
    6423              :  * @note In some architectures, before returning, the function unmasks interrupts
    6424              :  * unconditionally.
    6425              :  */
    6426            1 : static inline void k_cpu_idle(void)
    6427              : {
    6428              :         arch_cpu_idle();
    6429              : }
    6430              : 
    6431              : /**
    6432              :  * @brief Make the CPU idle in an atomic fashion.
    6433              :  *
    6434              :  * Similar to k_cpu_idle(), but must be called with interrupts locked.
    6435              :  *
    6436              :  * Enabling interrupts and entering a low-power mode will be atomic,
    6437              :  * i.e. there will be no period of time where interrupts are enabled before
    6438              :  * the processor enters a low-power mode.
    6439              :  *
    6440              :  * After waking up from the low-power mode, the interrupt lockout state will
    6441              :  * be restored as if by irq_unlock(key).
    6442              :  *
    6443              :  * @param key Interrupt locking key obtained from irq_lock().
    6444              :  */
    6445            1 : static inline void k_cpu_atomic_idle(unsigned int key)
    6446              : {
    6447              :         arch_cpu_atomic_idle(key);
    6448              : }
    6449              : 
    6450              : /**
    6451              :  * @}
    6452              :  */
    6453              : 
    6454              : /**
    6455              :  * @cond INTERNAL_HIDDEN
    6456              :  * @internal
    6457              :  */
    6458              : #ifdef ARCH_EXCEPT
    6459              : /* This architecture has direct support for triggering a CPU exception */
    6460              : #define z_except_reason(reason) ARCH_EXCEPT(reason)
    6461              : #else
    6462              : 
    6463              : #if !defined(CONFIG_ASSERT_NO_FILE_INFO)
    6464              : #define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
    6465              : #else
    6466              : #define __EXCEPT_LOC()
    6467              : #endif
    6468              : 
    6469              : /* NOTE: This is the implementation for arches that do not implement
    6470              :  * ARCH_EXCEPT() to generate a real CPU exception.
    6471              :  *
    6472              :  * We won't have a real exception frame to determine the PC value when
    6473              :  * the oops occurred, so print file and line number before we jump into
    6474              :  * the fatal error handler.
    6475              :  */
    6476              : #define z_except_reason(reason) do { \
    6477              :                 __EXCEPT_LOC();              \
    6478              :                 z_fatal_error(reason, NULL); \
    6479              :         } while (false)
    6480              : 
    6481              : #endif /* _ARCH__EXCEPT */
    6482              : /**
    6483              :  * INTERNAL_HIDDEN @endcond
    6484              :  */
    6485              : 
    6486              : /**
    6487              :  * @brief Fatally terminate a thread
    6488              :  *
    6489              :  * This should be called when a thread has encountered an unrecoverable
    6490              :  * runtime condition and needs to terminate. What this ultimately
    6491              :  * means is determined by the _fatal_error_handler() implementation, which
    6492              :  * will be called with reason code K_ERR_KERNEL_OOPS.
    6493              :  *
    6494              :  * If this is called from ISR context, the default system fatal error handler
    6495              :  * will treat it as an unrecoverable system error, just like k_panic().
    6496              :  */
    6497            1 : #define k_oops()        z_except_reason(K_ERR_KERNEL_OOPS)
    6498              : 
    6499              : /**
    6500              :  * @brief Fatally terminate the system
    6501              :  *
    6502              :  * This should be called when the Zephyr kernel has encountered an
    6503              :  * unrecoverable runtime condition and needs to terminate. What this ultimately
    6504              :  * means is determined by the _fatal_error_handler() implementation, which
    6505              :  * will be called with reason code K_ERR_KERNEL_PANIC.
    6506              :  */
    6507            1 : #define k_panic()       z_except_reason(K_ERR_KERNEL_PANIC)
    6508              : 
    6509              : /**
    6510              :  * @cond INTERNAL_HIDDEN
    6511              :  */
    6512              : 
    6513              : /*
    6514              :  * private APIs that are utilized by one or more public APIs
    6515              :  */
    6516              : 
    6517              : /**
    6518              :  * @internal
    6519              :  */
    6520              : void z_timer_expiration_handler(struct _timeout *timeout);
    6521              : /**
    6522              :  * INTERNAL_HIDDEN @endcond
    6523              :  */
    6524              : 
    6525              : #ifdef CONFIG_PRINTK
    6526              : /**
    6527              :  * @brief Emit a character buffer to the console device
    6528              :  *
    6529              :  * @param c String of characters to print
    6530              :  * @param n The length of the string
    6531              :  *
    6532              :  */
    6533              : __syscall void k_str_out(char *c, size_t n);
    6534              : #endif
    6535              : 
    6536              : /**
    6537              :  * @defgroup float_apis Floating Point APIs
    6538              :  * @ingroup kernel_apis
    6539              :  * @{
    6540              :  */
    6541              : 
    6542              : /**
    6543              :  * @brief Disable preservation of floating point context information.
    6544              :  *
    6545              :  * This routine informs the kernel that the specified thread
    6546              :  * will no longer be using the floating point registers.
    6547              :  *
    6548              :  * @warning
    6549              :  * Some architectures apply restrictions on how the disabling of floating
    6550              :  * point preservation may be requested, see arch_float_disable.
    6551              :  *
    6552              :  * @warning
    6553              :  * This routine should only be used to disable floating point support for
    6554              :  * a thread that currently has such support enabled.
    6555              :  *
    6556              :  * @param thread ID of thread.
    6557              :  *
    6558              :  * @retval 0        On success.
    6559              :  * @retval -ENOTSUP If the floating point disabling is not implemented.
    6560              :  *         -EINVAL  If the floating point disabling could not be performed.
    6561              :  */
    6562            1 : __syscall int k_float_disable(struct k_thread *thread);
    6563              : 
    6564              : /**
    6565              :  * @brief Enable preservation of floating point context information.
    6566              :  *
    6567              :  * This routine informs the kernel that the specified thread
    6568              :  * will use the floating point registers.
    6569              : 
    6570              :  * Invoking this routine initializes the thread's floating point context info
    6571              :  * to that of an FPU that has been reset. The next time the thread is scheduled
    6572              :  * by z_swap() it will either inherit an FPU that is guaranteed to be in a
    6573              :  * "sane" state (if the most recent user of the FPU was cooperatively swapped
    6574              :  * out) or the thread's own floating point context will be loaded (if the most
    6575              :  * recent user of the FPU was preempted, or if this thread is the first user
    6576              :  * of the FPU). Thereafter, the kernel will protect the thread's FP context
    6577              :  * so that it is not altered during a preemptive context switch.
    6578              :  *
    6579              :  * The @a options parameter indicates which floating point register sets will
    6580              :  * be used by the specified thread.
    6581              :  *
    6582              :  * For x86 options:
    6583              :  *
    6584              :  * - K_FP_REGS  indicates x87 FPU and MMX registers only
    6585              :  * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
    6586              :  *
    6587              :  * @warning
    6588              :  * Some architectures apply restrictions on how the enabling of floating
    6589              :  * point preservation may be requested, see arch_float_enable.
    6590              :  *
    6591              :  * @warning
    6592              :  * This routine should only be used to enable floating point support for
    6593              :  * a thread that currently has such support enabled.
    6594              :  *
    6595              :  * @param thread  ID of thread.
    6596              :  * @param options architecture dependent options
    6597              :  *
    6598              :  * @retval 0        On success.
    6599              :  * @retval -ENOTSUP If the floating point enabling is not implemented.
    6600              :  *         -EINVAL  If the floating point enabling could not be performed.
    6601              :  */
    6602            1 : __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
    6603              : 
    6604              : /**
    6605              :  * @}
    6606              :  */
    6607              : 
    6608              : /**
    6609              :  * @brief Get the runtime statistics of a thread
    6610              :  *
    6611              :  * @param thread ID of thread.
    6612              :  * @param stats Pointer to struct to copy statistics into.
    6613              :  * @return -EINVAL if null pointers, otherwise 0
    6614              :  */
    6615            1 : int k_thread_runtime_stats_get(k_tid_t thread,
    6616              :                                k_thread_runtime_stats_t *stats);
    6617              : 
    6618              : /**
    6619              :  * @brief Get the runtime statistics of all threads
    6620              :  *
    6621              :  * @param stats Pointer to struct to copy statistics into.
    6622              :  * @return -EINVAL if null pointers, otherwise 0
    6623              :  */
    6624            1 : int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
    6625              : 
    6626              : /**
    6627              :  * @brief Get the runtime statistics of all threads on specified cpu
    6628              :  *
    6629              :  * @param cpu The cpu number
    6630              :  * @param stats Pointer to struct to copy statistics into.
    6631              :  * @return -EINVAL if null pointers, otherwise 0
    6632              :  */
    6633            1 : int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats);
    6634              : 
    6635              : /**
    6636              :  * @brief Enable gathering of runtime statistics for specified thread
    6637              :  *
    6638              :  * This routine enables the gathering of runtime statistics for the specified
    6639              :  * thread.
    6640              :  *
    6641              :  * @param thread ID of thread
    6642              :  * @return -EINVAL if invalid thread ID, otherwise 0
    6643              :  */
    6644            1 : int k_thread_runtime_stats_enable(k_tid_t thread);
    6645              : 
    6646              : /**
    6647              :  * @brief Disable gathering of runtime statistics for specified thread
    6648              :  *
    6649              :  * This routine disables the gathering of runtime statistics for the specified
    6650              :  * thread.
    6651              :  *
    6652              :  * @param thread ID of thread
    6653              :  * @return -EINVAL if invalid thread ID, otherwise 0
    6654              :  */
    6655            1 : int k_thread_runtime_stats_disable(k_tid_t thread);
    6656              : 
    6657              : /**
    6658              :  * @brief Enable gathering of system runtime statistics
    6659              :  *
    6660              :  * This routine enables the gathering of system runtime statistics. Note that
    6661              :  * it does not affect the gathering of similar statistics for individual
    6662              :  * threads.
    6663              :  */
    6664            1 : void k_sys_runtime_stats_enable(void);
    6665              : 
    6666              : /**
    6667              :  * @brief Disable gathering of system runtime statistics
    6668              :  *
    6669              :  * This routine disables the gathering of system runtime statistics. Note that
    6670              :  * it does not affect the gathering of similar statistics for individual
    6671              :  * threads.
    6672              :  */
    6673            1 : void k_sys_runtime_stats_disable(void);
    6674              : 
    6675              : #ifdef __cplusplus
    6676              : }
    6677              : #endif
    6678              : 
    6679              : #include <zephyr/tracing/tracing.h>
    6680              : #include <zephyr/syscalls/kernel.h>
    6681              : 
    6682              : #endif /* !_ASMLANGUAGE */
    6683              : 
    6684              : #endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
        

Generated by: LCOV version 2.0-1