LCOV - code coverage report
Current view: top level - zephyr - kernel.h Coverage Total Hit
Test: new.info Lines: 79.5 % 405 322
Test Date: 2025-09-05 20:47:19

            Line data    Source code
       1            1 : /*
       2              :  * Copyright (c) 2016, Wind River Systems, Inc.
       3              :  *
       4              :  * SPDX-License-Identifier: Apache-2.0
       5              :  */
       6              : 
       7              : /**
       8              :  * @file
       9              :  *
      10              :  * @brief Public kernel APIs.
      11              :  */
      12              : 
      13              : #ifndef ZEPHYR_INCLUDE_KERNEL_H_
      14              : #define ZEPHYR_INCLUDE_KERNEL_H_
      15              : 
      16              : #if !defined(_ASMLANGUAGE)
      17              : #include <zephyr/kernel_includes.h>
      18              : #include <errno.h>
      19              : #include <limits.h>
      20              : #include <stdbool.h>
      21              : #include <zephyr/toolchain.h>
      22              : #include <zephyr/tracing/tracing_macros.h>
      23              : #include <zephyr/sys/mem_stats.h>
      24              : #include <zephyr/sys/iterable_sections.h>
      25              : #include <zephyr/sys/ring_buffer.h>
      26              : 
      27              : #ifdef __cplusplus
      28              : extern "C" {
      29              : #endif
      30              : 
      31              : /*
      32              :  * Zephyr currently assumes the size of a couple standard types to simplify
      33              :  * print string formats. Let's make sure this doesn't change without notice.
      34              :  */
      35              : BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
      36              : BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
      37              : BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));
      38              : 
      39              : /**
      40              :  * @brief Kernel APIs
      41              :  * @defgroup kernel_apis Kernel APIs
      42              :  * @since 1.0
      43              :  * @version 1.0.0
      44              :  * @{
      45              :  * @}
      46              :  */
      47              : 
      48            0 : #define K_ANY NULL
      49              : 
      50              : #if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
      51              : #error Zero available thread priorities defined!
      52              : #endif
      53              : 
      54            0 : #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
      55            0 : #define K_PRIO_PREEMPT(x) (x)
      56              : 
      57            0 : #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
      58            0 : #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
      59            0 : #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
      60            0 : #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
      61            0 : #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
      62              : 
      63              : #ifdef CONFIG_POLL
      64              : #define Z_POLL_EVENT_OBJ_INIT(obj) \
      65              :         .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
      66              : #define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
      67              : #else
      68              : #define Z_POLL_EVENT_OBJ_INIT(obj)
      69              : #define Z_DECL_POLL_EVENT
      70              : #endif
      71              : 
      72              : struct k_thread;
      73              : struct k_mutex;
      74              : struct k_sem;
      75              : struct k_msgq;
      76              : struct k_mbox;
      77              : struct k_pipe;
      78              : struct k_queue;
      79              : struct k_fifo;
      80              : struct k_lifo;
      81              : struct k_stack;
      82              : struct k_mem_slab;
      83              : struct k_timer;
      84              : struct k_poll_event;
      85              : struct k_poll_signal;
      86              : struct k_mem_domain;
      87              : struct k_mem_partition;
      88              : struct k_futex;
      89              : struct k_event;
      90              : 
      91            0 : enum execution_context_types {
      92              :         K_ISR = 0,
      93              :         K_COOP_THREAD,
      94              :         K_PREEMPT_THREAD,
      95              : };
      96              : 
      97              : /* private, used by k_poll and k_work_poll */
      98              : struct k_work_poll;
      99              : typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
     100              : 
     101              : /**
     102              :  * @addtogroup thread_apis
     103              :  * @{
     104              :  */
     105              : 
     106            0 : typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
     107              :                                    void *user_data);
     108              : 
     109              : /**
     110              :  * @brief Iterate over all the threads in the system.
     111              :  *
     112              :  * This routine iterates over all the threads in the system and
     113              :  * calls the user_cb function for each thread.
     114              :  *
     115              :  * @param user_cb Pointer to the user callback function.
     116              :  * @param user_data Pointer to user data.
     117              :  *
     118              :  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
     119              :  * to be effective.
     120              :  * @note This API uses @ref k_spin_lock to protect the _kernel.threads
     121              :  * list which means creation of new threads and terminations of existing
     122              :  * threads are blocked until this API returns.
     123              :  */
     124            1 : void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
     125              : 
     126              : /**
      127              :  * @brief Iterate over all the threads running on the specified cpu.
     128              :  *
      129              :  * This function does the same thing as k_thread_foreach(),
      130              :  * but it only loops through the threads running on the specified cpu.
      131              :  * If CONFIG_SMP is not defined, this is the same as
      132              :  * k_thread_foreach(), with an assert requiring cpu == 0.
     133              :  *
     134              :  * @param cpu The filtered cpu number
     135              :  * @param user_cb Pointer to the user callback function.
     136              :  * @param user_data Pointer to user data.
     137              :  *
     138              :  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
     139              :  * to be effective.
     140              :  * @note This API uses @ref k_spin_lock to protect the _kernel.threads
     141              :  * list which means creation of new threads and terminations of existing
     142              :  * threads are blocked until this API returns.
     143              :  */
     144              : #ifdef CONFIG_SMP
     145            1 : void k_thread_foreach_filter_by_cpu(unsigned int cpu,
     146              :                                     k_thread_user_cb_t user_cb, void *user_data);
     147              : #else
     148              : static inline
     149              : void k_thread_foreach_filter_by_cpu(unsigned int cpu,
     150              :                                     k_thread_user_cb_t user_cb, void *user_data)
     151              : {
     152              :         __ASSERT(cpu == 0, "cpu filter out of bounds");
     153              :         ARG_UNUSED(cpu);
     154              :         k_thread_foreach(user_cb, user_data);
     155              : }
     156              : #endif
     157              : 
     158              : /**
     159              :  * @brief Iterate over all the threads in the system without locking.
     160              :  *
      161              :  * This routine works exactly the same as @ref k_thread_foreach
     162              :  * but unlocks interrupts when user_cb is executed.
     163              :  *
     164              :  * @param user_cb Pointer to the user callback function.
     165              :  * @param user_data Pointer to user data.
     166              :  *
     167              :  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
     168              :  * to be effective.
     169              :  * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
     170              :  * queue elements. It unlocks it during user callback function processing.
     171              :  * If a new task is created when this @c foreach function is in progress,
     172              :  * the added new task would not be included in the enumeration.
     173              :  * If a task is aborted during this enumeration, there would be a race here
     174              :  * and there is a possibility that this aborted task would be included in the
     175              :  * enumeration.
     176              :  * @note If the task is aborted and the memory occupied by its @c k_thread
     177              :  * structure is reused when this @c k_thread_foreach_unlocked is in progress
      178              :  * it might even cause the system to behave unstably.
     179              :  * This function may never return, as it would follow some @c next task
     180              :  * pointers treating given pointer as a pointer to the k_thread structure
     181              :  * while it is something different right now.
     182              :  * Do not reuse the memory that was occupied by k_thread structure of aborted
     183              :  * task if it was aborted after this function was called in any context.
     184              :  */
     185            1 : void k_thread_foreach_unlocked(
     186              :         k_thread_user_cb_t user_cb, void *user_data);
     187              : 
     188              : /**
      189              :  * @brief Iterate over the threads running on the current cpu without locking.
     190              :  *
     191              :  * This function does otherwise the same thing as
     192              :  * k_thread_foreach_unlocked(), but it only loops through the threads
      193              :  * running on the specified cpu. If CONFIG_SMP is not defined,
      194              :  * this is the same as k_thread_foreach_unlocked(), with an
     195              :  * assert requiring cpu == 0.
     196              :  *
     197              :  * @param cpu The filtered cpu number
     198              :  * @param user_cb Pointer to the user callback function.
     199              :  * @param user_data Pointer to user data.
     200              :  *
     201              :  * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
     202              :  * to be effective.
     203              :  * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
     204              :  * queue elements. It unlocks it during user callback function processing.
     205              :  * If a new task is created when this @c foreach function is in progress,
     206              :  * the added new task would not be included in the enumeration.
     207              :  * If a task is aborted during this enumeration, there would be a race here
     208              :  * and there is a possibility that this aborted task would be included in the
     209              :  * enumeration.
     210              :  * @note If the task is aborted and the memory occupied by its @c k_thread
     211              :  * structure is reused when this @c k_thread_foreach_unlocked is in progress
      212              :  * it might even cause the system to behave unstably.
     213              :  * This function may never return, as it would follow some @c next task
     214              :  * pointers treating given pointer as a pointer to the k_thread structure
     215              :  * while it is something different right now.
     216              :  * Do not reuse the memory that was occupied by k_thread structure of aborted
     217              :  * task if it was aborted after this function was called in any context.
     218              :  */
     219              : #ifdef CONFIG_SMP
     220            1 : void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
     221              :                                              k_thread_user_cb_t user_cb, void *user_data);
     222              : #else
     223              : static inline
     224              : void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
     225              :                                              k_thread_user_cb_t user_cb, void *user_data)
     226              : {
     227              :         __ASSERT(cpu == 0, "cpu filter out of bounds");
     228              :         ARG_UNUSED(cpu);
     229              :         k_thread_foreach_unlocked(user_cb, user_data);
     230              : }
     231              : #endif
     232              : 
     233              : /** @} */
     234              : 
     235              : /**
     236              :  * @defgroup thread_apis Thread APIs
     237              :  * @ingroup kernel_apis
     238              :  * @{
     239              :  */
     240              : 
     241              : #endif /* !_ASMLANGUAGE */
     242              : 
     243              : 
     244              : /*
     245              :  * Thread user options. May be needed by assembly code. Common part uses low
     246              :  * bits, arch-specific use high bits.
     247              :  */
     248              : 
     249              : /**
     250              :  * @brief system thread that must not abort
      251              :  */
     252            1 : #define K_ESSENTIAL (BIT(0))
     253              : 
     254            0 : #define K_FP_IDX 1
     255              : /**
     256              :  * @brief FPU registers are managed by context switch
     257              :  *
     258              :  * @details
     259              :  * This option indicates that the thread uses the CPU's floating point
     260              :  * registers. This instructs the kernel to take additional steps to save
     261              :  * and restore the contents of these registers when scheduling the thread.
     262              :  * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
     263              :  */
     264            1 : #define K_FP_REGS (BIT(K_FP_IDX))
     265              : 
     266              : /**
     267              :  * @brief user mode thread
     268              :  *
     269              :  * This thread has dropped from supervisor mode to user mode and consequently
     270              :  * has additional restrictions
     271              :  */
     272            1 : #define K_USER (BIT(2))
     273              : 
     274              : /**
     275              :  * @brief Inherit Permissions
     276              :  *
     277              :  * @details
     278              :  * Indicates that the thread being created should inherit all kernel object
     279              :  * permissions from the thread that created it. No effect if
     280              :  * @kconfig{CONFIG_USERSPACE} is not enabled.
     281              :  */
     282            1 : #define K_INHERIT_PERMS (BIT(3))
     283              : 
     284              : /**
     285              :  * @brief Callback item state
     286              :  *
     287              :  * @details
     288              :  * This is a single bit of state reserved for "callback manager"
     289              :  * utilities (p4wq initially) who need to track operations invoked
     290              :  * from within a user-provided callback they have been invoked.
     291              :  * Effectively it serves as a tiny bit of zero-overhead TLS data.
     292              :  */
     293            1 : #define K_CALLBACK_STATE (BIT(4))
     294              : 
     295              : /**
     296              :  * @brief DSP registers are managed by context switch
     297              :  *
     298              :  * @details
     299              :  * This option indicates that the thread uses the CPU's DSP registers.
     300              :  * This instructs the kernel to take additional steps to save and
     301              :  * restore the contents of these registers when scheduling the thread.
     302              :  * No effect if @kconfig{CONFIG_DSP_SHARING} is not enabled.
     303              :  */
     304            1 : #define K_DSP_IDX 6
     305            0 : #define K_DSP_REGS (BIT(K_DSP_IDX))
     306              : 
     307              : /**
     308              :  * @brief AGU registers are managed by context switch
     309              :  *
     310              :  * @details
     311              :  * This option indicates that the thread uses the ARC processor's XY
     312              :  * memory and DSP feature. Often used with @kconfig{CONFIG_ARC_AGU_SHARING}.
     313              :  * No effect if @kconfig{CONFIG_ARC_AGU_SHARING} is not enabled.
     314              :  */
     315            1 : #define K_AGU_IDX 7
     316            0 : #define K_AGU_REGS (BIT(K_AGU_IDX))
     317              : 
     318              : /**
     319              :  * @brief FP and SSE registers are managed by context switch on x86
     320              :  *
     321              :  * @details
     322              :  * This option indicates that the thread uses the x86 CPU's floating point
     323              :  * and SSE registers. This instructs the kernel to take additional steps to
     324              :  * save and restore the contents of these registers when scheduling
     325              :  * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
     326              :  */
     327            1 : #define K_SSE_REGS (BIT(7))
     328              : 
     329              : /* end - thread options */
     330              : 
     331              : #if !defined(_ASMLANGUAGE)
     332              : /**
     333              :  * @brief Dynamically allocate a thread stack.
     334              :  *
     335              :  * Dynamically allocate a thread stack either from a pool of thread stacks of
     336              :  * size @kconfig{CONFIG_DYNAMIC_THREAD_POOL_SIZE}, or from the system heap.
     337              :  * Order is determined by the @kconfig{CONFIG_DYNAMIC_THREAD_PREFER_ALLOC} and
     338              :  * @kconfig{CONFIG_DYNAMIC_THREAD_PREFER_POOL} options. Thread stacks from the
     339              :  * pool are of maximum size @kconfig{CONFIG_DYNAMIC_THREAD_STACK_SIZE}.
     340              :  *
     341              :  * @note When no longer required, thread stacks allocated with
     342              :  * `k_thread_stack_alloc()` must be freed with @ref k_thread_stack_free to
     343              :  * avoid leaking memory.
     344              :  *
     345              :  * @param size Stack size in bytes.
     346              :  * @param flags Stack creation flags, or 0.
     347              :  *
     348              :  * @retval the allocated thread stack on success.
     349              :  * @retval NULL on failure.
     350              :  *
     351              :  * Relevant stack creation flags include:
     352              :  * - @ref K_USER allocate a userspace thread (requires @kconfig{CONFIG_USERSPACE})
     353              :  *
     354              :  * @see @kconfig{CONFIG_DYNAMIC_THREAD}
     355              :  */
     356            1 : __syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);
     357              : 
     358              : /**
     359              :  * @brief Free a dynamically allocated thread stack.
     360              :  *
     361              :  * @param stack Pointer to the thread stack.
     362              :  *
     363              :  * @retval 0 on success.
     364              :  * @retval -EBUSY if the thread stack is in use.
     365              :  * @retval -EINVAL if @p stack is invalid.
     366              :  * @retval -ENOSYS if dynamic thread stack allocation is disabled
     367              :  *
     368              :  * @see @kconfig{CONFIG_DYNAMIC_THREAD}
     369              :  */
     370            1 : __syscall int k_thread_stack_free(k_thread_stack_t *stack);
     371              : 
     372              : /**
     373              :  * @brief Create a thread.
     374              :  *
     375              :  * This routine initializes a thread, then schedules it for execution.
     376              :  *
     377              :  * The new thread may be scheduled for immediate execution or a delayed start.
     378              :  * If the newly spawned thread does not have a delayed start the kernel
     379              :  * scheduler may preempt the current thread to allow the new thread to
     380              :  * execute.
     381              :  *
     382              :  * Thread options are architecture-specific, and can include K_ESSENTIAL,
     383              :  * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
     384              :  * them using "|" (the logical OR operator).
     385              :  *
     386              :  * Stack objects passed to this function may be statically allocated with
     387              :  * either of these macros in order to be portable:
     388              :  *
     389              :  * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
     390              :  *   supervisor threads.
     391              :  * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
     392              :  *   threads only. These stacks use less memory if CONFIG_USERSPACE is
     393              :  *   enabled.
     394              :  *
     395              :  * Alternatively, the stack may be dynamically allocated using
     396              :  * @ref k_thread_stack_alloc.
     397              :  *
     398              :  * The stack_size parameter has constraints. It must either be:
     399              :  *
     400              :  * - The original size value passed to K_THREAD_STACK_DEFINE() or
     401              :  *   K_KERNEL_STACK_DEFINE()
     402              :  * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
     403              :  *   defined with K_THREAD_STACK_DEFINE()
     404              :  * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
     405              :  *   defined with K_KERNEL_STACK_DEFINE().
     406              :  *
     407              :  * Using other values, or sizeof(stack) may produce undefined behavior.
     408              :  *
     409              :  * @param new_thread Pointer to uninitialized struct k_thread
     410              :  * @param stack Pointer to the stack space.
     411              :  * @param stack_size Stack size in bytes.
     412              :  * @param entry Thread entry function.
     413              :  * @param p1 1st entry point parameter.
     414              :  * @param p2 2nd entry point parameter.
     415              :  * @param p3 3rd entry point parameter.
     416              :  * @param prio Thread priority.
     417              :  * @param options Thread options.
     418              :  * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
     419              :  *
     420              :  * @return ID of new thread.
     421              :  *
     422              :  */
     423            1 : __syscall k_tid_t k_thread_create(struct k_thread *new_thread,
     424              :                                   k_thread_stack_t *stack,
     425              :                                   size_t stack_size,
     426              :                                   k_thread_entry_t entry,
     427              :                                   void *p1, void *p2, void *p3,
     428              :                                   int prio, uint32_t options, k_timeout_t delay);
     429              : 
     430              : /**
     431              :  * @brief Drop a thread's privileges permanently to user mode
     432              :  *
     433              :  * This allows a supervisor thread to be re-used as a user thread.
     434              :  * This function does not return, but control will transfer to the provided
     435              :  * entry point as if this was a new user thread.
     436              :  *
     437              :  * The implementation ensures that the stack buffer contents are erased.
     438              :  * Any thread-local storage will be reverted to a pristine state.
     439              :  *
     440              :  * Memory domain membership, resource pool assignment, kernel object
     441              :  * permissions, priority, and thread options are preserved.
     442              :  *
     443              :  * A common use of this function is to re-use the main thread as a user thread
     444              :  * once all supervisor mode-only tasks have been completed.
     445              :  *
     446              :  * @param entry Function to start executing from
     447              :  * @param p1 1st entry point parameter
     448              :  * @param p2 2nd entry point parameter
     449              :  * @param p3 3rd entry point parameter
     450              :  */
     451            1 : FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
     452              :                                                    void *p1, void *p2,
     453              :                                                    void *p3);
     454              : 
     455              : /**
     456              :  * @brief Grant a thread access to a set of kernel objects
     457              :  *
     458              :  * This is a convenience function. For the provided thread, grant access to
     459              :  * the remaining arguments, which must be pointers to kernel objects.
     460              :  *
     461              :  * The thread object must be initialized (i.e. running). The objects don't
     462              :  * need to be.
     463              :  * Note that NULL shouldn't be passed as an argument.
     464              :  *
     465              :  * @param thread Thread to grant access to objects
     466              :  * @param ... list of kernel object pointers
     467              :  */
     468            1 : #define k_thread_access_grant(thread, ...) \
     469              :         FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)
     470              : 
     471              : /**
     472              :  * @brief Assign a resource memory pool to a thread
     473              :  *
     474              :  * By default, threads have no resource pool assigned unless their parent
     475              :  * thread has a resource pool, in which case it is inherited. Multiple
     476              :  * threads may be assigned to the same memory pool.
     477              :  *
     478              :  * Changing a thread's resource pool will not migrate allocations from the
     479              :  * previous pool.
     480              :  *
     481              :  * @param thread Target thread to assign a memory pool for resource requests.
     482              :  * @param heap Heap object to use for resources,
     483              :  *             or NULL if the thread should no longer have a memory pool.
     484              :  */
     485            1 : static inline void k_thread_heap_assign(struct k_thread *thread,
     486              :                                         struct k_heap *heap)
     487              : {
     488              :         thread->resource_pool = heap;
     489              : }
     490              : 
     491              : #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
     492              : /**
     493              :  * @brief Obtain stack usage information for the specified thread
     494              :  *
     495              :  * User threads will need to have permission on the target thread object.
     496              :  *
     497              :  * Some hardware may prevent inspection of a stack buffer currently in use.
     498              :  * If this API is called from supervisor mode, on the currently running thread,
     499              :  * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
     500              :  * error will be generated.
     501              :  *
     502              :  * @param thread Thread to inspect stack information
     503              :  * @param unused_ptr Output parameter, filled in with the unused stack space
     504              :  *      of the target thread in bytes.
     505              :  * @return 0 on success
     506              :  * @return -EBADF Bad thread object (user mode only)
     507              :  * @return -EPERM No permissions on thread object (user mode only)
      508              :  * @return -ENOTSUP Forbidden by hardware policy
     509              :  * @return -EINVAL Thread is uninitialized or exited (user mode only)
     510              :  * @return -EFAULT Bad memory address for unused_ptr (user mode only)
     511              :  */
     512              : __syscall int k_thread_stack_space_get(const struct k_thread *thread,
     513              :                                        size_t *unused_ptr);
     514              : #endif
     515              : 
     516              : #if (K_HEAP_MEM_POOL_SIZE > 0)
     517              : /**
     518              :  * @brief Assign the system heap as a thread's resource pool
     519              :  *
     520              :  * Similar to k_thread_heap_assign(), but the thread will use
     521              :  * the kernel heap to draw memory.
     522              :  *
     523              :  * Use with caution, as a malicious thread could perform DoS attacks on the
     524              :  * kernel heap.
     525              :  *
     526              :  * @param thread Target thread to assign the system heap for resource requests
     527              :  *
     528              :  */
     529              : void k_thread_system_pool_assign(struct k_thread *thread);
     530              : #endif /* (K_HEAP_MEM_POOL_SIZE > 0) */
     531              : 
     532              : /**
     533              :  * @brief Sleep until a thread exits
     534              :  *
     535              :  * The caller will be put to sleep until the target thread exits, either due
     536              :  * to being aborted, self-exiting, or taking a fatal error. This API returns
     537              :  * immediately if the thread isn't running.
     538              :  *
     539              :  * This API may only be called from ISRs with a K_NO_WAIT timeout,
     540              :  * where it can be useful as a predicate to detect when a thread has
     541              :  * aborted.
     542              :  *
     543              :  * @param thread Thread to wait to exit
     544              :  * @param timeout upper bound time to wait for the thread to exit.
     545              :  * @retval 0 success, target thread has exited or wasn't running
     546              :  * @retval -EBUSY returned without waiting
     547              :  * @retval -EAGAIN waiting period timed out
     548              :  * @retval -EDEADLK target thread is joining on the caller, or target thread
     549              :  *                  is the caller
     550              :  */
     551            1 : __syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
     552              : 
     553              : /**
     554              :  * @brief Put the current thread to sleep.
     555              :  *
     556              :  * This routine puts the current thread to sleep for @a duration,
     557              :  * specified as a k_timeout_t object.
     558              :  *
     559              :  * @param timeout Desired duration of sleep.
     560              :  *
     561              :  * @return Zero if the requested time has elapsed or the time left to
     562              :  * sleep rounded up to the nearest millisecond (e.g. if the thread was
     563              :  * awoken by the \ref k_wakeup call).  Will be clamped to INT_MAX in
     564              :  * the case where the remaining time is unrepresentable in an int32_t.
     565              :  */
     566            1 : __syscall int32_t k_sleep(k_timeout_t timeout);
     567              : 
     568              : /**
     569              :  * @brief Put the current thread to sleep.
     570              :  *
     571              :  * This routine puts the current thread to sleep for @a duration milliseconds.
     572              :  *
     573              :  * @param ms Number of milliseconds to sleep.
     574              :  *
     575              :  * @return Zero if the requested time has elapsed, or, if the thread was
     576              :  * woken up early by the \ref k_wakeup call, the time left to sleep rounded
     577              :  * up to the nearest millisecond.
     578              :  */
     579            1 : static inline int32_t k_msleep(int32_t ms)
     580              : {
     581              :         return k_sleep(Z_TIMEOUT_MS(ms)); /* thin wrapper: convert ms to k_timeout_t, delegate to k_sleep() */
     582              : }
     583              : 
     584              : /**
     585              :  * @brief Put the current thread to sleep with microsecond resolution.
     586              :  *
     587              :  * This function is unlikely to work as expected without kernel tuning.
     588              :  * In particular, because the lower bound on the duration of a sleep is
     589              :  * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
     590              :  * adjusted to achieve the resolution desired. The implications of doing
     591              :  * this must be understood before attempting to use k_usleep(). Use with
     592              :  * caution.
     593              :  *
     594              :  * @param us Number of microseconds to sleep.
     595              :  *
     596              :  * @return Zero if the requested time has elapsed or if the thread was woken up
     597              :  * by the \ref k_wakeup call, the time left to sleep rounded up to the nearest
     598              :  * microsecond.
     599              :  */
     600            1 : __syscall int32_t k_usleep(int32_t us);
     601              : 
     602              : /**
     603              :  * @brief Cause the current thread to busy wait.
     604              :  *
     605              :  * This routine causes the current thread to execute a "do nothing" loop for
     606              :  * @a usec_to_wait microseconds.
     607              :  *
     608              :  * @note The clock used for the microsecond-resolution delay here may
     609              :  * be skewed relative to the clock used for system timeouts like
     610              :  * k_sleep().  For example k_busy_wait(1000) may take slightly more or
     611              :  * less time than k_sleep(K_MSEC(1)), with the offset dependent on
     612              :  * clock tolerances.
     613              :  *
     614              :  * @note In case when @kconfig{CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE} and
     615              :  * @kconfig{CONFIG_PM} options are enabled, this function may not work.
     616              :  * The timer/clock used for delay processing may be disabled/inactive.
     617              :  */
     618            1 : __syscall void k_busy_wait(uint32_t usec_to_wait);
     619              : 
     620              : /**
     621              :  * @brief Check whether it is possible to yield in the current context.
     622              :  *
     623              :  * This routine checks whether the kernel is in a state where it is possible to
     624              :  * yield or call blocking API's. It should be used by code that needs to yield
     625              :  * to perform correctly, but can feasibly be called from contexts where that
     626              :  * is not possible. For example in the PRE_KERNEL initialization step, or when
     627              :  * being run from the idle thread.
     628              :  *
     629              :  * @return True if it is possible to yield in the current context, false otherwise.
     630              :  */
     631            1 : bool k_can_yield(void);
     632              : 
     633              : /**
     634              :  * @brief Yield the current thread.
     635              :  *
     636              :  * This routine causes the current thread to yield execution to another
     637              :  * thread of the same or higher priority. If there are no other ready threads
     638              :  * of the same or higher priority, the routine returns immediately.
     639              :  */
     640            1 : __syscall void k_yield(void);
     641              : 
     642              : /**
     643              :  * @brief Wake up a sleeping thread.
     644              :  *
     645              :  * This routine prematurely wakes up @a thread from sleeping.
     646              :  *
     647              :  * If @a thread is not currently sleeping, the routine has no effect.
     648              :  *
     649              :  * @param thread ID of thread to wake.
     650              :  */
     651            1 : __syscall void k_wakeup(k_tid_t thread);
     652              : 
     653              : /**
     654              :  * @brief Query thread ID of the current thread.
     655              :  *
     656              :  * This unconditionally queries the kernel via a system call.
     657              :  *
     658              :  * @note Use k_current_get() unless absolutely sure this is necessary.
     659              :  *       This should only be used directly where the thread local
     660              :  *       variable cannot be used or may contain invalid values
     661              :  *       if thread local storage (TLS) is enabled. If TLS is not
     662              :  *       enabled, this is the same as k_current_get().
     663              :  *
     664              :  * @return ID of current thread.
     665              :  */
     666              : __attribute_const__
     667            1 : __syscall k_tid_t k_sched_current_thread_query(void);
     668              : 
     669              : /**
     670              :  * @brief Get thread ID of the current thread.
     671              :  *
     672              :  * @return ID of current thread.
     673              :  *
     674              :  */
     675              : __attribute_const__
     676            1 : static inline k_tid_t k_current_get(void)
     677              : {
     678              : #ifdef CONFIG_CURRENT_THREAD_USE_TLS
     679              : 
     680              :         /* Thread-local cache of current thread ID, set in z_thread_entry() */
     681              :         extern Z_THREAD_LOCAL k_tid_t z_tls_current;
     682              : 
     683              :         return z_tls_current; /* fast path: read the TLS cache, no syscall */
     684              : #else
     685              :         return k_sched_current_thread_query(); /* no TLS cache: query the kernel via syscall */
     686              : #endif
     687              : }
     688              : 
     689              : /**
     690              :  * @brief Abort a thread.
     691              :  *
     692              :  * This routine permanently stops execution of @a thread. The thread is taken
     693              :  * off all kernel queues it is part of (i.e. the ready queue, the timeout
     694              :  * queue, or a kernel object wait queue). However, any kernel resources the
     695              :  * thread might currently own (such as mutexes or memory blocks) are not
     696              :  * released. It is the responsibility of the caller of this routine to ensure
     697              :  * all necessary cleanup is performed.
     698              :  *
     699              :  * After k_thread_abort() returns, the thread is guaranteed not to be
     700              :  * running or to become runnable anywhere on the system.  Normally
     701              :  * this is done via blocking the caller (in the same manner as
     702              :  * k_thread_join()), but in interrupt context on SMP systems the
     703              :  * implementation is required to spin for threads that are running on
     704              :  * other CPUs.
     705              :  *
     706              :  * @param thread ID of thread to abort.
     707              :  */
     708            1 : __syscall void k_thread_abort(k_tid_t thread);
     709              : 
     710              : k_ticks_t z_timeout_expires(const struct _timeout *timeout);
     711              : k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
     712              : 
     713              : #ifdef CONFIG_SYS_CLOCK_EXISTS
     714              : 
     715              : /**
     716              :  * @brief Get time when a thread wakes up, in system ticks
     717              :  *
     718              :  * This routine computes the system uptime when a waiting thread next
     719              :  * executes, in units of system ticks.  If the thread is not waiting,
     720              :  * it returns current system time.
     721              :  */
     722            1 : __syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread);
     723              : 
     724              : static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
     725              :                                                 const struct k_thread *thread)
     726              : {
     727              :         return z_timeout_expires(&thread->base.timeout); /* delegate to the generic timeout helper */
     728              : }
     729              : 
     730              : /**
     731              :  * @brief Get time remaining before a thread wakes up, in system ticks
     732              :  *
     733              :  * This routine computes the time remaining before a waiting thread
     734              :  * next executes, in units of system ticks.  If the thread is not
     735              :  * waiting, it returns zero.
     736              :  */
     737            1 : __syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread);
     738              : 
     739              : static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
     740              :                                                 const struct k_thread *thread)
     741              : {
     742              :         return z_timeout_remaining(&thread->base.timeout); /* generic helper; doc above: zero when not waiting */
     743              : }
     744              : 
     745              : #endif /* CONFIG_SYS_CLOCK_EXISTS */
     746              : 
     747              : /**
     748              :  * @cond INTERNAL_HIDDEN
     749              :  */
     750              : 
     751              : struct _static_thread_data { /* boot-time init record emitted by Z_THREAD_COMMON_DEFINE() */
     752              :         struct k_thread *init_thread;
     753              :         k_thread_stack_t *init_stack;
     754              :         unsigned int init_stack_size;
     755              :         k_thread_entry_t init_entry;
     756              :         void *init_p1; /* init_p1..init_p3: the three entry-point arguments */
     757              :         void *init_p2;
     758              :         void *init_p3;
     759              :         int init_prio;
     760              :         uint32_t init_options;
     761              :         const char *init_name;
     762              : #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
     763              :         int32_t init_delay_ms; /* raw ms: tick rate is unknown until run time */
     764              : #else
     765              :         k_timeout_t init_delay; /* tick rate fixed: timeout precomputed at build time */
     766              : #endif
     767              : };
     768              : 
     769              : #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
     770              : #define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms) /* store raw ms; converted at run time */
     771              : #define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms) /* ms -> k_timeout_t at run time */
     772              : #else
     773              : #define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS_INIT(ms) /* tick rate fixed: convert at build time */
     774              : #define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
     775              : #endif
     776              : 
     777              : #define Z_THREAD_INITIALIZER(thread, stack, stack_size,           \
     778              :                             entry, p1, p2, p3,                   \
     779              :                             prio, options, delay, tname)         \
     780              :         {                                                        \
     781              :         .init_thread = (thread),                                 \
     782              :         .init_stack = (stack),                                   \
     783              :         .init_stack_size = (stack_size),                         \
     784              :         .init_entry = (k_thread_entry_t)entry,                   \
     785              :         .init_p1 = (void *)p1,                                   \
     786              :         .init_p2 = (void *)p2,                                   \
     787              :         .init_p3 = (void *)p3,                                   \
     788              :         .init_prio = (prio),                                     \
     789              :         .init_options = (options),                               \
     790              :         .init_name = STRINGIFY(tname),                           \
     791              :         Z_THREAD_INIT_DELAY_INITIALIZER(delay)                   \
     792              :         } /* delay field name differs per tick-rate config; set via Z_THREAD_INIT_DELAY_INITIALIZER() */
     793              : 
     794              : /*
     795              :  * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
     796              :  * information on arguments.
     797              :  */
     798              : #define Z_THREAD_COMMON_DEFINE(name, stack_size,                        \
     799              :                                entry, p1, p2, p3,                       \
     800              :                                prio, options, delay)                    \
     801              :         struct k_thread _k_thread_obj_##name;                           \
     802              :         STRUCT_SECTION_ITERABLE(_static_thread_data,                    \
     803              :                                 _k_thread_data_##name) =                \
     804              :                 Z_THREAD_INITIALIZER(&_k_thread_obj_##name,         \
     805              :                                      _k_thread_stack_##name, stack_size,\
     806              :                                      entry, p1, p2, p3, prio, options,  \
     807              :                                      delay, name);                      \
     808              :         const k_tid_t name = (k_tid_t)&_k_thread_obj_##name /* no ';' here: the caller's semicolon terminates it */
     809              : 
     810              : /**
     811              :  * INTERNAL_HIDDEN @endcond
     812              :  */
     813              : 
     814              : /**
     815              :  * @brief Statically define and initialize a thread.
     816              :  *
     817              :  * The thread may be scheduled for immediate execution or a delayed start.
     818              :  *
     819              :  * Thread options are architecture-specific, and can include K_ESSENTIAL,
     820              :  * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
     821              :  * them using "|" (the logical OR operator).
     822              :  *
     823              :  * The ID of the thread can be accessed using:
     824              :  *
     825              :  * @code extern const k_tid_t <name>; @endcode
     826              :  *
     827              :  * @param name Name of the thread.
     828              :  * @param stack_size Stack size in bytes.
     829              :  * @param entry Thread entry function.
     830              :  * @param p1 1st entry point parameter.
     831              :  * @param p2 2nd entry point parameter.
     832              :  * @param p3 3rd entry point parameter.
     833              :  * @param prio Thread priority.
     834              :  * @param options Thread options.
     835              :  * @param delay Scheduling delay (in milliseconds), zero for no delay.
     836              :  *
     837              :  * @note Static threads with zero delay should not normally have
     838              :  * MetaIRQ priority levels.  This can preempt the system
     839              :  * initialization handling (depending on the priority of the main
     840              :  * thread) and cause surprising ordering side effects.  It will not
     841              :  * affect anything in the OS per se, but consider it bad practice.
     842              :  * Use a SYS_INIT() callback if you need to run code before entrance
     843              :  * to the application main().
     844              :  */
     845              : #define K_THREAD_DEFINE(name, stack_size,                                \
     846              :                         entry, p1, p2, p3,                               \
     847            1 :                         prio, options, delay)                            \
     848              :         K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size);       \
     849              :         Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,      \
     850              :                                prio, options, delay) /* stack via K_THREAD_STACK_DEFINE: usable from user mode */
     851              : 
     852              : /**
     853              :  * @brief Statically define and initialize a thread intended to run only in kernel mode.
     854              :  *
     855              :  * The thread may be scheduled for immediate execution or a delayed start.
     856              :  *
     857              :  * Thread options are architecture-specific, and can include K_ESSENTIAL,
     858              :  * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
     859              :  * them using "|" (the logical OR operator).
     860              :  *
     861              :  * The ID of the thread can be accessed using:
     862              :  *
     863              :  * @code extern const k_tid_t <name>; @endcode
     864              :  *
     865              :  * @note Threads defined by this can only run in kernel mode, and cannot be
     866              :  *       transformed into user thread via k_thread_user_mode_enter().
     867              :  *
     868              :  * @warning Depending on the architecture, the stack size (@p stack_size)
     869              :  *          may need to be multiples of CONFIG_MMU_PAGE_SIZE (if MMU)
     870              :  *          or in power-of-two size (if MPU).
     871              :  *
     872              :  * @param name Name of the thread.
     873              :  * @param stack_size Stack size in bytes.
     874              :  * @param entry Thread entry function.
     875              :  * @param p1 1st entry point parameter.
     876              :  * @param p2 2nd entry point parameter.
     877              :  * @param p3 3rd entry point parameter.
     878              :  * @param prio Thread priority.
     879              :  * @param options Thread options.
     880              :  * @param delay Scheduling delay (in milliseconds), zero for no delay.
     881              :  */
     882              : #define K_KERNEL_THREAD_DEFINE(name, stack_size,                        \
     883              :                                entry, p1, p2, p3,                       \
     884            1 :                                prio, options, delay)                    \
     885              :         K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size);      \
     886              :         Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3,     \
     887              :                                prio, options, delay) /* kernel-only stack (K_KERNEL_STACK_DEFINE); see doc above */
     888              : 
     889              : /**
     890              :  * @brief Get a thread's priority.
     891              :  *
     892              :  * This routine gets the priority of @a thread.
     893              :  *
     894              :  * @param thread ID of thread whose priority is needed.
     895              :  *
     896              :  * @return Priority of @a thread.
     897              :  */
     898            1 : __syscall int k_thread_priority_get(k_tid_t thread);
     899              : 
     900              : /**
     901              :  * @brief Set a thread's priority.
     902              :  *
     903              :  * This routine immediately changes the priority of @a thread.
     904              :  *
     905              :  * Rescheduling can occur immediately depending on the priority @a thread is
     906              :  * set to:
     907              :  *
     908              :  * - If its priority is raised above the priority of a currently scheduled
     909              :  * preemptible thread, @a thread will be scheduled in.
     910              :  *
     911              :  * - If the caller lowers the priority of a currently scheduled preemptible
     912              :  * thread below that of other threads in the system, the thread of the highest
     913              :  * priority will be scheduled in.
     914              :  *
     915              :  * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
     916              :  * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
     917              :  * highest priority.
     918              :  *
     919              :  * @param thread ID of thread whose priority is to be set.
     920              :  * @param prio New priority.
     921              :  *
     922              :  * @warning Changing the priority of a thread currently involved in mutex
     923              :  * priority inheritance may result in undefined behavior.
     924              :  */
     925            1 : __syscall void k_thread_priority_set(k_tid_t thread, int prio);
     926              : 
     927              : 
     928              : #ifdef CONFIG_SCHED_DEADLINE
     929              : /**
     930              :  * @brief Set deadline expiration time for scheduler
     931              :  *
     932              :  * This sets the "deadline" expiration as a time delta from the
     933              :  * current time, in the same units used by k_cycle_get_32().  The
     934              :  * scheduler (when deadline scheduling is enabled) will choose the
     935              :  * next expiring thread when selecting between threads at the same
     936              :  * static priority.  Threads at different priorities will be scheduled
     937              :  * according to their static priority.
     938              :  *
     939              :  * @note Deadlines are stored internally using 32 bit unsigned
     940              :  * integers.  The number of cycles between the "first" deadline in the
     941              :  * scheduler queue and the "last" deadline must be less than 2^31 (i.e
     942              :  * a signed non-negative quantity).  Failure to adhere to this rule
     943              :  * may result in scheduled threads running in an incorrect deadline
     944              :  * order.
     945              :  *
     946              :  * @note Despite the API naming, the scheduler makes no guarantees
     947              :  * the thread WILL be scheduled within that deadline, nor does it take
     948              :  * extra metadata (like e.g. the "runtime" and "period" parameters in
     949              :  * Linux sched_setattr()) that allows the kernel to validate the
     950              :  * scheduling for achievability.  Such features could be implemented
     951              :  * above this call, which is simply input to the priority selection
     952              :  * logic.
     953              :  *
     954              :  * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
     955              :  * configuration.
     956              :  *
     957              :  * @param thread A thread on which to set the deadline
     958              :  * @param deadline A time delta, in cycle units
     959              :  *
     960              :  */
     961            1 : __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
     962              : #endif
     963              : 
     964              : /**
     965              :  * @brief Invoke the scheduler
     966              :  *
     967              :  * This routine invokes the scheduler to force a schedule point on the current
     968              :  * CPU. If invoked from within a thread, the scheduler will be invoked
     969              :  * immediately (provided interrupts were not locked when invoked). If invoked
     970              :  * from within an ISR, the scheduler will be invoked upon exiting the ISR.
     971              :  *
     972              :  * Invoking the scheduler allows the kernel to make an immediate determination
     973              :  * as to what the next thread to execute should be. Unlike yielding, this
     974              :  * routine is not guaranteed to switch to a thread of equal or higher priority
     975              :  * if any are available. For example, if the current thread is cooperative and
     976              :  * there is a still higher priority cooperative thread that is ready, then
     977              :  * yielding will switch to that higher priority thread whereas this routine
     978              :  * will not.
     979              :  *
     980              :  * Most applications will never use this routine.
     981              :  */
     982            1 : __syscall void k_reschedule(void);
     983              : 
     984              : #ifdef CONFIG_SCHED_CPU_MASK
     985              : /**
     986              :  * @brief Sets all CPU enable masks to zero
     987              :  *
     988              :  * After this returns, the thread will no longer be schedulable on any
     989              :  * CPUs.  The thread must not be currently runnable.
     990              :  *
     991              :  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
     992              :  * configuration.
     993              :  *
     994              :  * @param thread Thread to operate upon
     995              :  * @return Zero on success, otherwise error code
     996              :  */
     997            1 : int k_thread_cpu_mask_clear(k_tid_t thread);
     998              : 
     999              : /**
    1000              :  * @brief Sets all CPU enable masks to one
    1001              :  *
    1002              :  * After this returns, the thread will be schedulable on any CPU.  The
    1003              :  * thread must not be currently runnable.
    1004              :  *
    1005              :  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
    1006              :  * configuration.
    1007              :  *
    1008              :  * @param thread Thread to operate upon
    1009              :  * @return Zero on success, otherwise error code
    1010              :  */
    1011            1 : int k_thread_cpu_mask_enable_all(k_tid_t thread);
    1012              : 
    1013              : /**
    1014              :  * @brief Enable thread to run on specified CPU
    1015              :  *
    1016              :  * The thread must not be currently runnable.
    1017              :  *
    1018              :  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
    1019              :  * configuration.
    1020              :  *
    1021              :  * @param thread Thread to operate upon
    1022              :  * @param cpu CPU index
    1023              :  * @return Zero on success, otherwise error code
    1024              :  */
    1025            1 : int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
    1026              : 
    1027              : /**
    1028              :  * @brief Prevent a thread from running on the specified CPU
    1029              :  *
    1030              :  * The thread must not be currently runnable.
    1031              :  *
    1032              :  * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
    1033              :  * configuration.
    1034              :  *
    1035              :  * @param thread Thread to operate upon
    1036              :  * @param cpu CPU index
    1037              :  * @return Zero on success, otherwise error code
    1038              :  */
    1039            1 : int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
    1040              : 
    1041              : /**
    1042              :  * @brief Pin a thread to a CPU
    1043              :  *
    1044              :  * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
    1045              :  * thread on the selected CPU.
    1046              :  *
    1047              :  * @param thread Thread to operate upon
    1048              :  * @param cpu CPU index
    1049              :  * @return Zero on success, otherwise error code
    1050              :  */
    1051            1 : int k_thread_cpu_pin(k_tid_t thread, int cpu);
    1052              : #endif
    1053              : 
    1054              : /**
    1055              :  * @brief Suspend a thread.
    1056              :  *
    1057              :  * This routine prevents the kernel scheduler from making @a thread
    1058              :  * the current thread. All other internal operations on @a thread are
    1059              :  * still performed; for example, kernel objects it is waiting on are
    1060              :  * still handed to it. Thread suspension does not impact any timeout
    1061              :  * upon which the thread may be waiting (such as a timeout from a call
    1062              :  * to k_sem_take() or k_sleep()). Thus if the timeout expires while the
    1063              :  * thread is suspended, it is still suspended until k_thread_resume()
    1064              :  * is called.
    1065              :  *
    1066              :  * When the target thread is active on another CPU, the caller will block until
    1067              :  * the target thread is halted (suspended or aborted).  But if the caller is in
    1068              :  * an interrupt context, it will spin waiting for that target thread active on
    1069              :  * another CPU to halt.
    1070              :  *
    1071              :  * If @a thread is already suspended, the routine has no effect.
    1072              :  *
    1073              :  * @param thread ID of thread to suspend.
    1074              :  */
    1075            1 : __syscall void k_thread_suspend(k_tid_t thread);
    1076              : 
    1077              : /**
    1078              :  * @brief Resume a suspended thread.
    1079              :  *
    1080              :  * This routine reverses the thread suspension from k_thread_suspend()
    1081              :  * and allows the kernel scheduler to make @a thread the current thread
    1082              :  * when it is next eligible for that role.
    1083              :  *
    1084              :  * If @a thread is not currently suspended, the routine has no effect.
    1085              :  *
    1086              :  * @param thread ID of thread to resume.
    1087              :  */
    1088            1 : __syscall void k_thread_resume(k_tid_t thread);
    1089              : 
    1090              : /**
    1091              :  * @brief Start an inactive thread
    1092              :  *
    1093              :  * If a thread was created with K_FOREVER in the delay parameter, it will
    1094              :  * not be added to the scheduling queue until this function is called
    1095              :  * on it.
    1096              :  *
    1097              :  * @note This is a legacy API for compatibility.  Modern Zephyr
    1098              :  * threads are initialized in the "sleeping" state and do not need
    1099              :  * special handling for "start".
    1100              :  *
    1101              :  * @param thread thread to start
    1102              :  */
    1103            1 : static inline void k_thread_start(k_tid_t thread)
    1104              : {
    1105              :         k_wakeup(thread);
    1106              : }
    1107              : 
    1108              : /**
    1109              :  * @brief Set time-slicing period and scope.
    1110              :  *
    1111              :  * This routine specifies how the scheduler will perform time slicing of
    1112              :  * preemptible threads.
    1113              :  *
    1114              :  * To enable time slicing, @a slice must be non-zero. The scheduler
    1115              :  * ensures that no thread runs for more than the specified time limit
    1116              :  * before other threads of that priority are given a chance to execute.
    1117              :  * Any thread whose priority is higher than @a prio is exempted, and may
    1118              :  * execute as long as desired without being preempted due to time slicing.
    1119              :  *
    1120              :  * Time slicing only limits the maximum amount of time a thread may continuously
    1121              :  * execute. Once the scheduler selects a thread for execution, there is no
    1122              :  * minimum guaranteed time the thread will execute before threads of greater or
    1123              :  * equal priority are scheduled.
    1124              :  *
    1125              :  * When the current thread is the only one of that priority eligible
    1126              :  * for execution, this routine has no effect; the thread is immediately
    1127              :  * rescheduled after the slice period expires.
    1128              :  *
    1129              :  * To disable timeslicing, set both @a slice and @a prio to zero.
    1130              :  *
    1131              :  * @param slice Maximum time slice length (in milliseconds).
    1132              :  * @param prio Highest thread priority level eligible for time slicing.
    1133              :  */
    1134            1 : void k_sched_time_slice_set(int32_t slice, int prio);
    1135              : 
    1136              : /**
    1137              :  * @brief Set thread time slice
    1138              :  *
    1139              :  * As for k_sched_time_slice_set, but (when
    1140              :  * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
    1141              :  * thread.  When non-zero, this timeslice will take precedence over
    1142              :  * the global value.
    1143              :  *
    1144              :  * When such a thread's timeslice expires, the configured callback
    1145              :  * will be called before the thread is removed/re-added to the run
    1146              :  * queue.  This callback will occur in interrupt context, and the
    1147              :  * specified thread is guaranteed to have been preempted by the
    1148              :  * currently-executing ISR.  Such a callback is free to, for example,
    1149              :  * modify the thread priority or slice time for future execution,
    1150              :  * suspend the thread, etc...
    1151              :  *
    1152              :  * @note Unlike the older API, the time slice parameter here is
    1153              :  * specified in ticks, not milliseconds.  Ticks have always been the
    1154              :  * internal unit, and not all platforms have integer conversions
    1155              :  * between the two.
    1156              :  *
    1157              :  * @note Threads with a non-zero slice time set will be timesliced
    1158              :  * always, even if they are higher priority than the maximum timeslice
    1159              :  * priority set via k_sched_time_slice_set().
    1160              :  *
    1161              :  * @note The callback notification for slice expiration happens, as it
    1162              :  * must, while the thread is still "current", and thus it happens
    1163              :  * before any registered timeouts at this tick.  This has the somewhat
    1164              :  * confusing side effect that the tick time (c.f. k_uptime_get()) does
    1165              :  * not yet reflect the expired ticks.  Applications wishing to make
    1166              :  * fine-grained timing decisions within this callback should use the
    1167              :  * cycle API, or derived facilities like k_thread_runtime_stats_get().
    1168              :  *
    1169              :  * @param th A valid, initialized thread
    1170              :  * @param slice_ticks Maximum timeslice, in ticks
    1171              :  * @param expired Callback function called on slice expiration
    1172              :  * @param data Parameter for the expiration handler
    1173              :  */
    1174            1 : void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
    1175              :                              k_thread_timeslice_fn_t expired, void *data);
    1176              : 
    1177              : /** @} */
    1178              : 
    1179              : /**
    1180              :  * @addtogroup isr_apis
    1181              :  * @{
    1182              :  */
    1183              : 
    1184              : /**
    1185              :  * @brief Determine if code is running at interrupt level.
    1186              :  *
    1187              :  * This routine allows the caller to customize its actions, depending on
    1188              :  * whether it is a thread or an ISR.
    1189              :  *
    1190              :  * @funcprops \isr_ok
    1191              :  *
    1192              :  * @return false if invoked by a thread.
    1193              :  * @return true if invoked by an ISR.
    1194              :  */
    1195            1 : bool k_is_in_isr(void);
    1196              : 
    1197              : /**
    1198              :  * @brief Determine if code is running in a preemptible thread.
    1199              :  *
    1200              :  * This routine allows the caller to customize its actions, depending on
    1201              :  * whether it can be preempted by another thread. The routine returns a 'true'
    1202              :  * value if all of the following conditions are met:
    1203              :  *
    1204              :  * - The code is running in a thread, not at ISR.
    1205              :  * - The thread's priority is in the preemptible range.
    1206              :  * - The thread has not locked the scheduler.
    1207              :  *
    1208              :  * @funcprops \isr_ok
    1209              :  *
    1210              :  * @return 0 if invoked by an ISR or by a cooperative thread.
    1211              :  * @return Non-zero if invoked by a preemptible thread.
    1212              :  */
    1213            1 : __syscall int k_is_preempt_thread(void);
    1214              : 
    1215              : /**
    1216              :  * @brief Test whether startup is in the before-main-task phase.
    1217              :  *
    1218              :  * This routine allows the caller to customize its actions, depending on
    1219              :  * whether it is being invoked before the kernel is fully active.
    1220              :  *
    1221              :  * @funcprops \isr_ok
    1222              :  *
    1223              :  * @return true if invoked before post-kernel initialization
    1224              :  * @return false if invoked during/after post-kernel initialization
    1225              :  */
    1226            1 : static inline bool k_is_pre_kernel(void)
    1227              : {
    1228              :         extern bool z_sys_post_kernel; /* in init.c */
    1229              : 
    1230              :         return !z_sys_post_kernel;
    1231              : }
    1232              : 
    1233              : /**
    1234              :  * @}
    1235              :  */
    1236              : 
    1237              : /**
    1238              :  * @addtogroup thread_apis
    1239              :  * @{
    1240              :  */
    1241              : 
    1242              : /**
    1243              :  * @brief Lock the scheduler.
    1244              :  *
    1245              :  * This routine prevents the current thread from being preempted by another
    1246              :  * thread by instructing the scheduler to treat it as a cooperative thread.
    1247              :  * If the thread subsequently performs an operation that makes it unready,
    1248              :  * it will be context switched out in the normal manner. When the thread
    1249              :  * again becomes the current thread, its non-preemptible status is maintained.
    1250              :  *
    1251              :  * This routine can be called recursively.
    1252              :  *
    1253              :  * Owing to clever implementation details, scheduler locks are
    1254              :  * extremely fast for non-userspace threads (just one byte
    1255              :  * inc/decrement in the thread struct).
    1256              :  *
    1257              :  * @note This works by elevating the thread priority temporarily to a
    1258              :  * cooperative priority, allowing cheap synchronization vs. other
    1259              :  * preemptible or cooperative threads running on the current CPU.  It
    1260              :  * does not prevent preemption or asynchrony of other types.  It does
    1261              :  * not prevent threads from running on other CPUs when CONFIG_SMP=y.
    1262              :  * It does not prevent interrupts from happening, nor does it prevent
    1263              :  * threads with MetaIRQ priorities from preempting the current thread.
    1264              :  * In general this is a historical API not well-suited to modern
    1265              :  * applications, use with care.
    1266              :  */
    1267            1 : void k_sched_lock(void);
    1268              : 
    1269              : /**
    1270              :  * @brief Unlock the scheduler.
    1271              :  *
    1272              :  * This routine reverses the effect of a previous call to k_sched_lock().
    1273              :  * A thread must call the routine once for each time it called k_sched_lock()
    1274              :  * before the thread becomes preemptible.
    1275              :  */
    1276            1 : void k_sched_unlock(void);
    1277              : 
    1278              : /**
    1279              :  * @brief Set current thread's custom data.
    1280              :  *
    1281              :  * This routine sets the custom data for the current thread to @a value.
    1282              :  *
    1283              :  * Custom data is not used by the kernel itself, and is freely available
    1284              :  * for a thread to use as it sees fit. It can be used as a framework
    1285              :  * upon which to build thread-local storage.
    1286              :  *
    1287              :  * @param value New custom data value.
    1288              :  *
    1289              :  */
    1290            1 : __syscall void k_thread_custom_data_set(void *value);
    1291              : 
    1292              : /**
    1293              :  * @brief Get current thread's custom data.
    1294              :  *
    1295              :  * This routine returns the custom data for the current thread.
    1296              :  *
    1297              :  * @return Current custom data value.
    1298              :  */
    1299            1 : __syscall void *k_thread_custom_data_get(void);
    1300              : 
    1301              : /**
    1302              :  * @brief Set current thread name
    1303              :  *
    1304              :  * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
    1305              :  * is enabled for tracing and debugging.
    1306              :  *
    1307              :  * @param thread Thread to set name, or NULL to set the current thread
    1308              :  * @param str Name string
    1309              :  * @retval 0 on success
    1310              :  * @retval -EFAULT Memory access error with supplied string
    1311              :  * @retval -ENOSYS Thread name configuration option not enabled
    1312              :  * @retval -EINVAL Thread name too long
    1313              :  */
    1314            1 : __syscall int k_thread_name_set(k_tid_t thread, const char *str);
    1315              : 
    1316              : /**
    1317              :  * @brief Get thread name
    1318              :  *
    1319              :  * Get the name of a thread
    1320              :  *
    1321              :  * @param thread Thread ID
    1322              :  * @retval Thread name, or NULL if configuration not enabled
    1323              :  */
    1324            1 : const char *k_thread_name_get(k_tid_t thread);
    1325              : 
    1326              : /**
    1327              :  * @brief Copy the thread name into a supplied buffer
    1328              :  *
    1329              :  * @param thread Thread to obtain name information
    1330              :  * @param buf Destination buffer
    1331              :  * @param size Destination buffer size
    1332              :  * @retval -ENOSPC Destination buffer too small
    1333              :  * @retval -EFAULT Memory access error
    1334              :  * @retval -ENOSYS Thread name feature not enabled
    1335              :  * @retval 0 Success
    1336              :  */
    1337            1 : __syscall int k_thread_name_copy(k_tid_t thread, char *buf,
    1338              :                                  size_t size);
    1339              : 
    1340              : /**
    1341              :  * @brief Get thread state string
    1342              :  *
    1343              :  * This routine generates a human friendly string containing the thread's
    1344              :  * state, and copies as much of it as possible into @a buf.
    1345              :  *
    1346              :  * @param thread_id Thread ID
    1347              :  * @param buf Buffer into which to copy state strings
    1348              :  * @param buf_size Size of the buffer
    1349              :  *
    1350              :  * @retval Pointer to @a buf if data was copied, else a pointer to "".
    1351              :  */
    1352            1 : const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
    1353              : 
    1354              : /**
    1355              :  * @}
    1356              :  */
    1357              : 
    1358              : /**
    1359              :  * @addtogroup clock_apis
    1360              :  * @{
    1361              :  */
    1362              : 
    1363              : /**
    1364              :  * @brief Generate null timeout delay.
    1365              :  *
    1366              :  * This macro generates a timeout delay that instructs a kernel API
    1367              :  * not to wait if the requested operation cannot be performed immediately.
    1368              :  *
    1369              :  * @return Timeout delay value.
    1370              :  */
    1371            1 : #define K_NO_WAIT Z_TIMEOUT_NO_WAIT
    1372              : 
    1373              : /**
    1374              :  * @brief Generate timeout delay from nanoseconds.
    1375              :  *
    1376              :  * This macro generates a timeout delay that instructs a kernel API to
    1377              :  * wait up to @a t nanoseconds to perform the requested operation.
    1378              :  * Note that timer precision is limited to the tick rate, not the
    1379              :  * requested value.
    1380              :  *
    1381              :  * @param t Duration in nanoseconds.
    1382              :  *
    1383              :  * @return Timeout delay value.
    1384              :  */
    1385            1 : #define K_NSEC(t)     Z_TIMEOUT_NS(t)
    1386              : 
    1387              : /**
    1388              :  * @brief Generate timeout delay from microseconds.
    1389              :  *
    1390              :  * This macro generates a timeout delay that instructs a kernel API
    1391              :  * to wait up to @a t microseconds to perform the requested operation.
    1392              :  * Note that timer precision is limited to the tick rate, not the
    1393              :  * requested value.
    1394              :  *
    1395              :  * @param t Duration in microseconds.
    1396              :  *
    1397              :  * @return Timeout delay value.
    1398              :  */
    1399            1 : #define K_USEC(t)     Z_TIMEOUT_US(t)
    1400              : 
    1401              : /**
    1402              :  * @brief Generate timeout delay from cycles.
    1403              :  *
    1404              :  * This macro generates a timeout delay that instructs a kernel API
    1405              :  * to wait up to @a t cycles to perform the requested operation.
    1406              :  *
    1407              :  * @param t Duration in cycles.
    1408              :  *
    1409              :  * @return Timeout delay value.
    1410              :  */
    1411            1 : #define K_CYC(t)     Z_TIMEOUT_CYC(t)
    1412              : 
    1413              : /**
    1414              :  * @brief Generate timeout delay from system ticks.
    1415              :  *
    1416              :  * This macro generates a timeout delay that instructs a kernel API
    1417              :  * to wait up to @a t ticks to perform the requested operation.
    1418              :  *
    1419              :  * @param t Duration in system ticks.
    1420              :  *
    1421              :  * @return Timeout delay value.
    1422              :  */
    1423            1 : #define K_TICKS(t)     Z_TIMEOUT_TICKS(t)
    1424              : 
    1425              : /**
    1426              :  * @brief Generate timeout delay from milliseconds.
    1427              :  *
    1428              :  * This macro generates a timeout delay that instructs a kernel API
    1429              :  * to wait up to @a ms milliseconds to perform the requested operation.
    1430              :  *
    1431              :  * @param ms Duration in milliseconds.
    1432              :  *
    1433              :  * @return Timeout delay value.
    1434              :  */
    1435            1 : #define K_MSEC(ms)     Z_TIMEOUT_MS(ms)
    1436              : 
    1437              : /**
    1438              :  * @brief Generate timeout delay from seconds.
    1439              :  *
    1440              :  * This macro generates a timeout delay that instructs a kernel API
    1441              :  * to wait up to @a s seconds to perform the requested operation.
    1442              :  *
    1443              :  * @param s Duration in seconds.
    1444              :  *
    1445              :  * @return Timeout delay value.
    1446              :  */
    1447            1 : #define K_SECONDS(s)   K_MSEC((s) * MSEC_PER_SEC)
    1448              : 
    1449              : /**
    1450              :  * @brief Generate timeout delay from minutes.
    1451              : 
    1452              :  * This macro generates a timeout delay that instructs a kernel API
    1453              :  * to wait up to @a m minutes to perform the requested operation.
    1454              :  *
    1455              :  * @param m Duration in minutes.
    1456              :  *
    1457              :  * @return Timeout delay value.
    1458              :  */
    1459            1 : #define K_MINUTES(m)   K_SECONDS((m) * 60)
    1460              : 
    1461              : /**
    1462              :  * @brief Generate timeout delay from hours.
    1463              :  *
    1464              :  * This macro generates a timeout delay that instructs a kernel API
    1465              :  * to wait up to @a h hours to perform the requested operation.
    1466              :  *
    1467              :  * @param h Duration in hours.
    1468              :  *
    1469              :  * @return Timeout delay value.
    1470              :  */
    1471            1 : #define K_HOURS(h)     K_MINUTES((h) * 60)
    1472              : 
    1473              : /**
    1474              :  * @brief Generate infinite timeout delay.
    1475              :  *
    1476              :  * This macro generates a timeout delay that instructs a kernel API
    1477              :  * to wait as long as necessary to perform the requested operation.
    1478              :  *
    1479              :  * @return Timeout delay value.
    1480              :  */
    1481            1 : #define K_FOREVER Z_FOREVER
    1482              : 
    1483              : #ifdef CONFIG_TIMEOUT_64BIT
    1484              : 
    1485              : /**
    1486              :  * @brief Generates an absolute/uptime timeout value from system ticks
    1487              :  *
    1488              :  * This macro generates a timeout delay that represents an expiration
    1489              :  * at the absolute uptime value specified, in system ticks.  That is, the
    1490              :  * timeout will expire immediately after the system uptime reaches the
    1491              :  * specified tick count. Value is clamped to the range 0 to INT64_MAX-1.
    1492              :  *
    1493              :  * @param t Tick uptime value
    1494              :  * @return Timeout delay value
    1495              :  */
    1496              : #define K_TIMEOUT_ABS_TICKS(t) \
    1497              :         Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)CLAMP(t, 0, (INT64_MAX - 1))))
    1498              : 
    1499              : /**
    1500              :  * @brief Generates an absolute/uptime timeout value from seconds
    1501              :  *
    1502              :  * This macro generates a timeout delay that represents an expiration
    1503              :  * at the absolute uptime value specified, in seconds.  That is, the
    1504              :  * timeout will expire immediately after the system uptime reaches the
    1505              :  * specified tick count.
    1506              :  *
    1507              :  * @param t Second uptime value
    1508              :  * @return Timeout delay value
    1509              :  */
    1510              : #define K_TIMEOUT_ABS_SEC(t) K_TIMEOUT_ABS_TICKS(k_sec_to_ticks_ceil64(t))
    1511              : 
    1512              : /**
    1513              :  * @brief Generates an absolute/uptime timeout value from milliseconds
    1514              :  *
    1515              :  * This macro generates a timeout delay that represents an expiration
    1516              :  * at the absolute uptime value specified, in milliseconds.  That is,
    1517              :  * the timeout will expire immediately after the system uptime reaches
    1518              :  * the specified tick count.
    1519              :  *
    1520              :  * @param t Millisecond uptime value
    1521              :  * @return Timeout delay value
    1522              :  */
    1523              : #define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
    1524              : 
    1525              : /**
    1526              :  * @brief Generates an absolute/uptime timeout value from microseconds
    1527              :  *
    1528              :  * This macro generates a timeout delay that represents an expiration
    1529              :  * at the absolute uptime value specified, in microseconds.  That is,
    1530              :  * the timeout will expire immediately after the system uptime reaches
    1531              :  * the specified time.  Note that timer precision is limited by the
    1532              :  * system tick rate and not the requested timeout value.
    1533              :  *
    1534              :  * @param t Microsecond uptime value
    1535              :  * @return Timeout delay value
    1536              :  */
    1537              : #define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
    1538              : 
    1539              : /**
    1540              :  * @brief Generates an absolute/uptime timeout value from nanoseconds
    1541              :  *
    1542              :  * This macro generates a timeout delay that represents an expiration
    1543              :  * at the absolute uptime value specified, in nanoseconds.  That is,
    1544              :  * the timeout will expire immediately after the system uptime reaches
    1545              :  * the specified time.  Note that timer precision is limited by the
    1546              :  * system tick rate and not the requested timeout value.
    1547              :  *
    1548              :  * @param t Nanosecond uptime value
    1549              :  * @return Timeout delay value
    1550              :  */
    1551              : #define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
    1552              : 
    1553              : /**
    1554              :  * @brief Generates an absolute/uptime timeout value from system cycles
    1555              :  *
    1556              :  * This macro generates a timeout delay that represents an expiration
    1557              :  * at the absolute uptime value specified, in cycles.  That is, the
    1558              :  * timeout will expire immediately after the system uptime reaches the
    1559              :  * specified time.  Note that timer precision is limited by the system
    1560              :  * tick rate and not the requested timeout value.
    1561              :  *
    1562              :  * @param t Cycle uptime value
    1563              :  * @return Timeout delay value
    1564              :  */
    1565              : #define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
    1566              : 
    1567              : #endif
    1568              : 
    1569              : /**
    1570              :  * @}
    1571              :  */
    1572              : 
    1573              : /**
    1574              :  * @cond INTERNAL_HIDDEN
    1575              :  */
    1576              : 
    1577              : struct k_timer {
    1578              :         /*
    1579              :          * _timeout structure must be first here if we want to use
    1580              :          * dynamic timer allocation. timeout.node is used in the double-linked
    1581              :          * list of free timers
    1582              :          */
    1583              :         struct _timeout timeout;
    1584              : 
    1585              :         /* wait queue for the (single) thread waiting on this timer */
    1586              :         _wait_q_t wait_q;
    1587              : 
    1588              :         /* runs in ISR context */
    1589              :         void (*expiry_fn)(struct k_timer *timer);
    1590              : 
    1591              :         /* runs in the context of the thread that calls k_timer_stop() */
    1592              :         void (*stop_fn)(struct k_timer *timer);
    1593              : 
    1594              :         /* timer period */
    1595              :         k_timeout_t period;
    1596              : 
    1597              :         /* timer status */
    1598              :         uint32_t status;
    1599              : 
    1600              :         /* user-specific data, also used to support legacy features */
    1601              :         void *user_data;
    1602              : 
    1603              :         SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
    1604              : 
    1605              : #ifdef CONFIG_OBJ_CORE_TIMER
    1606              :         struct k_obj_core  obj_core;
    1607              : #endif
    1608              : };
    1609              : 
    1610              : #define Z_TIMER_INITIALIZER(obj, expiry, stop) \
    1611              :         { \
    1612              :         .timeout = { \
    1613              :                 .node = {},\
    1614              :                 .fn = z_timer_expiration_handler, \
    1615              :                 .dticks = 0, \
    1616              :         }, \
    1617              :         .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    1618              :         .expiry_fn = expiry, \
    1619              :         .stop_fn = stop, \
    1620              :         .period = {}, \
    1621              :         .status = 0, \
    1622              :         .user_data = 0, \
    1623              :         }
    1624              : 
    1625              : /**
    1626              :  * INTERNAL_HIDDEN @endcond
    1627              :  */
    1628              : 
    1629              : /**
    1630              :  * @defgroup timer_apis Timer APIs
    1631              :  * @ingroup kernel_apis
    1632              :  * @{
    1633              :  */
    1634              : 
    1635              : /**
    1636              :  * @typedef k_timer_expiry_t
    1637              :  * @brief Timer expiry function type.
    1638              :  *
    1639              :  * A timer's expiry function is executed by the system clock interrupt handler
    1640              :  * each time the timer expires. The expiry function is optional, and is only
    1641              :  * invoked if the timer has been initialized with one.
    1642              :  *
    1643              :  * @param timer     Address of timer.
    1644              :  */
    1645            1 : typedef void (*k_timer_expiry_t)(struct k_timer *timer);
    1646              : 
    1647              : /**
    1648              :  * @typedef k_timer_stop_t
    1649              :  * @brief Timer stop function type.
    1650              :  *
    1651              :  * A timer's stop function is executed if the timer is stopped prematurely.
 * The function runs in the context of the call that stops the timer.  As
    1653              :  * k_timer_stop() can be invoked from an ISR, the stop function must be
    1654              :  * callable from interrupt context (isr-ok).
    1655              :  *
    1656              :  * The stop function is optional, and is only invoked if the timer has been
    1657              :  * initialized with one.
    1658              :  *
    1659              :  * @param timer     Address of timer.
    1660              :  */
    1661            1 : typedef void (*k_timer_stop_t)(struct k_timer *timer);
    1662              : 
    1663              : /**
    1664              :  * @brief Statically define and initialize a timer.
    1665              :  *
    1666              :  * The timer can be accessed outside the module where it is defined using:
    1667              :  *
    1668              :  * @code extern struct k_timer <name>; @endcode
    1669              :  *
    1670              :  * @param name Name of the timer variable.
    1671              :  * @param expiry_fn Function to invoke each time the timer expires.
    1672              :  * @param stop_fn   Function to invoke if the timer is stopped while running.
    1673              :  */
    1674            1 : #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
    1675              :         STRUCT_SECTION_ITERABLE(k_timer, name) = \
    1676              :                 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
    1677              : 
    1678              : /**
    1679              :  * @brief Initialize a timer.
    1680              :  *
    1681              :  * This routine initializes a timer, prior to its first use.
    1682              :  *
    1683              :  * @param timer     Address of timer.
    1684              :  * @param expiry_fn Function to invoke each time the timer expires.
    1685              :  * @param stop_fn   Function to invoke if the timer is stopped while running.
    1686              :  */
    1687            1 : void k_timer_init(struct k_timer *timer,
    1688              :                          k_timer_expiry_t expiry_fn,
    1689              :                          k_timer_stop_t stop_fn);
    1690              : 
    1691              : /**
    1692              :  * @brief Start a timer.
    1693              :  *
    1694              :  * This routine starts a timer, and resets its status to zero. The timer
    1695              :  * begins counting down using the specified duration and period values.
    1696              :  *
    1697              :  * Attempting to start a timer that is already running is permitted.
    1698              :  * The timer's status is reset to zero and the timer begins counting down
    1699              :  * using the new duration and period values.
    1700              :  *
    1701              :  * @param timer     Address of timer.
    1702              :  * @param duration  Initial timer duration.
    1703              :  * @param period    Timer period.
    1704              :  */
    1705            1 : __syscall void k_timer_start(struct k_timer *timer,
    1706              :                              k_timeout_t duration, k_timeout_t period);
    1707              : 
    1708              : /**
    1709              :  * @brief Stop a timer.
    1710              :  *
    1711              :  * This routine stops a running timer prematurely. The timer's stop function,
    1712              :  * if one exists, is invoked by the caller.
    1713              :  *
    1714              :  * Attempting to stop a timer that is not running is permitted, but has no
    1715              :  * effect on the timer.
    1716              :  *
    1717              :  * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
    1718              :  * be called from ISRs.
    1719              :  *
    1720              :  * @funcprops \isr_ok
    1721              :  *
    1722              :  * @param timer     Address of timer.
    1723              :  */
    1724            1 : __syscall void k_timer_stop(struct k_timer *timer);
    1725              : 
    1726              : /**
    1727              :  * @brief Read timer status.
    1728              :  *
    1729              :  * This routine reads the timer's status, which indicates the number of times
    1730              :  * it has expired since its status was last read.
    1731              :  *
    1732              :  * Calling this routine resets the timer's status to zero.
    1733              :  *
    1734              :  * @param timer     Address of timer.
    1735              :  *
    1736              :  * @return Timer status.
    1737              :  */
    1738            1 : __syscall uint32_t k_timer_status_get(struct k_timer *timer);
    1739              : 
    1740              : /**
    1741              :  * @brief Synchronize thread to timer expiration.
    1742              :  *
    1743              :  * This routine blocks the calling thread until the timer's status is non-zero
    1744              :  * (indicating that it has expired at least once since it was last examined)
    1745              :  * or the timer is stopped. If the timer status is already non-zero,
    1746              :  * or the timer is already stopped, the caller continues without waiting.
    1747              :  *
    1748              :  * Calling this routine resets the timer's status to zero.
    1749              :  *
    1750              :  * This routine must not be used by interrupt handlers, since they are not
    1751              :  * allowed to block.
    1752              :  *
    1753              :  * @param timer     Address of timer.
    1754              :  *
    1755              :  * @return Timer status.
    1756              :  */
    1757            1 : __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
    1758              : 
    1759              : #ifdef CONFIG_SYS_CLOCK_EXISTS
    1760              : 
    1761              : /**
    1762              :  * @brief Get next expiration time of a timer, in system ticks
    1763              :  *
    1764              :  * This routine returns the future system uptime reached at the next
    1765              :  * time of expiration of the timer, in units of system ticks.  If the
    1766              :  * timer is not running, current system time is returned.
    1767              :  *
    1768              :  * @param timer The timer object
    1769              :  * @return Uptime of expiration, in ticks
    1770              :  */
    1771            1 : __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
    1772              : 
/* Default implementation of k_timer_expires_ticks(): queries the timer's
 * embedded timeout record for its absolute expiration tick.
 */
static inline k_ticks_t z_impl_k_timer_expires_ticks(
				       const struct k_timer *timer)
{
	return z_timeout_expires(&timer->timeout);
}
    1778              : 
    1779              : /**
    1780              :  * @brief Get time remaining before a timer next expires, in system ticks
    1781              :  *
    1782              :  * This routine computes the time remaining before a running timer
    1783              :  * next expires, in units of system ticks.  If the timer is not
    1784              :  * running, it returns zero.
    1785              :  *
    1786              :  * @param timer The timer object
    1787              :  * @return Remaining time until expiration, in ticks
    1788              :  */
    1789            1 : __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
    1790              : 
/* Default implementation of k_timer_remaining_ticks(): queries the timer's
 * embedded timeout record for the ticks left until it next fires.
 */
static inline k_ticks_t z_impl_k_timer_remaining_ticks(
				       const struct k_timer *timer)
{
	return z_timeout_remaining(&timer->timeout);
}
    1796              : 
    1797              : /**
    1798              :  * @brief Get time remaining before a timer next expires.
    1799              :  *
    1800              :  * This routine computes the (approximate) time remaining before a running
    1801              :  * timer next expires. If the timer is not running, it returns zero.
    1802              :  *
    1803              :  * @param timer     Address of timer.
    1804              :  *
    1805              :  * @return Remaining time (in milliseconds).
    1806              :  */
    1807            1 : static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
    1808              : {
    1809              :         return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
    1810              : }
    1811              : 
    1812              : #endif /* CONFIG_SYS_CLOCK_EXISTS */
    1813              : 
    1814              : /**
    1815              :  * @brief Associate user-specific data with a timer.
    1816              :  *
    1817              :  * This routine records the @a user_data with the @a timer, to be retrieved
    1818              :  * later.
    1819              :  *
    1820              :  * It can be used e.g. in a timer handler shared across multiple subsystems to
    1821              :  * retrieve data specific to the subsystem this timer is associated with.
    1822              :  *
    1823              :  * @param timer     Address of timer.
    1824              :  * @param user_data User data to associate with the timer.
    1825              :  */
    1826            1 : __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
    1827              : 
    1828              : /**
    1829              :  * @internal
    1830              :  */
/* Implementation of k_timer_user_data_set(): stores the opaque pointer
 * directly in the timer object. No synchronization is performed here.
 */
static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
					       void *user_data)
{
	timer->user_data = user_data;
}
    1836              : 
    1837              : /**
    1838              :  * @brief Retrieve the user-specific data from a timer.
    1839              :  *
    1840              :  * @param timer     Address of timer.
    1841              :  *
    1842              :  * @return The user data.
    1843              :  */
    1844            1 : __syscall void *k_timer_user_data_get(const struct k_timer *timer);
    1845              : 
/* Implementation of k_timer_user_data_get(): returns the pointer last
 * stored via k_timer_user_data_set() (0/NULL from the static initializer
 * if never set).
 */
static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
{
	return timer->user_data;
}
    1850              : 
    1851              : /** @} */
    1852              : 
    1853              : /**
    1854              :  * @addtogroup clock_apis
    1855              :  * @ingroup kernel_apis
    1856              :  * @{
    1857              :  */
    1858              : 
    1859              : /**
    1860              :  * @brief Get system uptime, in system ticks.
    1861              :  *
    1862              :  * This routine returns the elapsed time since the system booted, in
    1863              :  * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
    1864              :  * fundamental unit of resolution of kernel timekeeping.
    1865              :  *
    1866              :  * @return Current uptime in ticks.
    1867              :  */
    1868            1 : __syscall int64_t k_uptime_ticks(void);
    1869              : 
    1870              : /**
    1871              :  * @brief Get system uptime.
    1872              :  *
    1873              :  * This routine returns the elapsed time since the system booted,
    1874              :  * in milliseconds.
    1875              :  *
    1876              :  * @note
    1877              :  *    While this function returns time in milliseconds, it does
    1878              :  *    not mean it has millisecond resolution. The actual resolution depends on
    1879              :  *    @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
    1880              :  *
    1881              :  * @return Current uptime in milliseconds.
    1882              :  */
    1883            1 : static inline int64_t k_uptime_get(void)
    1884              : {
    1885              :         return k_ticks_to_ms_floor64(k_uptime_ticks());
    1886              : }
    1887              : 
    1888              : /**
    1889              :  * @brief Get system uptime (32-bit version).
    1890              :  *
    1891              :  * This routine returns the lower 32 bits of the system uptime in
    1892              :  * milliseconds.
    1893              :  *
    1894              :  * Because correct conversion requires full precision of the system
    1895              :  * clock there is no benefit to using this over k_uptime_get() unless
    1896              :  * you know the application will never run long enough for the system
    1897              :  * clock to approach 2^32 ticks.  Calls to this function may involve
    1898              :  * interrupt blocking and 64-bit math.
    1899              :  *
    1900              :  * @note
    1901              :  *    While this function returns time in milliseconds, it does
    1902              :  *    not mean it has millisecond resolution. The actual resolution depends on
    1903              :  *    @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option
    1904              :  *
    1905              :  * @return The low 32 bits of the current uptime, in milliseconds.
    1906              :  */
    1907            1 : static inline uint32_t k_uptime_get_32(void)
    1908              : {
    1909              :         return (uint32_t)k_uptime_get();
    1910              : }
    1911              : 
    1912              : /**
    1913              :  * @brief Get system uptime in seconds.
    1914              :  *
    1915              :  * This routine returns the elapsed time since the system booted,
    1916              :  * in seconds.
    1917              :  *
    1918              :  * @return Current uptime in seconds.
    1919              :  */
    1920            1 : static inline uint32_t k_uptime_seconds(void)
    1921              : {
    1922              :         return k_ticks_to_sec_floor32(k_uptime_ticks());
    1923              : }
    1924              : 
    1925              : /**
    1926              :  * @brief Get elapsed time.
    1927              :  *
    1928              :  * This routine computes the elapsed time between the current system uptime
    1929              :  * and an earlier reference time, in milliseconds.
    1930              :  *
    1931              :  * @param reftime Pointer to a reference time, which is updated to the current
    1932              :  *                uptime upon return.
    1933              :  *
    1934              :  * @return Elapsed time.
    1935              :  */
    1936            1 : static inline int64_t k_uptime_delta(int64_t *reftime)
    1937              : {
    1938              :         int64_t uptime, delta;
    1939              : 
    1940              :         uptime = k_uptime_get();
    1941              :         delta = uptime - *reftime;
    1942              :         *reftime = uptime;
    1943              : 
    1944              :         return delta;
    1945              : }
    1946              : 
    1947              : /**
    1948              :  * @brief Read the hardware clock.
    1949              :  *
    1950              :  * This routine returns the current time, as measured by the system's hardware
    1951              :  * clock.
    1952              :  *
    1953              :  * @return Current hardware clock up-counter (in cycles).
    1954              :  */
/* Thin wrapper over the architecture layer's 32-bit cycle counter read. */
static inline uint32_t k_cycle_get_32(void)
{
	return arch_k_cycle_get_32();
}
    1959              : 
    1960              : /**
    1961              :  * @brief Read the 64-bit hardware clock.
    1962              :  *
    1963              :  * This routine returns the current time in 64-bits, as measured by the
    1964              :  * system's hardware clock, if available.
    1965              :  *
    1966              :  * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
    1967              :  *
    1968              :  * @return Current hardware clock up-counter (in cycles).
    1969              :  */
    1970            1 : static inline uint64_t k_cycle_get_64(void)
    1971              : {
    1972              :         if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
    1973              :                 __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
    1974              :                             "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
    1975              :                 return 0;
    1976              :         }
    1977              : 
    1978              :         return arch_k_cycle_get_64();
    1979              : }
    1980              : 
    1981              : /**
    1982              :  * @}
    1983              :  */
    1984              : 
/* Kernel queue object: a flagged singly-linked list of data items plus a
 * wait queue for threads blocked retrieving from it.
 */
struct k_queue {
	sys_sflist_t data_q;      /* flagged slist holding the queued items */
	struct k_spinlock lock;   /* NOTE(review): presumably guards data_q
				   * and wait_q -- confirm against kernel
				   * queue implementation
				   */
	_wait_q_t wait_q;         /* threads pending on the queue */

	Z_DECL_POLL_EVENT

	SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
};
    1994              : 
    1995              : /**
    1996              :  * @cond INTERNAL_HIDDEN
    1997              :  */
    1998              : 
/* Static initializer for struct k_queue. @a obj must be the queue variable
 * itself (not a pointer) so the list and wait-queue self-references
 * (&obj.data_q, &obj.wait_q) resolve to the object being initialized.
 */
#define Z_QUEUE_INITIALIZER(obj) \
	{ \
	.data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
	.lock = { }, \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q),	\
	Z_POLL_EVENT_OBJ_INIT(obj)		\
	}
    2006              : 
    2007              : /**
    2008              :  * INTERNAL_HIDDEN @endcond
    2009              :  */
    2010              : 
    2011              : /**
    2012              :  * @defgroup queue_apis Queue APIs
    2013              :  * @ingroup kernel_apis
    2014              :  * @{
    2015              :  */
    2016              : 
    2017              : /**
    2018              :  * @brief Initialize a queue.
    2019              :  *
    2020              :  * This routine initializes a queue object, prior to its first use.
    2021              :  *
    2022              :  * @param queue Address of the queue.
    2023              :  */
    2024            1 : __syscall void k_queue_init(struct k_queue *queue);
    2025              : 
    2026              : /**
    2027              :  * @brief Cancel waiting on a queue.
    2028              :  *
 * This routine causes the first thread pending on @a queue, if any, to
    2030              :  * return from k_queue_get() call with NULL value (as if timeout expired).
    2031              :  * If the queue is being waited on by k_poll(), it will return with
    2032              :  * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
    2033              :  * k_queue_get() will return NULL).
    2034              :  *
    2035              :  * @funcprops \isr_ok
    2036              :  *
    2037              :  * @param queue Address of the queue.
    2038              :  */
    2039            1 : __syscall void k_queue_cancel_wait(struct k_queue *queue);
    2040              : 
    2041              : /**
    2042              :  * @brief Append an element to the end of a queue.
    2043              :  *
    2044              :  * This routine appends a data item to @a queue. A queue data item must be
    2045              :  * aligned on a word boundary, and the first word of the item is reserved
    2046              :  * for the kernel's use.
    2047              :  *
    2048              :  * @funcprops \isr_ok
    2049              :  *
    2050              :  * @param queue Address of the queue.
    2051              :  * @param data Address of the data item.
    2052              :  */
    2053            1 : void k_queue_append(struct k_queue *queue, void *data);
    2054              : 
    2055              : /**
    2056              :  * @brief Append an element to a queue.
    2057              :  *
    2058              :  * This routine appends a data item to @a queue. There is an implicit memory
    2059              :  * allocation to create an additional temporary bookkeeping data structure from
    2060              :  * the calling thread's resource pool, which is automatically freed when the
    2061              :  * item is removed. The data itself is not copied.
    2062              :  *
    2063              :  * @funcprops \isr_ok
    2064              :  *
    2065              :  * @param queue Address of the queue.
    2066              :  * @param data Address of the data item.
    2067              :  *
    2068              :  * @retval 0 on success
    2069              :  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
    2070              :  */
    2071            1 : __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
    2072              : 
    2073              : /**
    2074              :  * @brief Prepend an element to a queue.
    2075              :  *
    2076              :  * This routine prepends a data item to @a queue. A queue data item must be
    2077              :  * aligned on a word boundary, and the first word of the item is reserved
    2078              :  * for the kernel's use.
    2079              :  *
    2080              :  * @funcprops \isr_ok
    2081              :  *
    2082              :  * @param queue Address of the queue.
    2083              :  * @param data Address of the data item.
    2084              :  */
    2085            1 : void k_queue_prepend(struct k_queue *queue, void *data);
    2086              : 
    2087              : /**
    2088              :  * @brief Prepend an element to a queue.
    2089              :  *
    2090              :  * This routine prepends a data item to @a queue. There is an implicit memory
    2091              :  * allocation to create an additional temporary bookkeeping data structure from
    2092              :  * the calling thread's resource pool, which is automatically freed when the
    2093              :  * item is removed. The data itself is not copied.
    2094              :  *
    2095              :  * @funcprops \isr_ok
    2096              :  *
    2097              :  * @param queue Address of the queue.
    2098              :  * @param data Address of the data item.
    2099              :  *
    2100              :  * @retval 0 on success
    2101              :  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
    2102              :  */
    2103            1 : __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
    2104              : 
    2105              : /**
    2106              :  * @brief Inserts an element to a queue.
    2107              :  *
    2108              :  * This routine inserts a data item to @a queue after previous item. A queue
    2109              :  * data item must be aligned on a word boundary, and the first word of
    2110              :  * the item is reserved for the kernel's use.
    2111              :  *
    2112              :  * @funcprops \isr_ok
    2113              :  *
    2114              :  * @param queue Address of the queue.
    2115              :  * @param prev Address of the previous data item.
    2116              :  * @param data Address of the data item.
    2117              :  */
    2118            1 : void k_queue_insert(struct k_queue *queue, void *prev, void *data);
    2119              : 
    2120              : /**
    2121              :  * @brief Atomically append a list of elements to a queue.
    2122              :  *
    2123              :  * This routine adds a list of data items to @a queue in one operation.
    2124              :  * The data items must be in a singly-linked list, with the first word
    2125              :  * in each data item pointing to the next data item; the list must be
    2126              :  * NULL-terminated.
    2127              :  *
    2128              :  * @funcprops \isr_ok
    2129              :  *
    2130              :  * @param queue Address of the queue.
    2131              :  * @param head Pointer to first node in singly-linked list.
    2132              :  * @param tail Pointer to last node in singly-linked list.
    2133              :  *
    2134              :  * @retval 0 on success
    2135              :  * @retval -EINVAL on invalid supplied data
    2136              :  *
    2137              :  */
    2138            1 : int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
    2139              : 
    2140              : /**
    2141              :  * @brief Atomically add a list of elements to a queue.
    2142              :  *
    2143              :  * This routine adds a list of data items to @a queue in one operation.
    2144              :  * The data items must be in a singly-linked list implemented using a
    2145              :  * sys_slist_t object. Upon completion, the original list is empty.
    2146              :  *
    2147              :  * @funcprops \isr_ok
    2148              :  *
    2149              :  * @param queue Address of the queue.
    2150              :  * @param list Pointer to sys_slist_t object.
    2151              :  *
    2152              :  * @retval 0 on success
    2153              :  * @retval -EINVAL on invalid data
    2154              :  */
    2155            1 : int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
    2156              : 
    2157              : /**
    2158              :  * @brief Get an element from a queue.
    2159              :  *
 * This routine removes the first data item from @a queue. The first word of the
    2161              :  * data item is reserved for the kernel's use.
    2162              :  *
    2163              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    2164              :  *
    2165              :  * @funcprops \isr_ok
    2166              :  *
    2167              :  * @param queue Address of the queue.
    2168              :  * @param timeout Waiting period to obtain a data item, or one of the special
    2169              :  *                values K_NO_WAIT and K_FOREVER.
    2170              :  *
    2171              :  * @return Address of the data item if successful; NULL if returned
    2172              :  * without waiting, or waiting period timed out.
    2173              :  */
    2174            1 : __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
    2175              : 
    2176              : /**
    2177              :  * @brief Remove an element from a queue.
    2178              :  *
    2179              :  * This routine removes data item from @a queue. The first word of the
 * This routine removes data item from @a queue. The first word of the
 * data item is reserved for the kernel's use. Removing elements from k_queue
 * relies on sys_slist_find_and_remove which is not a constant time operation.
    2182              :  *
    2183              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    2184              :  *
    2185              :  * @funcprops \isr_ok
    2186              :  *
    2187              :  * @param queue Address of the queue.
    2188              :  * @param data Address of the data item.
    2189              :  *
    2190              :  * @return true if data item was removed
    2191              :  */
    2192            1 : bool k_queue_remove(struct k_queue *queue, void *data);
    2193              : 
    2194              : /**
    2195              :  * @brief Append an element to a queue only if it's not present already.
    2196              :  *
    2197              :  * This routine appends data item to @a queue. The first word of the data
    2198              :  * item is reserved for the kernel's use. Appending elements to k_queue
    2199              :  * relies on sys_slist_is_node_in_list which is not a constant time operation.
    2200              :  *
    2201              :  * @funcprops \isr_ok
    2202              :  *
    2203              :  * @param queue Address of the queue.
    2204              :  * @param data Address of the data item.
    2205              :  *
    2206              :  * @return true if data item was added, false if not
    2207              :  */
    2208            1 : bool k_queue_unique_append(struct k_queue *queue, void *data);
    2209              : 
    2210              : /**
    2211              :  * @brief Query a queue to see if it has data available.
    2212              :  *
    2213              :  * Note that the data might be already gone by the time this function returns
    2214              :  * if other threads are also trying to read from the queue.
    2215              :  *
    2216              :  * @funcprops \isr_ok
    2217              :  *
    2218              :  * @param queue Address of the queue.
    2219              :  *
    2220              :  * @return Non-zero if the queue is empty.
    2221              :  * @return 0 if data is available.
    2222              :  */
    2223            1 : __syscall int k_queue_is_empty(struct k_queue *queue);
    2224              : 
    2225              : static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
    2226              : {
    2227              :         return sys_sflist_is_empty(&queue->data_q) ? 1 : 0;
    2228              : }
    2229              : 
    2230              : /**
    2231              :  * @brief Peek element at the head of queue.
    2232              :  *
    2233              :  * Return element from the head of queue without removing it.
    2234              :  *
    2235              :  * @param queue Address of the queue.
    2236              :  *
    2237              :  * @return Head element, or NULL if queue is empty.
    2238              :  */
    2239            1 : __syscall void *k_queue_peek_head(struct k_queue *queue);
    2240              : 
    2241              : /**
    2242              :  * @brief Peek element at the tail of queue.
    2243              :  *
    2244              :  * Return element from the tail of queue without removing it.
    2245              :  *
    2246              :  * @param queue Address of the queue.
    2247              :  *
    2248              :  * @return Tail element, or NULL if queue is empty.
    2249              :  */
    2250            1 : __syscall void *k_queue_peek_tail(struct k_queue *queue);
    2251              : 
    2252              : /**
    2253              :  * @brief Statically define and initialize a queue.
    2254              :  *
    2255              :  * The queue can be accessed outside the module where it is defined using:
    2256              :  *
    2257              :  * @code extern struct k_queue <name>; @endcode
    2258              :  *
    2259              :  * @param name Name of the queue.
    2260              :  */
    2261            1 : #define K_QUEUE_DEFINE(name) \
    2262              :         STRUCT_SECTION_ITERABLE(k_queue, name) = \
    2263              :                 Z_QUEUE_INITIALIZER(name)
    2264              : 
    2265              : /** @} */
    2266              : 
    2267              : #ifdef CONFIG_USERSPACE
    2268              : /**
    2269              :  * @brief futex structure
    2270              :  *
    2271              :  * A k_futex is a lightweight mutual exclusion primitive designed
    2272              :  * to minimize kernel involvement. Uncontended operation relies
    2273              :  * only on atomic access to shared memory. k_futex are tracked as
    2274              :  * kernel objects and can live in user memory so that any access
    2275              :  * bypasses the kernel object permission management mechanism.
    2276              :  */
    2277            1 : struct k_futex {
    2278            0 :         atomic_t val;
    2279              : };
    2280              : 
    2281              : /**
    2282              :  * @brief futex kernel data structure
    2283              :  *
    2284              :  * z_futex_data is the helper data structure for k_futex to complete
    2285              :  * futex contended operation on kernel side, structure z_futex_data
    2286              :  * of every futex object is invisible in user mode.
    2287              :  */
    2288              : struct z_futex_data {
    2289              :         _wait_q_t wait_q;
    2290              :         struct k_spinlock lock;
    2291              : };
    2292              : 
    2293              : #define Z_FUTEX_DATA_INITIALIZER(obj) \
    2294              :         { \
    2295              :         .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
    2296              :         }
    2297              : 
    2298              : /**
    2299              :  * @defgroup futex_apis FUTEX APIs
    2300              :  * @ingroup kernel_apis
    2301              :  * @{
    2302              :  */
    2303              : 
    2304              : /**
    2305              :  * @brief Pend the current thread on a futex
    2306              :  *
    2307              :  * Tests that the supplied futex contains the expected value, and if so,
    2308              :  * goes to sleep until some other thread calls k_futex_wake() on it.
    2309              :  *
    2310              :  * @param futex Address of the futex.
    2311              :  * @param expected Expected value of the futex, if it is different the caller
    2312              :  *                 will not wait on it.
    2313              :  * @param timeout Waiting period on the futex, or one of the special values
    2314              :  *                K_NO_WAIT or K_FOREVER.
    2315              :  * @retval -EACCES Caller does not have read access to futex address.
    2316              :  * @retval -EAGAIN If the futex value did not match the expected parameter.
    2317              :  * @retval -EINVAL Futex parameter address not recognized by the kernel.
    2318              :  * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
    2319              :  * @retval 0 if the caller went to sleep and was woken up. The caller
    2320              :  *           should check the futex's value on wakeup to determine if it needs
    2321              :  *           to block again.
    2322              :  */
    2323            1 : __syscall int k_futex_wait(struct k_futex *futex, int expected,
    2324              :                            k_timeout_t timeout);
    2325              : 
    2326              : /**
    2327              :  * @brief Wake one/all threads pending on a futex
    2328              :  *
    2329              :  * Wake up the highest priority thread pending on the supplied futex, or
    2330              :  * wakeup all the threads pending on the supplied futex, and the behavior
    2331              :  * depends on wake_all.
    2332              :  *
    2333              :  * @param futex Futex to wake up pending threads.
    2334              :  * @param wake_all If true, wake up all pending threads; If false,
    2335              :  *                 wakeup the highest priority thread.
    2336              :  * @retval -EACCES Caller does not have access to the futex address.
    2337              :  * @retval -EINVAL Futex parameter address not recognized by the kernel.
    2338              :  * @return Number of threads that were woken up.
    2339              :  */
    2340            1 : __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
    2341              : 
    2342              : /** @} */
    2343              : #endif
    2344              : 
    2345              : /**
    2346              :  * @defgroup event_apis Event APIs
    2347              :  * @ingroup kernel_apis
    2348              :  * @{
    2349              :  */
    2350              : 
    2351              : /**
    2352              :  * Event Structure
    2353              :  * @ingroup event_apis
    2354              :  */
    2355              : 
    2356            1 : struct k_event {
    2357            0 :         _wait_q_t         wait_q;
    2358            0 :         uint32_t          events;
    2359            0 :         struct k_spinlock lock;
    2360              : 
    2361              :         SYS_PORT_TRACING_TRACKING_FIELD(k_event)
    2362              : 
    2363              : #ifdef CONFIG_OBJ_CORE_EVENT
    2364              :         struct k_obj_core obj_core;
    2365              : #endif
    2366              : 
    2367              : };
    2368              : 
    2369              : #define Z_EVENT_INITIALIZER(obj) \
    2370              :         { \
    2371              :         .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    2372              :         .events = 0, \
    2373              :         .lock = {}, \
    2374              :         }
    2375              : 
    2376              : /**
    2377              :  * @brief Initialize an event object
    2378              :  *
    2379              :  * This routine initializes an event object, prior to its first use.
    2380              :  *
    2381              :  * @param event Address of the event object.
    2382              :  */
    2383            1 : __syscall void k_event_init(struct k_event *event);
    2384              : 
    2385              : /**
    2386              :  * @brief Post one or more events to an event object
    2387              :  *
    2388              :  * This routine posts one or more events to an event object. All tasks waiting
    2389              :  * on the event object @a event whose waiting conditions become met by this
    2390              :  * posting immediately unpend.
    2391              :  *
    2392              :  * Posting differs from setting in that posted events are merged together with
    2393              :  * the current set of events tracked by the event object.
    2394              :  *
    2395              :  * @funcprops \isr_ok
    2396              :  *
    2397              :  * @param event Address of the event object
    2398              :  * @param events Set of events to post to @a event
    2399              :  *
    2400              :  * @retval Previous value of the events in @a event
    2401              :  */
    2402            1 : __syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
    2403              : 
    2404              : /**
    2405              :  * @brief Set the events in an event object
    2406              :  *
    2407              :  * This routine sets the events stored in event object to the specified value.
    2408              :  * All tasks waiting on the event object @a event whose waiting conditions
    2409              :  * become met by this immediately unpend.
    2410              :  *
    2411              :  * Setting differs from posting in that set events replace the current set of
    2412              :  * events tracked by the event object.
    2413              :  *
    2414              :  * @funcprops \isr_ok
    2415              :  *
    2416              :  * @param event Address of the event object
    2417              :  * @param events Set of events to set in @a event
    2418              :  *
    2419              :  * @retval Previous value of the events in @a event
    2420              :  */
    2421            1 : __syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
    2422              : 
    2423              : /**
    2424              :  * @brief Set or clear the events in an event object
    2425              :  *
    2426              :  * This routine sets the events stored in event object to the specified value.
    2427              :  * All tasks waiting on the event object @a event whose waiting conditions
    2428              :  * become met by this immediately unpend. Unlike @ref k_event_set, this routine
    2429              :  * allows specific event bits to be set and cleared as determined by the mask.
    2430              :  *
    2431              :  * @funcprops \isr_ok
    2432              :  *
    2433              :  * @param event Address of the event object
    2434              :  * @param events Set of events to set/clear in @a event
    2435              :  * @param events_mask Mask to be applied to @a events
    2436              :  *
    2437              :  * @retval Previous value of the events in @a events_mask
    2438              :  */
    2439            1 : __syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
    2440              :                                   uint32_t events_mask);
    2441              : 
    2442              : /**
    2443              :  * @brief Clear the events in an event object
    2444              :  *
    2445              :  * This routine clears (resets) the specified events stored in an event object.
    2446              :  *
    2447              :  * @funcprops \isr_ok
    2448              :  *
    2449              :  * @param event Address of the event object
    2450              :  * @param events Set of events to clear in @a event
    2451              :  *
    2452              :  * @retval Previous value of the events in @a event
    2453              :  */
    2454            1 : __syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
    2455              : 
    2456              : /**
    2457              :  * @brief Wait for any of the specified events
    2458              :  *
    2459              :  * This routine waits on event object @a event until any of the specified
    2460              :  * events have been delivered to the event object, or the maximum wait time
    2461              :  * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
    2462              :  * events that are expressed as bits in a single 32-bit word.
    2463              :  *
    2464              :  * @note The caller must be careful when resetting if there are multiple threads
    2465              :  * waiting for the event object @a event.
    2466              :  *
    2467              :  * @note This function may be called from ISR context only when @a timeout is
    2468              :  * set to K_NO_WAIT.
    2469              :  *
    2470              :  * @param event Address of the event object
    2471              :  * @param events Set of desired events on which to wait
    2472              :  * @param reset If true, clear the set of events tracked by the event object
    2473              :  *              before waiting. If false, do not clear the events.
    2474              :  * @param timeout Waiting period for the desired set of events or one of the
    2475              :  *                special values K_NO_WAIT and K_FOREVER.
    2476              :  *
    2477              :  * @retval set of matching events upon success
    2478              :  * @retval 0 if matching events were not received within the specified time
    2479              :  */
    2480            1 : __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
    2481              :                                 bool reset, k_timeout_t timeout);
    2482              : 
    2483              : /**
    2484              :  * @brief Wait for all of the specified events
    2485              :  *
    2486              :  * This routine waits on event object @a event until all of the specified
    2487              :  * events have been delivered to the event object, or the maximum wait time
    2488              :  * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
    2489              :  * events that are expressed as bits in a single 32-bit word.
    2490              :  *
    2491              :  * @note The caller must be careful when resetting if there are multiple threads
    2492              :  * waiting for the event object @a event.
    2493              :  *
    2494              :  * @note This function may be called from ISR context only when @a timeout is
    2495              :  * set to K_NO_WAIT.
    2496              :  *
    2497              :  * @param event Address of the event object
    2498              :  * @param events Set of desired events on which to wait
    2499              :  * @param reset If true, clear the set of events tracked by the event object
    2500              :  *              before waiting. If false, do not clear the events.
    2501              :  * @param timeout Waiting period for the desired set of events or one of the
    2502              :  *                special values K_NO_WAIT and K_FOREVER.
    2503              :  *
    2504              :  * @retval set of matching events upon success
    2505              :  * @retval 0 if matching events were not received within the specified time
    2506              :  */
    2507            1 : __syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
    2508              :                                     bool reset, k_timeout_t timeout);
    2509              : 
    2510              : /**
    2511              :  * @brief Test the events currently tracked in the event object
    2512              :  *
    2513              :  * @funcprops \isr_ok
    2514              :  *
    2515              :  * @param event Address of the event object
    2516              :  * @param events_mask Set of desired events to test
    2517              :  *
    2518              :  * @retval Current value of events in @a events_mask
    2519              :  */
    2520            1 : static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
    2521              : {
    2522              :         return k_event_wait(event, events_mask, false, K_NO_WAIT);
    2523              : }
    2524              : 
    2525              : /**
    2526              :  * @brief Statically define and initialize an event object
    2527              :  *
    2528              :  * The event can be accessed outside the module where it is defined using:
    2529              :  *
    2530              :  * @code extern struct k_event <name>; @endcode
    2531              :  *
    2532              :  * @param name Name of the event object.
    2533              :  */
    2534            1 : #define K_EVENT_DEFINE(name)                                   \
    2535              :         STRUCT_SECTION_ITERABLE(k_event, name) =               \
    2536              :                 Z_EVENT_INITIALIZER(name);
    2537              : 
    2538              : /** @} */
    2539              : 
    2540            0 : struct k_fifo {
    2541              :         struct k_queue _queue;
    2542              : #ifdef CONFIG_OBJ_CORE_FIFO
    2543              :         struct k_obj_core  obj_core;
    2544              : #endif
    2545              : };
    2546              : 
    2547              : /**
    2548              :  * @cond INTERNAL_HIDDEN
    2549              :  */
    2550              : #define Z_FIFO_INITIALIZER(obj) \
    2551              :         { \
    2552              :         ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
    2553              :         }
    2554              : 
    2555              : /**
    2556              :  * INTERNAL_HIDDEN @endcond
    2557              :  */
    2558              : 
    2559              : /**
    2560              :  * @defgroup fifo_apis FIFO APIs
    2561              :  * @ingroup kernel_apis
    2562              :  * @{
    2563              :  */
    2564              : 
    2565              : /**
    2566              :  * @brief Initialize a FIFO queue.
    2567              :  *
    2568              :  * This routine initializes a FIFO queue, prior to its first use.
    2569              :  *
    2570              :  * @param fifo Address of the FIFO queue.
    2571              :  */
    2572            1 : #define k_fifo_init(fifo)                                    \
    2573              :         ({                                                   \
    2574              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
    2575              :         k_queue_init(&(fifo)->_queue);                       \
    2576              :         K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo);   \
    2577              :         K_OBJ_CORE_LINK(K_OBJ_CORE(fifo));                   \
    2578              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo);  \
    2579              :         })
    2580              : 
    2581              : /**
    2582              :  * @brief Cancel waiting on a FIFO queue.
    2583              :  *
    2584              :  * This routine causes first thread pending on @a fifo, if any, to
    2585              :  * return from k_fifo_get() call with NULL value (as if timeout
    2586              :  * expired).
    2587              :  *
    2588              :  * @funcprops \isr_ok
    2589              :  *
    2590              :  * @param fifo Address of the FIFO queue.
    2591              :  */
    2592            1 : #define k_fifo_cancel_wait(fifo) \
    2593              :         ({ \
    2594              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
    2595              :         k_queue_cancel_wait(&(fifo)->_queue); \
    2596              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
    2597              :         })
    2598              : 
    2599              : /**
    2600              :  * @brief Add an element to a FIFO queue.
    2601              :  *
    2602              :  * This routine adds a data item to @a fifo. A FIFO data item must be
    2603              :  * aligned on a word boundary, and the first word of the item is reserved
    2604              :  * for the kernel's use.
    2605              :  *
    2606              :  * @funcprops \isr_ok
    2607              :  *
    2608              :  * @param fifo Address of the FIFO.
    2609              :  * @param data Address of the data item.
    2610              :  */
    2611            1 : #define k_fifo_put(fifo, data) \
    2612              :         ({ \
    2613              :         void *_data = data; \
    2614              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, _data); \
    2615              :         k_queue_append(&(fifo)->_queue, _data); \
    2616              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, _data); \
    2617              :         })
    2618              : 
    2619              : /**
    2620              :  * @brief Add an element to a FIFO queue.
    2621              :  *
    2622              :  * This routine adds a data item to @a fifo. There is an implicit memory
    2623              :  * allocation to create an additional temporary bookkeeping data structure from
    2624              :  * the calling thread's resource pool, which is automatically freed when the
    2625              :  * item is removed. The data itself is not copied.
    2626              :  *
    2627              :  * @funcprops \isr_ok
    2628              :  *
    2629              :  * @param fifo Address of the FIFO.
    2630              :  * @param data Address of the data item.
    2631              :  *
    2632              :  * @retval 0 on success
    2633              :  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
    2634              :  */
    2635            1 : #define k_fifo_alloc_put(fifo, data) \
    2636              :         ({ \
    2637              :         void *_data = data; \
    2638              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, _data); \
    2639              :         int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
    2640              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, _data, fap_ret); \
    2641              :         fap_ret; \
    2642              :         })
    2643              : 
    2644              : /**
    2645              :  * @brief Atomically add a list of elements to a FIFO.
    2646              :  *
    2647              :  * This routine adds a list of data items to @a fifo in one operation.
    2648              :  * The data items must be in a singly-linked list, with the first word of
    2649              :  * each data item pointing to the next data item; the list must be
    2650              :  * NULL-terminated.
    2651              :  *
    2652              :  * @funcprops \isr_ok
    2653              :  *
    2654              :  * @param fifo Address of the FIFO queue.
    2655              :  * @param head Pointer to first node in singly-linked list.
    2656              :  * @param tail Pointer to last node in singly-linked list.
    2657              :  */
    2658            1 : #define k_fifo_put_list(fifo, head, tail) \
    2659              :         ({ \
    2660              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
    2661              :         k_queue_append_list(&(fifo)->_queue, head, tail); \
    2662              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
    2663              :         })
    2664              : 
    2665              : /**
    2666              :  * @brief Atomically add a list of elements to a FIFO queue.
    2667              :  *
    2668              :  * This routine adds a list of data items to @a fifo in one operation.
    2669              :  * The data items must be in a singly-linked list implemented using a
    2670              :  * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
    2671              :  * and must be re-initialized via sys_slist_init().
    2672              :  *
    2673              :  * @funcprops \isr_ok
    2674              :  *
    2675              :  * @param fifo Address of the FIFO queue.
    2676              :  * @param list Pointer to sys_slist_t object.
    2677              :  */
    2678            1 : #define k_fifo_put_slist(fifo, list) \
    2679              :         ({ \
    2680              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
    2681              :         k_queue_merge_slist(&(fifo)->_queue, list); \
    2682              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
    2683              :         })
    2684              : 
    2685              : /**
    2686              :  * @brief Get an element from a FIFO queue.
    2687              :  *
    2688              :  * This routine removes a data item from @a fifo in a "first in, first out"
    2689              :  * manner. The first word of the data item is reserved for the kernel's use.
    2690              :  *
    2691              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    2692              :  *
    2693              :  * @funcprops \isr_ok
    2694              :  *
    2695              :  * @param fifo Address of the FIFO queue.
    2696              :  * @param timeout Waiting period to obtain a data item,
    2697              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    2698              :  *
    2699              :  * @return Address of the data item if successful; NULL if returned
    2700              :  * without waiting, or waiting period timed out.
    2701              :  */
    2702            1 : #define k_fifo_get(fifo, timeout) \
    2703              :         ({ \
    2704              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
    2705              :         void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
    2706              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
    2707              :         fg_ret; \
    2708              :         })
    2709              : 
    2710              : /**
    2711              :  * @brief Query a FIFO queue to see if it has data available.
    2712              :  *
    2713              :  * Note that the data might be already gone by the time this function returns
    2714              :  * if other threads are also trying to read from the FIFO.
    2715              :  *
    2716              :  * @funcprops \isr_ok
    2717              :  *
    2718              :  * @param fifo Address of the FIFO queue.
    2719              :  *
    2720              :  * @return Non-zero if the FIFO queue is empty.
    2721              :  * @return 0 if data is available.
    2722              :  */
    2723            1 : #define k_fifo_is_empty(fifo) \
    2724              :         k_queue_is_empty(&(fifo)->_queue)
    2725              : 
    2726              : /**
    2727              :  * @brief Peek element at the head of a FIFO queue.
    2728              :  *
    2729              :  * Return element from the head of FIFO queue without removing it. A use case
    2730              :  * for this is if elements of the FIFO object are themselves containers. Then
    2731              :  * on each iteration of processing, a head container will be peeked,
    2732              :  * and some data processed out of it, and only if the container is empty,
    2733              :  * it will be completely removed from the FIFO queue.
    2734              :  *
    2735              :  * @param fifo Address of the FIFO queue.
    2736              :  *
    2737              :  * @return Head element, or NULL if the FIFO queue is empty.
    2738              :  */
    2739            1 : #define k_fifo_peek_head(fifo) \
    2740              :         ({ \
    2741              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
    2742              :         void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
    2743              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
    2744              :         fph_ret; \
    2745              :         })
    2746              : 
    2747              : /**
    2748              :  * @brief Peek element at the tail of FIFO queue.
    2749              :  *
    2750              :  * Return element from the tail of FIFO queue (without removing it). A use case
    2751              :  * for this is if elements of the FIFO queue are themselves containers. Then
    2752              :  * it may be useful to add more data to the last container in a FIFO queue.
    2753              :  *
    2754              :  * @param fifo Address of the FIFO queue.
    2755              :  *
    2756              :  * @return Tail element, or NULL if a FIFO queue is empty.
    2757              :  */
    2758            1 : #define k_fifo_peek_tail(fifo) \
    2759              :         ({ \
    2760              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
    2761              :         void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
    2762              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
    2763              :         fpt_ret; \
    2764              :         })
    2765              : 
    2766              : /**
    2767              :  * @brief Statically define and initialize a FIFO queue.
    2768              :  *
    2769              :  * The FIFO queue can be accessed outside the module where it is defined using:
    2770              :  *
    2771              :  * @code extern struct k_fifo <name>; @endcode
    2772              :  *
    2773              :  * @param name Name of the FIFO queue.
    2774              :  */
    2775            1 : #define K_FIFO_DEFINE(name) \
    2776              :         STRUCT_SECTION_ITERABLE(k_fifo, name) = \
    2777              :                 Z_FIFO_INITIALIZER(name)
    2778              : 
    2779              : /** @} */
    2780              : 
    2781            0 : struct k_lifo {
    2782              :         struct k_queue _queue;
    2783              : #ifdef CONFIG_OBJ_CORE_LIFO
    2784              :         struct k_obj_core  obj_core;
    2785              : #endif
    2786              : };
    2787              : 
    2788              : /**
    2789              :  * @cond INTERNAL_HIDDEN
    2790              :  */
    2791              : 
    2792              : #define Z_LIFO_INITIALIZER(obj) \
    2793              :         { \
    2794              :         ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
    2795              :         }
    2796              : 
    2797              : /**
    2798              :  * INTERNAL_HIDDEN @endcond
    2799              :  */
    2800              : 
    2801              : /**
    2802              :  * @defgroup lifo_apis LIFO APIs
    2803              :  * @ingroup kernel_apis
    2804              :  * @{
    2805              :  */
    2806              : 
    2807              : /**
    2808              :  * @brief Initialize a LIFO queue.
    2809              :  *
    2810              :  * This routine initializes a LIFO queue object, prior to its first use.
    2811              :  *
    2812              :  * @param lifo Address of the LIFO queue.
    2813              :  */
    2814            1 : #define k_lifo_init(lifo)                                    \
    2815              :         ({                                                   \
    2816              :         SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
    2817              :         k_queue_init(&(lifo)->_queue);                       \
    2818              :         K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo);   \
    2819              :         K_OBJ_CORE_LINK(K_OBJ_CORE(lifo));                   \
    2820              :         SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo);  \
    2821              :         })
    2822              : 
    2823              : /**
    2824              :  * @brief Add an element to a LIFO queue.
    2825              :  *
    2826              :  * This routine adds a data item to @a lifo. A LIFO queue data item must be
    2827              :  * aligned on a word boundary, and the first word of the item is
    2828              :  * reserved for the kernel's use.
    2829              :  *
    2830              :  * @funcprops \isr_ok
    2831              :  *
    2832              :  * @param lifo Address of the LIFO queue.
    2833              :  * @param data Address of the data item.
    2834              :  */
#define k_lifo_put(lifo, data) \
        ({ \
        /* evaluate the data argument exactly once, so the tracing */ \
        /* hooks and the queue operation see the same pointer */ \
        void *_data = data; \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, _data); \
        /* prepend so the newest item is dequeued first (LIFO order) */ \
        k_queue_prepend(&(lifo)->_queue, _data); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, _data); \
        })
    2842              : 
    2843              : /**
    2844              :  * @brief Add an element to a LIFO queue.
    2845              :  *
    2846              :  * This routine adds a data item to @a lifo. There is an implicit memory
    2847              :  * allocation to create an additional temporary bookkeeping data structure from
    2848              :  * the calling thread's resource pool, which is automatically freed when the
    2849              :  * item is removed. The data itself is not copied.
    2850              :  *
    2851              :  * @funcprops \isr_ok
    2852              :  *
    2853              :  * @param lifo Address of the LIFO.
    2854              :  * @param data Address of the data item.
    2855              :  *
    2856              :  * @retval 0 on success
    2857              :  * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
    2858              :  */
#define k_lifo_alloc_put(lifo, data) \
        ({ \
        /* evaluate the data argument exactly once */ \
        void *_data = data; \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, _data); \
        int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, _data, lap_ret); \
        /* value of the statement expression: 0 or -ENOMEM */ \
        lap_ret; \
        })
    2867              : 
    2868              : /**
    2869              :  * @brief Get an element from a LIFO queue.
    2870              :  *
    2871              :  * This routine removes a data item from @a LIFO in a "last in, first out"
    2872              :  * manner. The first word of the data item is reserved for the kernel's use.
    2873              :  *
    2874              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    2875              :  *
    2876              :  * @funcprops \isr_ok
    2877              :  *
    2878              :  * @param lifo Address of the LIFO queue.
    2879              :  * @param timeout Waiting period to obtain a data item,
    2880              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    2881              :  *
    2882              :  * @return Address of the data item if successful; NULL if returned
    2883              :  * without waiting, or waiting period timed out.
    2884              :  */
#define k_lifo_get(lifo, timeout) \
        ({ \
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
        /* k_queue_get() removes from the head, where k_lifo_put() */ \
        /* prepends -- hence last-in, first-out ordering */ \
        void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
        /* value of the statement expression: item address or NULL */ \
        lg_ret; \
        })
    2892              : 
    2893              : /**
    2894              :  * @brief Statically define and initialize a LIFO queue.
    2895              :  *
    2896              :  * The LIFO queue can be accessed outside the module where it is defined using:
    2897              :  *
    2898              :  * @code extern struct k_lifo <name>; @endcode
    2899              :  *
 * @param name Name of the LIFO queue.
    2901              :  */
/* Places the object in the k_lifo iterable section (so it can be
 * enumerated) and statically initializes it.
 */
#define K_LIFO_DEFINE(name) \
        STRUCT_SECTION_ITERABLE(k_lifo, name) = \
                Z_LIFO_INITIALIZER(name)
    2905              : 
    2906              : /** @} */
    2907              : 
    2908              : /**
    2909              :  * @cond INTERNAL_HIDDEN
    2910              :  */
#define K_STACK_FLAG_ALLOC      ((uint8_t)1)    /* Buffer was allocated */

/* Type of the values held in a k_stack; uintptr_t, so wide enough to
 * store a pointer.
 */
typedef uintptr_t stack_data_t;
    2914              : 
/* Kernel stack (LIFO of stack_data_t values); all members internal. */
struct k_stack {
        _wait_q_t wait_q;       /* threads blocked in k_stack_pop() */
        struct k_spinlock lock; /* protects base/next/top updates */
        /* base: start of buffer; next: first free slot (== base when
         * empty); top: one past the last usable slot
         */
        stack_data_t *base, *next, *top;

        uint8_t flags;          /* K_STACK_FLAG_ALLOC if buffer was heap-allocated */

        SYS_PORT_TRACING_TRACKING_FIELD(k_stack)

#ifdef CONFIG_OBJ_CORE_STACK
        struct k_obj_core  obj_core;
#endif
};
    2928              : 
/* Static initializer (internal): an empty stack has next == base and
 * top one past the last slot. Unlisted members (lock, flags) are
 * zero-initialized by the braced initializer.
 */
#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
        { \
        .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q),     \
        .base = (stack_buffer), \
        .next = (stack_buffer), \
        .top = (stack_buffer) + (stack_num_entries), \
        }
    2936              : 
    2937              : /**
    2938              :  * INTERNAL_HIDDEN @endcond
    2939              :  */
    2940              : 
    2941              : /**
    2942              :  * @defgroup stack_apis Stack APIs
    2943              :  * @ingroup kernel_apis
    2944              :  * @{
    2945              :  */
    2946              : 
    2947              : /**
    2948              :  * @brief Initialize a stack.
    2949              :  *
    2950              :  * This routine initializes a stack object, prior to its first use.
    2951              :  *
    2952              :  * @param stack Address of the stack.
    2953              :  * @param buffer Address of array used to hold stacked values.
    2954              :  * @param num_entries Maximum number of values that can be stacked.
    2955              :  */
    2956            1 : void k_stack_init(struct k_stack *stack,
    2957              :                   stack_data_t *buffer, uint32_t num_entries);
    2958              : 
    2959              : 
/**
 * @brief Initialize a stack.
 *
 * This routine initializes a stack object, prior to its first use. Internal
 * buffers will be allocated from the calling thread's resource pool.
 * This memory will be released if k_stack_cleanup() is called, or
 * userspace is enabled and the stack object loses all references to it.
 *
 * @param stack Address of the stack.
 * @param num_entries Maximum number of values that can be stacked.
 *
 * @retval 0 on success
 * @retval -ENOMEM if memory couldn't be allocated
 */

__syscall int32_t k_stack_alloc_init(struct k_stack *stack,
                                   uint32_t num_entries);
    2976              : 
    2977              : /**
    2978              :  * @brief Release a stack's allocated buffer
    2979              :  *
    2980              :  * If a stack object was given a dynamically allocated buffer via
    2981              :  * k_stack_alloc_init(), this will free it. This function does nothing
    2982              :  * if the buffer wasn't dynamically allocated.
    2983              :  *
    2984              :  * @param stack Address of the stack.
    2985              :  * @retval 0 on success
    2986              :  * @retval -EAGAIN when object is still in use
    2987              :  */
    2988            1 : int k_stack_cleanup(struct k_stack *stack);
    2989              : 
    2990              : /**
    2991              :  * @brief Push an element onto a stack.
    2992              :  *
    2993              :  * This routine adds a stack_data_t value @a data to @a stack.
    2994              :  *
    2995              :  * @funcprops \isr_ok
    2996              :  *
    2997              :  * @param stack Address of the stack.
    2998              :  * @param data Value to push onto the stack.
    2999              :  *
    3000              :  * @retval 0 on success
    3001              :  * @retval -ENOMEM if stack is full
    3002              :  */
    3003            1 : __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
    3004              : 
    3005              : /**
    3006              :  * @brief Pop an element from a stack.
    3007              :  *
    3008              :  * This routine removes a stack_data_t value from @a stack in a "last in,
    3009              :  * first out" manner and stores the value in @a data.
    3010              :  *
    3011              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    3012              :  *
    3013              :  * @funcprops \isr_ok
    3014              :  *
    3015              :  * @param stack Address of the stack.
    3016              :  * @param data Address of area to hold the value popped from the stack.
    3017              :  * @param timeout Waiting period to obtain a value,
    3018              :  *                or one of the special values K_NO_WAIT and
    3019              :  *                K_FOREVER.
    3020              :  *
    3021              :  * @retval 0 Element popped from stack.
    3022              :  * @retval -EBUSY Returned without waiting.
    3023              :  * @retval -EAGAIN Waiting period timed out.
    3024              :  */
    3025            1 : __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
    3026              :                           k_timeout_t timeout);
    3027              : 
    3028              : /**
    3029              :  * @brief Statically define and initialize a stack
    3030              :  *
    3031              :  * The stack can be accessed outside the module where it is defined using:
    3032              :  *
    3033              :  * @code extern struct k_stack <name>; @endcode
    3034              :  *
    3035              :  * @param name Name of the stack.
    3036              :  * @param stack_num_entries Maximum number of values that can be stacked.
    3037              :  */
/* Defines both the backing buffer (__noinit: presumably excluded from
 * boot-time zeroing) and the stack object itself, placed in the k_stack
 * iterable section.
 */
#define K_STACK_DEFINE(name, stack_num_entries)                \
        stack_data_t __noinit                                  \
                _k_stack_buf_##name[stack_num_entries];        \
        STRUCT_SECTION_ITERABLE(k_stack, name) =               \
                Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
                                    stack_num_entries)
    3044              : 
    3045              : /** @} */
    3046              : 
    3047              : /**
    3048              :  * @cond INTERNAL_HIDDEN
    3049              :  */
    3050              : 
    3051              : struct k_work;
    3052              : struct k_work_q;
    3053              : struct k_work_queue_config;
    3054              : extern struct k_work_q k_sys_work_q;
    3055              : 
    3056              : /**
    3057              :  * INTERNAL_HIDDEN @endcond
    3058              :  */
    3059              : 
    3060              : /**
    3061              :  * @defgroup mutex_apis Mutex APIs
    3062              :  * @ingroup kernel_apis
    3063              :  * @{
    3064              :  */
    3065              : 
    3066              : /**
    3067              :  * Mutex Structure
    3068              :  * @ingroup mutex_apis
    3069              :  */
struct k_mutex {
        /** Mutex wait queue */
        _wait_q_t wait_q;
        /** Mutex owner (NULL when unlocked) */
        struct k_thread *owner;

        /** Current lock count; > 1 when the owner has locked recursively */
        uint32_t lock_count;

        /** Original thread priority, restored on final unlock
         * (priority inheritance semantics)
         */
        int owner_orig_prio;

        SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)

#ifdef CONFIG_OBJ_CORE_MUTEX
        struct k_obj_core obj_core;
#endif
};
    3088              : 
    3089              : /**
    3090              :  * @cond INTERNAL_HIDDEN
    3091              :  */
/* Static initializer (internal): an unlocked mutex has no owner and a
 * zero lock count; owner_orig_prio holds a benign default until a
 * thread takes ownership.
 */
#define Z_MUTEX_INITIALIZER(obj) \
        { \
        .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
        .owner = NULL, \
        .lock_count = 0, \
        .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
        }
    3099              : 
    3100              : /**
    3101              :  * INTERNAL_HIDDEN @endcond
    3102              :  */
    3103              : 
    3104              : /**
    3105              :  * @brief Statically define and initialize a mutex.
    3106              :  *
    3107              :  * The mutex can be accessed outside the module where it is defined using:
    3108              :  *
    3109              :  * @code extern struct k_mutex <name>; @endcode
    3110              :  *
    3111              :  * @param name Name of the mutex.
    3112              :  */
    3113            1 : #define K_MUTEX_DEFINE(name) \
    3114              :         STRUCT_SECTION_ITERABLE(k_mutex, name) = \
    3115              :                 Z_MUTEX_INITIALIZER(name)
    3116              : 
    3117              : /**
    3118              :  * @brief Initialize a mutex.
    3119              :  *
    3120              :  * This routine initializes a mutex object, prior to its first use.
    3121              :  *
    3122              :  * Upon completion, the mutex is available and does not have an owner.
    3123              :  *
    3124              :  * @param mutex Address of the mutex.
    3125              :  *
    3126              :  * @retval 0 Mutex object created
    3127              :  *
    3128              :  */
    3129            1 : __syscall int k_mutex_init(struct k_mutex *mutex);
    3130              : 
    3131              : 
    3132              : /**
    3133              :  * @brief Lock a mutex.
    3134              :  *
    3135              :  * This routine locks @a mutex. If the mutex is locked by another thread,
    3136              :  * the calling thread waits until the mutex becomes available or until
    3137              :  * a timeout occurs.
    3138              :  *
    3139              :  * A thread is permitted to lock a mutex it has already locked. The operation
    3140              :  * completes immediately and the lock count is increased by 1.
    3141              :  *
    3142              :  * Mutexes may not be locked in ISRs.
    3143              :  *
    3144              :  * @param mutex Address of the mutex.
    3145              :  * @param timeout Waiting period to lock the mutex,
    3146              :  *                or one of the special values K_NO_WAIT and
    3147              :  *                K_FOREVER.
    3148              :  *
    3149              :  * @retval 0 Mutex locked.
    3150              :  * @retval -EBUSY Returned without waiting.
    3151              :  * @retval -EAGAIN Waiting period timed out.
    3152              :  */
    3153            1 : __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
    3154              : 
    3155              : /**
    3156              :  * @brief Unlock a mutex.
    3157              :  *
    3158              :  * This routine unlocks @a mutex. The mutex must already be locked by the
    3159              :  * calling thread.
    3160              :  *
    3161              :  * The mutex cannot be claimed by another thread until it has been unlocked by
    3162              :  * the calling thread as many times as it was previously locked by that
    3163              :  * thread.
    3164              :  *
    3165              :  * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
    3166              :  * in thread context due to ownership and priority inheritance semantics.
    3167              :  *
    3168              :  * @param mutex Address of the mutex.
    3169              :  *
    3170              :  * @retval 0 Mutex unlocked.
    3171              :  * @retval -EPERM The current thread does not own the mutex
    3172              :  * @retval -EINVAL The mutex is not locked
    3173              :  *
    3174              :  */
    3175            1 : __syscall int k_mutex_unlock(struct k_mutex *mutex);
    3176              : 
    3177              : /**
    3178              :  * @}
    3179              :  */
    3180              : 
    3181              : 
/**
 * Condition variable structure. All members are internal and should not
 * be accessed directly.
 */
struct k_condvar {
        /** Queue of threads blocked in k_condvar_wait() */
        _wait_q_t wait_q;

#ifdef CONFIG_OBJ_CORE_CONDVAR
        struct k_obj_core  obj_core;
#endif
};
    3189              : 
    3190              : #define Z_CONDVAR_INITIALIZER(obj)                                             \
    3191              :         {                                                                      \
    3192              :                 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q),                          \
    3193              :         }
    3194              : 
    3195              : /**
    3196              :  * @defgroup condvar_apis Condition Variables APIs
    3197              :  * @ingroup kernel_apis
    3198              :  * @{
    3199              :  */
    3200              : 
    3201              : /**
    3202              :  * @brief Initialize a condition variable
    3203              :  *
    3204              :  * @param condvar pointer to a @p k_condvar structure
    3205              :  * @retval 0 Condition variable created successfully
    3206              :  */
    3207            1 : __syscall int k_condvar_init(struct k_condvar *condvar);
    3208              : 
    3209              : /**
    3210              :  * @brief Signals one thread that is pending on the condition variable
    3211              :  *
    3212              :  * @param condvar pointer to a @p k_condvar structure
    3213              :  * @retval 0 On success
    3214              :  */
    3215            1 : __syscall int k_condvar_signal(struct k_condvar *condvar);
    3216              : 
    3217              : /**
    3218              :  * @brief Unblock all threads that are pending on the condition
    3219              :  * variable
    3220              :  *
    3221              :  * @param condvar pointer to a @p k_condvar structure
    3222              :  * @return An integer with number of woken threads on success
    3223              :  */
    3224            1 : __syscall int k_condvar_broadcast(struct k_condvar *condvar);
    3225              : 
    3226              : /**
    3227              :  * @brief Waits on the condition variable releasing the mutex lock
    3228              :  *
    3229              :  * Atomically releases the currently owned mutex, blocks the current thread
    3230              :  * waiting on the condition variable specified by @a condvar,
    3231              :  * and finally acquires the mutex again.
    3232              :  *
    3233              :  * The waiting thread unblocks only after another thread calls
    3234              :  * k_condvar_signal, or k_condvar_broadcast with the same condition variable.
    3235              :  *
    3236              :  * @param condvar pointer to a @p k_condvar structure
    3237              :  * @param mutex Address of the mutex.
    3238              :  * @param timeout Waiting period for the condition variable
    3239              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    3240              :  * @retval 0 On success
    3241              :  * @retval -EAGAIN Waiting period timed out.
    3242              :  */
    3243            1 : __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
    3244              :                              k_timeout_t timeout);
    3245              : 
    3246              : /**
    3247              :  * @brief Statically define and initialize a condition variable.
    3248              :  *
    3249              :  * The condition variable can be accessed outside the module where it is
    3250              :  * defined using:
    3251              :  *
    3252              :  * @code extern struct k_condvar <name>; @endcode
    3253              :  *
    3254              :  * @param name Name of the condition variable.
    3255              :  */
    3256            1 : #define K_CONDVAR_DEFINE(name)                                                 \
    3257              :         STRUCT_SECTION_ITERABLE(k_condvar, name) =                             \
    3258              :                 Z_CONDVAR_INITIALIZER(name)
    3259              : /**
    3260              :  * @}
    3261              :  */
    3262              : 
    3263              : /**
    3264              :  * @defgroup semaphore_apis Semaphore APIs
    3265              :  * @ingroup kernel_apis
    3266              :  * @{
    3267              :  */
    3268              : 
    3269              : /**
    3270              :  * @brief Semaphore structure
    3271              :  *
    3272              :  * This structure is used to represent a semaphore.
    3273              :  * All the members are internal and should not be accessed directly.
    3274              :  */
struct k_sem {
        /**
         * @cond INTERNAL_HIDDEN
         */
        _wait_q_t wait_q;       /* threads blocked in k_sem_take() */
        unsigned int count;     /* current count */
        unsigned int limit;     /* maximum permitted count */

        Z_DECL_POLL_EVENT

        SYS_PORT_TRACING_TRACKING_FIELD(k_sem)

#ifdef CONFIG_OBJ_CORE_SEM
        struct k_obj_core  obj_core;
#endif
        /** @endcond */
};
    3292              : 
    3293              : /**
    3294              :  * @cond INTERNAL_HIDDEN
    3295              :  */
    3296              : 
/* Static initializer (internal). Z_POLL_EVENT_OBJ_INIT presumably
 * expands to nothing when polling support is disabled, mirroring
 * Z_DECL_POLL_EVENT in struct k_sem -- see its definition to confirm.
 */
#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
        { \
        .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
        .count = (initial_count), \
        .limit = (count_limit), \
        Z_POLL_EVENT_OBJ_INIT(obj) \
        }
    3304              : 
    3305              : /**
    3306              :  * @endcond
    3307              :  */
    3308              : 
    3309              : /**
    3310              :  * @brief Maximum limit value allowed for a semaphore.
    3311              :  *
    3312              :  * This is intended for use when a semaphore does not have
    3313              :  * an explicit maximum limit, and instead is just used for
    3314              :  * counting purposes.
    3315              :  *
    3316              :  */
    3317            1 : #define K_SEM_MAX_LIMIT UINT_MAX
    3318              : 
    3319              : /**
    3320              :  * @brief Initialize a semaphore.
    3321              :  *
    3322              :  * This routine initializes a semaphore object, prior to its first use.
    3323              :  *
    3324              :  * @param sem Address of the semaphore.
    3325              :  * @param initial_count Initial semaphore count.
    3326              :  * @param limit Maximum permitted semaphore count.
    3327              :  *
    3328              :  * @see K_SEM_MAX_LIMIT
    3329              :  *
    3330              :  * @retval 0 Semaphore created successfully
    3331              :  * @retval -EINVAL Invalid values
    3332              :  *
    3333              :  */
    3334            1 : __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
    3335              :                           unsigned int limit);
    3336              : 
    3337              : /**
    3338              :  * @brief Take a semaphore.
    3339              :  *
    3340              :  * This routine takes @a sem.
    3341              :  *
    3342              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    3343              :  *
    3344              :  * @funcprops \isr_ok
    3345              :  *
    3346              :  * @param sem Address of the semaphore.
    3347              :  * @param timeout Waiting period to take the semaphore,
    3348              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    3349              :  *
    3350              :  * @retval 0 Semaphore taken.
    3351              :  * @retval -EBUSY Returned without waiting.
    3352              :  * @retval -EAGAIN Waiting period timed out,
    3353              :  *                      or the semaphore was reset during the waiting period.
    3354              :  */
    3355            1 : __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
    3356              : 
    3357              : /**
    3358              :  * @brief Give a semaphore.
    3359              :  *
    3360              :  * This routine gives @a sem, unless the semaphore is already at its maximum
    3361              :  * permitted count.
    3362              :  *
    3363              :  * @funcprops \isr_ok
    3364              :  *
    3365              :  * @param sem Address of the semaphore.
    3366              :  */
    3367            1 : __syscall void k_sem_give(struct k_sem *sem);
    3368              : 
    3369              : /**
    3370              :  * @brief Resets a semaphore's count to zero.
    3371              :  *
    3372              :  * This routine sets the count of @a sem to zero.
    3373              :  * Any outstanding semaphore takes will be aborted
    3374              :  * with -EAGAIN.
    3375              :  *
    3376              :  * @param sem Address of the semaphore.
    3377              :  */
    3378            1 : __syscall void k_sem_reset(struct k_sem *sem);
    3379              : 
    3380              : /**
    3381              :  * @brief Get a semaphore's count.
    3382              :  *
    3383              :  * This routine returns the current count of @a sem.
    3384              :  *
    3385              :  * @param sem Address of the semaphore.
    3386              :  *
    3387              :  * @return Current semaphore count.
    3388              :  */
    3389            1 : __syscall unsigned int k_sem_count_get(struct k_sem *sem);
    3390              : 
/**
 * @internal
 *
 * Direct (non-syscall) implementation of k_sem_count_get(): a plain,
 * unsynchronized read of the current count. Callers needing a snapshot
 * that is consistent with other semaphore operations must provide their
 * own synchronization.
 */
static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
{
        return sem->count;
}
    3398              : 
    3399              : /**
    3400              :  * @brief Statically define and initialize a semaphore.
    3401              :  *
    3402              :  * The semaphore can be accessed outside the module where it is defined using:
    3403              :  *
    3404              :  * @code extern struct k_sem <name>; @endcode
    3405              :  *
    3406              :  * @param name Name of the semaphore.
    3407              :  * @param initial_count Initial semaphore count.
    3408              :  * @param count_limit Maximum permitted semaphore count.
    3409              :  */
/* The BUILD_ASSERT spells "initial_count <= count_limit" as
 * (< || ==), presumably to sidestep compiler warnings about
 * tautological comparisons when count_limit is K_SEM_MAX_LIMIT
 * -- NOTE(review): confirm before simplifying.
 */
#define K_SEM_DEFINE(name, initial_count, count_limit)                                             \
        STRUCT_SECTION_ITERABLE(k_sem, name) =                                                     \
                Z_SEM_INITIALIZER(name, initial_count, count_limit);                               \
        BUILD_ASSERT(((count_limit) != 0) &&                                                       \
                     (((initial_count) < (count_limit)) || ((initial_count) == (count_limit))) &&  \
                     ((count_limit) <= K_SEM_MAX_LIMIT));
    3416              : 
    3417              : /** @} */
    3418              : 
    3419              : #if defined(CONFIG_SCHED_IPI_SUPPORTED) || defined(__DOXYGEN__)
    3420              : struct k_ipi_work;
    3421              : 
    3422              : /**
    3423              :  * @cond INTERNAL_HIDDEN
    3424              :  */
    3425              : 
/* Callback invoked at ISR level on each targeted CPU as it processes
 * its received IPI.
 */
typedef void (*k_ipi_func_t)(struct k_ipi_work *work);

struct k_ipi_work {
        sys_dnode_t        node[CONFIG_MP_MAX_NUM_CPUS];   /* Node in IPI work queue (one per CPU) */
        k_ipi_func_t   func;     /* Function to execute on target CPU */
        struct k_event event;    /* Event to signal when processed */
        uint32_t       bitmask;  /* Bitmask of targeted CPUs */
};
    3434              : 
    3435              : /** @endcond */
    3436              : 
    3437              : /**
    3438              :  * @brief Initialize the specified IPI work item
    3439              :  *
    3440              :  * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
    3441              :  *
    3442              :  * @param work Pointer to the IPI work item to be initialized
    3443              :  */
    3444            1 : static inline void k_ipi_work_init(struct k_ipi_work *work)
    3445              : {
    3446              :         k_event_init(&work->event);
    3447              :         for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
    3448              :                 sys_dnode_init(&work->node[i]);
    3449              :         }
    3450              :         work->bitmask = 0;
    3451              : }
    3452              : 
    3453              : /**
    3454              :  * @brief Add an IPI work item to the IPI work queue
    3455              :  *
    3456              :  * Adds the specified IPI work item to the IPI work queues of each CPU
    3457              :  * identified by @a cpu_bitmask. The specified IPI work item will subsequently
    3458              :  * execute at ISR level as those CPUs process their received IPIs. Do not
    3459              :  * re-use the specified IPI work item until it has been processed by all of
    3460              :  * the identified CPUs.
    3461              :  *
    3462              :  * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
    3463              :  *
    3464              :  * @param work Pointer to the IPI work item
    3465              :  * @param cpu_bitmask Set of CPUs to which the IPI work item will be sent
    3466              :  * @param func Function to execute on the targeted CPU(s)
    3467              :  *
    3468              :  * @retval 0 on success
    3469              :  * @retval -EBUSY if the specified IPI work item is still being processed
    3470              :  */
    3471            1 : int k_ipi_work_add(struct k_ipi_work *work, uint32_t cpu_bitmask,
    3472              :                    k_ipi_func_t func);
    3473              : 
    3474              : /**
    3475              :  * @brief Wait until the IPI work item has been processed by all targeted CPUs
    3476              :  *
    3477              :  * This routine waits until the IPI work item has been processed by all CPUs
    3478              :  * to which it was sent. If called from an ISR, then @a timeout must be set to
    3479              :  * K_NO_WAIT. To prevent deadlocks the caller must not have IRQs locked when
    3480              :  * calling this function.
    3481              :  *
    3482              :  * @note It is not in general possible to poll safely for completion of this
    3483              :  * function in ISR or locked contexts where the calling CPU cannot service IPIs
    3484              :  * (because the targeted CPUs may themselves be waiting on the calling CPU).
    3485              :  * Application code must be prepared for failure or to poll from a thread
    3486              :  * context.
    3487              :  *
    3488              :  * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
    3489              :  *
    3490              :  * @param work Pointer to the IPI work item
    3491              :  * @param timeout Maximum time to wait for IPI work to be processed
    3492              :  *
    3493              :  * @retval -EAGAIN Waiting period timed out.
    3494              :  * @retval 0 if processed by all targeted CPUs
    3495              :  */
    3496            1 : int k_ipi_work_wait(struct k_ipi_work *work, k_timeout_t timeout);
    3497              : 
    3498              : /**
    3499              :  * @brief Signal that there is one or more IPI work items to process
    3500              :  *
    3501              :  * This routine sends an IPI to the set of CPUs identified by calls to
    3502              :  * k_ipi_work_add() since this CPU sent its last set of IPIs.
    3503              :  *
    3504              :  * @kconfig_dep{CONFIG_SCHED_IPI_SUPPORTED}
    3505              :  */
    3506            1 : void k_ipi_work_signal(void);
    3507              : 
    3508              : #endif /* CONFIG_SCHED_IPI_SUPPORTED */
    3509              : 
    3510              : /**
    3511              :  * @cond INTERNAL_HIDDEN
    3512              :  */
    3513              : 
    3514              : struct k_work_delayable;
    3515              : struct k_work_sync;
    3516              : 
    3517              : /**
    3518              :  * INTERNAL_HIDDEN @endcond
    3519              :  */
    3520              : 
    3521              : /**
    3522              :  * @defgroup workqueue_apis Work Queue APIs
    3523              :  * @ingroup kernel_apis
    3524              :  * @{
    3525              :  */
    3526              : 
    3527              : /** @brief The signature for a work item handler function.
    3528              :  *
    3529              :  * The function will be invoked by the thread animating a work queue.
    3530              :  *
    3531              :  * @param work the work item that provided the handler.
    3532              :  */
    3533            1 : typedef void (*k_work_handler_t)(struct k_work *work);
    3534              : 
    3535              : /** @brief Initialize a (non-delayable) work structure.
    3536              :  *
    3537              :  * This must be invoked before submitting a work structure for the first time.
    3538              :  * It need not be invoked again on the same work structure.  It can be
    3539              :  * re-invoked to change the associated handler, but this must be done when the
    3540              :  * work item is idle.
    3541              :  *
    3542              :  * @funcprops \isr_ok
    3543              :  *
    3544              :  * @param work the work structure to be initialized.
    3545              :  *
    3546              :  * @param handler the handler to be invoked by the work item.
    3547              :  */
    3548            1 : void k_work_init(struct k_work *work,
    3549              :                   k_work_handler_t handler);
    3550              : 
    3551              : /** @brief Busy state flags from the work item.
    3552              :  *
    3553              :  * A zero return value indicates the work item appears to be idle.
    3554              :  *
    3555              :  * @note This is a live snapshot of state, which may change before the result
    3556              :  * is checked.  Use locks where appropriate.
    3557              :  *
    3558              :  * @funcprops \isr_ok
    3559              :  *
    3560              :  * @param work pointer to the work item.
    3561              :  *
    3562              :  * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
    3563              :  * K_WORK_RUNNING, K_WORK_CANCELING, and K_WORK_FLUSHING.
    3564              :  */
    3565            1 : int k_work_busy_get(const struct k_work *work);
    3566              : 
    3567              : /** @brief Test whether a work item is currently pending.
    3568              :  *
    3569              :  * Wrapper to determine whether a work item is in a non-idle state.
    3570              :  *
    3571              :  * @note This is a live snapshot of state, which may change before the result
    3572              :  * is checked.  Use locks where appropriate.
    3573              :  *
    3574              :  * @funcprops \isr_ok
    3575              :  *
    3576              :  * @param work pointer to the work item.
    3577              :  *
    3578              :  * @return true if and only if k_work_busy_get() returns a non-zero value.
    3579              :  */
    3580              : static inline bool k_work_is_pending(const struct k_work *work);
    3581              : 
    3582              : /** @brief Submit a work item to a queue.
    3583              :  *
    3584              :  * @param queue pointer to the work queue on which the item should run.  If
    3585              :  * NULL the queue from the most recent submission will be used.
    3586              :  *
    3587              :  * @funcprops \isr_ok
    3588              :  *
    3589              :  * @param work pointer to the work item.
    3590              :  *
    3591              :  * @retval 0 if work was already submitted to a queue
    3592              :  * @retval 1 if work was not submitted and has been queued to @p queue
    3593              :  * @retval 2 if work was running and has been queued to the queue that was
    3594              :  * running it
    3595              :  * @retval -EBUSY
    3596              :  * * if work submission was rejected because the work item is cancelling; or
    3597              :  * * @p queue is draining; or
    3598              :  * * @p queue is plugged.
    3599              :  * @retval -EINVAL if @p queue is null and the work item has never been run.
    3600              :  * @retval -ENODEV if @p queue has not been started.
    3601              :  */
    3602            1 : int k_work_submit_to_queue(struct k_work_q *queue,
    3603              :                            struct k_work *work);
    3604              : 
    3605              : /** @brief Submit a work item to the system queue.
    3606              :  *
    3607              :  * @funcprops \isr_ok
    3608              :  *
    3609              :  * @param work pointer to the work item.
    3610              :  *
    3611              :  * @return as with k_work_submit_to_queue().
    3612              :  */
    3613            1 : int k_work_submit(struct k_work *work);
    3614              : 
    3615              : /** @brief Wait for last-submitted instance to complete.
    3616              :  *
    3617              :  * Resubmissions may occur while waiting, including chained submissions (from
    3618              :  * within the handler).
    3619              :  *
    3620              :  * @note Be careful of caller and work queue thread relative priority.  If
    3621              :  * this function sleeps it will not return until the work queue thread
    3622              :  * completes the tasks that allow this thread to resume.
    3623              :  *
    3624              :  * @note Behavior is undefined if this function is invoked on @p work from a
    3625              :  * work queue running @p work.
    3626              :  *
    3627              :  * @param work pointer to the work item.
    3628              :  *
    3629              :  * @param sync pointer to an opaque item containing state related to the
    3630              :  * pending cancellation.  The object must persist until the call returns, and
    3631              :  * be accessible from both the caller thread and the work queue thread.  The
    3632              :  * object must not be used for any other flush or cancel operation until this
    3633              :  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
    3634              :  * must be allocated in coherent memory.
    3635              :  *
    3636              :  * @retval true if call had to wait for completion
    3637              :  * @retval false if work was already idle
    3638              :  */
    3639            1 : bool k_work_flush(struct k_work *work,
    3640              :                   struct k_work_sync *sync);
    3641              : 
    3642              : /** @brief Cancel a work item.
    3643              :  *
    3644              :  * This attempts to prevent a pending (non-delayable) work item from being
    3645              :  * processed by removing it from the work queue.  If the item is being
    3646              :  * processed, the work item will continue to be processed, but resubmissions
    3647              :  * are rejected until cancellation completes.
    3648              :  *
    3649              :  * If this returns zero cancellation is complete, otherwise something
    3650              :  * (probably a work queue thread) is still referencing the item.
    3651              :  *
    3652              :  * See also k_work_cancel_sync().
    3653              :  *
    3654              :  * @funcprops \isr_ok
    3655              :  *
    3656              :  * @param work pointer to the work item.
    3657              :  *
    3658              :  * @return the k_work_busy_get() status indicating the state of the item after all
    3659              :  * cancellation steps performed by this call are completed.
    3660              :  */
    3661            1 : int k_work_cancel(struct k_work *work);
    3662              : 
    3663              : /** @brief Cancel a work item and wait for it to complete.
    3664              :  *
    3665              :  * Same as k_work_cancel() but does not return until cancellation is complete.
    3666              :  * This can be invoked by a thread after k_work_cancel() to synchronize with a
    3667              :  * previous cancellation.
    3668              :  *
    3669              :  * On return the work structure will be idle unless something submits it after
    3670              :  * the cancellation was complete.
    3671              :  *
    3672              :  * @note Be careful of caller and work queue thread relative priority.  If
    3673              :  * this function sleeps it will not return until the work queue thread
    3674              :  * completes the tasks that allow this thread to resume.
    3675              :  *
    3676              :  * @note Behavior is undefined if this function is invoked on @p work from a
    3677              :  * work queue running @p work.
    3678              :  *
    3679              :  * @param work pointer to the work item.
    3680              :  *
    3681              :  * @param sync pointer to an opaque item containing state related to the
    3682              :  * pending cancellation.  The object must persist until the call returns, and
    3683              :  * be accessible from both the caller thread and the work queue thread.  The
    3684              :  * object must not be used for any other flush or cancel operation until this
    3685              :  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
    3686              :  * must be allocated in coherent memory.
    3687              :  *
    3688              :  * @retval true if work was pending (call had to wait for cancellation of a
    3689              :  * running handler to complete, or scheduled or submitted operations were
    3690              :  * cancelled);
    3691              :  * @retval false otherwise
    3692              :  */
    3693            1 : bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
    3694              : 
    3695              : /** @brief Initialize a work queue structure.
    3696              :  *
    3697              :  * This must be invoked before starting a work queue structure for the first time.
    3698              :  * It need not be invoked again on the same work queue structure.
    3699              :  *
    3700              :  * @funcprops \isr_ok
    3701              :  *
    3702              :  * @param queue the queue structure to be initialized.
    3703              :  */
    3704            1 : void k_work_queue_init(struct k_work_q *queue);
    3705              : 
    3706              : /** @brief Initialize a work queue.
    3707              :  *
    3708              :  * This configures the work queue thread and starts it running.  The function
    3709              :  * should not be re-invoked on a queue.
    3710              :  *
    3711              :  * @param queue pointer to the queue structure. It must be initialized
    3712              :  *        in zeroed/bss memory or with @ref k_work_queue_init before
    3713              :  *        use.
    3714              :  *
    3715              :  * @param stack pointer to the work thread stack area.
    3716              :  *
    3717              :  * @param stack_size size of the work thread stack area, in bytes.
    3718              :  *
    3719              :  * @param prio initial thread priority
    3720              :  *
    3721              :  * @param cfg optional additional configuration parameters.  Pass @c
    3722              :  * NULL if not required, to use the defaults documented in
    3723              :  * k_work_queue_config.
    3724              :  */
    3725            1 : void k_work_queue_start(struct k_work_q *queue,
    3726              :                         k_thread_stack_t *stack, size_t stack_size,
    3727              :                         int prio, const struct k_work_queue_config *cfg);
    3728              : 
    3729              : /** @brief Run work queue using calling thread
    3730              :  *
    3731              :  * This will run the work queue forever unless stopped by @ref k_work_queue_stop.
    3732              :  *
    3733              :  * @param queue the queue to run
    3734              :  *
    3735              :  * @param cfg optional additional configuration parameters.  Pass @c
    3736              :  * NULL if not required, to use the defaults documented in
    3737              :  * k_work_queue_config.
    3738              :  */
    3739            1 : void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *cfg);
    3740              : 
    3741              : /** @brief Access the thread that animates a work queue.
    3742              :  *
    3743              :  * This is necessary to grant a work queue thread access to things the work
    3744              :  * items it will process are expected to use.
    3745              :  *
    3746              :  * @param queue pointer to the queue structure.
    3747              :  *
    3748              :  * @return the thread associated with the work queue.
    3749              :  */
    3750              : static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
    3751              : 
    3752              : /** @brief Wait until the work queue has drained, optionally plugging it.
    3753              :  *
    3754              :  * This blocks submission to the work queue except when coming from queue
    3755              :  * thread, and blocks the caller until no more work items are available in the
    3756              :  * queue.
    3757              :  *
    3758              :  * If @p plug is true then submission will continue to be blocked after the
    3759              :  * drain operation completes until k_work_queue_unplug() is invoked.
    3760              :  *
    3761              :  * Note that work items that are delayed are not yet associated with their
    3762              :  * work queue.  They must be cancelled externally if a goal is to ensure the
    3763              :  * work queue remains empty.  The @p plug feature can be used to prevent
    3764              :  * delayed items from being submitted after the drain completes.
    3765              :  *
    3766              :  * @param queue pointer to the queue structure.
    3767              :  *
    3768              :  * @param plug if true the work queue will continue to block new submissions
    3769              :  * after all items have drained.
    3770              :  *
    3771              :  * @retval 1 if call had to wait for the drain to complete
    3772              :  * @retval 0 if call did not have to wait
    3773              :  * @retval negative if wait was interrupted or failed
    3774              :  */
    3775            1 : int k_work_queue_drain(struct k_work_q *queue, bool plug);
    3776              : 
    3777              : /** @brief Release a work queue to accept new submissions.
    3778              :  *
    3779              :  * This releases the block on new submissions placed when k_work_queue_drain()
    3780              :  * is invoked with the @p plug option enabled.  If this is invoked before the
    3781              :  * drain completes new items may be submitted as soon as the drain completes.
    3782              :  *
    3783              :  * @funcprops \isr_ok
    3784              :  *
    3785              :  * @param queue pointer to the queue structure.
    3786              :  *
    3787              :  * @retval 0 if successfully unplugged
    3788              :  * @retval -EALREADY if the work queue was not plugged.
    3789              :  */
    3790            1 : int k_work_queue_unplug(struct k_work_q *queue);
    3791              : 
    3792              : /** @brief Stop a work queue.
    3793              :  *
    3794              :  * Stops the work queue thread and ensures that no further work will be processed.
    3795              :  * This call is blocking and guarantees that the work queue thread has terminated
    3796              :  * cleanly if successful, no work will be processed past this point.
    3797              :  *
    3798              :  * @param queue Pointer to the queue structure.
    3799              :  * @param timeout Maximum time to wait for the work queue to stop.
    3800              :  *
    3801              :  * @retval 0 if the work queue was stopped
    3802              :  * @retval -EALREADY if the work queue was not started (or already stopped)
    3803              :  * @retval -EBUSY if the work queue is actively processing work items
    3804              :  * @retval -ETIMEDOUT if the work queue did not stop within the stipulated timeout
    3805              :  */
    3806            1 : int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout);
    3807              : 
    3808              : /** @brief Initialize a delayable work structure.
    3809              :  *
    3810              :  * This must be invoked before scheduling a delayable work structure for the
    3811              :  * first time.  It need not be invoked again on the same work structure.  It
    3812              :  * can be re-invoked to change the associated handler, but this must be done
    3813              :  * when the work item is idle.
    3814              :  *
    3815              :  * @funcprops \isr_ok
    3816              :  *
    3817              :  * @param dwork the delayable work structure to be initialized.
    3818              :  *
    3819              :  * @param handler the handler to be invoked by the work item.
    3820              :  */
    3821            1 : void k_work_init_delayable(struct k_work_delayable *dwork,
    3822              :                            k_work_handler_t handler);
    3823              : 
    3824              : /**
    3825              :  * @brief Get the parent delayable work structure from a work pointer.
    3826              :  *
 * This function is necessary when a @c k_work_handler_t function is passed to
 * k_work_schedule_for_queue() and the handler needs to access data in the
 * structure that contains the `k_work_delayable`.
    3830              :  *
    3831              :  * @param work Address passed to the work handler
    3832              :  *
    3833              :  * @return Address of the containing @c k_work_delayable structure.
    3834              :  */
    3835              : static inline struct k_work_delayable *
    3836              : k_work_delayable_from_work(struct k_work *work);
    3837              : 
    3838              : /** @brief Busy state flags from the delayable work item.
    3839              :  *
    3840              :  * @funcprops \isr_ok
    3841              :  *
    3842              :  * @note This is a live snapshot of state, which may change before the result
    3843              :  * can be inspected.  Use locks where appropriate.
    3844              :  *
    3845              :  * @param dwork pointer to the delayable work item.
    3846              :  *
    3847              :  * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING,
    3848              :  * K_WORK_CANCELING, and K_WORK_FLUSHING.  A zero return value indicates the
    3849              :  * work item appears to be idle.
    3850              :  */
    3851            1 : int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
    3852              : 
    3853              : /** @brief Test whether a delayed work item is currently pending.
    3854              :  *
    3855              :  * Wrapper to determine whether a delayed work item is in a non-idle state.
    3856              :  *
    3857              :  * @note This is a live snapshot of state, which may change before the result
    3858              :  * can be inspected.  Use locks where appropriate.
    3859              :  *
    3860              :  * @funcprops \isr_ok
    3861              :  *
    3862              :  * @param dwork pointer to the delayable work item.
    3863              :  *
    3864              :  * @return true if and only if k_work_delayable_busy_get() returns a non-zero
    3865              :  * value.
    3866              :  */
    3867              : static inline bool k_work_delayable_is_pending(
    3868              :         const struct k_work_delayable *dwork);
    3869              : 
    3870              : /** @brief Get the absolute tick count at which a scheduled delayable work
    3871              :  * will be submitted.
    3872              :  *
    3873              :  * @note This is a live snapshot of state, which may change before the result
    3874              :  * can be inspected.  Use locks where appropriate.
    3875              :  *
    3876              :  * @funcprops \isr_ok
    3877              :  *
    3878              :  * @param dwork pointer to the delayable work item.
    3879              :  *
    3880              :  * @return the tick count when the timer that will schedule the work item will
    3881              :  * expire, or the current tick count if the work is not scheduled.
    3882              :  */
    3883              : static inline k_ticks_t k_work_delayable_expires_get(
    3884              :         const struct k_work_delayable *dwork);
    3885              : 
    3886              : /** @brief Get the number of ticks until a scheduled delayable work will be
    3887              :  * submitted.
    3888              :  *
    3889              :  * @note This is a live snapshot of state, which may change before the result
    3890              :  * can be inspected.  Use locks where appropriate.
    3891              :  *
    3892              :  * @funcprops \isr_ok
    3893              :  *
    3894              :  * @param dwork pointer to the delayable work item.
    3895              :  *
    3896              :  * @return the number of ticks until the timer that will schedule the work
    3897              :  * item will expire, or zero if the item is not scheduled.
    3898              :  */
    3899              : static inline k_ticks_t k_work_delayable_remaining_get(
    3900              :         const struct k_work_delayable *dwork);
    3901              : 
    3902              : /** @brief Submit an idle work item to a queue after a delay.
    3903              :  *
    3904              :  * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
    3905              :  * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
    3906              :  *
    3907              :  * @funcprops \isr_ok
    3908              :  *
    3909              :  * @param queue the queue on which the work item should be submitted after the
    3910              :  * delay.
    3911              :  *
    3912              :  * @param dwork pointer to the delayable work item.
    3913              :  *
    3914              :  * @param delay the time to wait before submitting the work item.  If @c
    3915              :  * K_NO_WAIT and the work is not pending this is equivalent to
    3916              :  * k_work_submit_to_queue().
    3917              :  *
    3918              :  * @retval 0 if work was already scheduled or submitted.
    3919              :  * @retval 1 if work has been scheduled.
    3920              :  * @retval 2 if @p delay is @c K_NO_WAIT and work
    3921              :  *         was running and has been queued to the queue that was running it.
    3922              :  * @retval -EBUSY if @p delay is @c K_NO_WAIT and
    3923              :  *         k_work_submit_to_queue() fails with this code.
    3924              :  * @retval -EINVAL if @p delay is @c K_NO_WAIT and
    3925              :  *         k_work_submit_to_queue() fails with this code.
    3926              :  * @retval -ENODEV if @p delay is @c K_NO_WAIT and
    3927              :  *         k_work_submit_to_queue() fails with this code.
    3928              :  */
    3929            1 : int k_work_schedule_for_queue(struct k_work_q *queue,
    3930              :                                struct k_work_delayable *dwork,
    3931              :                                k_timeout_t delay);
    3932              : 
    3933              : /** @brief Submit an idle work item to the system work queue after a
    3934              :  * delay.
    3935              :  *
    3936              :  * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
    3937              :  * characteristics of that function.
    3938              :  *
    3939              :  * @param dwork pointer to the delayable work item.
    3940              :  *
    3941              :  * @param delay the time to wait before submitting the work item.  If @c
    3942              :  * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
    3943              :  *
    3944              :  * @return as with k_work_schedule_for_queue().
    3945              :  */
    3946            1 : int k_work_schedule(struct k_work_delayable *dwork,
    3947              :                                    k_timeout_t delay);
    3948              : 
    3949              : /** @brief Reschedule a work item to a queue after a delay.
    3950              :  *
    3951              :  * Unlike k_work_schedule_for_queue() this function can change the deadline of
    3952              :  * a scheduled work item, and will schedule a work item that is in any state
    3953              :  * (e.g. is idle, submitted, or running).  This function does not affect
    3954              :  * ("unsubmit") a work item that has been submitted to a queue.
    3955              :  *
    3956              :  * @funcprops \isr_ok
    3957              :  *
    3958              :  * @param queue the queue on which the work item should be submitted after the
    3959              :  * delay.
    3960              :  *
    3961              :  * @param dwork pointer to the delayable work item.
    3962              :  *
    3963              :  * @param delay the time to wait before submitting the work item.  If @c
    3964              :  * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
    3965              :  * any previous scheduled submission.
    3966              :  *
    3967              :  * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
    3968              :  * k_work_submit_to_queue().
    3969              :  *
    3970              :  * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
    3971              :  * @retval 1 if
    3972              :  * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
    3973              :  *   to @p queue; or
    3974              :  * * delay not @c K_NO_WAIT and work has been scheduled
    3975              :  * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
    3976              :  * to the queue that was running it
    3977              :  * @retval -EBUSY if @p delay is @c K_NO_WAIT and
    3978              :  *         k_work_submit_to_queue() fails with this code.
    3979              :  * @retval -EINVAL if @p delay is @c K_NO_WAIT and
    3980              :  *         k_work_submit_to_queue() fails with this code.
    3981              :  * @retval -ENODEV if @p delay is @c K_NO_WAIT and
    3982              :  *         k_work_submit_to_queue() fails with this code.
    3983              :  */
    3984            1 : int k_work_reschedule_for_queue(struct k_work_q *queue,
    3985              :                                  struct k_work_delayable *dwork,
    3986              :                                  k_timeout_t delay);
    3987              : 
    3988              : /** @brief Reschedule a work item to the system work queue after a
    3989              :  * delay.
    3990              :  *
    3991              :  * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
    3992              :  * API characteristics of that function.
    3993              :  *
    3994              :  * @param dwork pointer to the delayable work item.
    3995              :  *
    3996              :  * @param delay the time to wait before submitting the work item.
    3997              :  *
    3998              :  * @return as with k_work_reschedule_for_queue().
    3999              :  */
    4000            1 : int k_work_reschedule(struct k_work_delayable *dwork,
    4001              :                                      k_timeout_t delay);
    4002              : 
    4003              : /** @brief Flush delayable work.
    4004              :  *
    4005              :  * If the work is scheduled, it is immediately submitted.  Then the caller
    4006              :  * blocks until the work completes, as with k_work_flush().
    4007              :  *
    4008              :  * @note Be careful of caller and work queue thread relative priority.  If
    4009              :  * this function sleeps it will not return until the work queue thread
    4010              :  * completes the tasks that allow this thread to resume.
    4011              :  *
    4012              :  * @note Behavior is undefined if this function is invoked on @p dwork from a
    4013              :  * work queue running @p dwork.
    4014              :  *
    4015              :  * @param dwork pointer to the delayable work item.
    4016              :  *
    4017              :  * @param sync pointer to an opaque item containing state related to the
    4018              :  * pending cancellation.  The object must persist until the call returns, and
    4019              :  * be accessible from both the caller thread and the work queue thread.  The
    4020              :  * object must not be used for any other flush or cancel operation until this
    4021              :  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
    4022              :  * must be allocated in coherent memory.
    4023              :  *
    4024              :  * @retval true if call had to wait for completion
    4025              :  * @retval false if work was already idle
    4026              :  */
    4027            1 : bool k_work_flush_delayable(struct k_work_delayable *dwork,
    4028              :                             struct k_work_sync *sync);
    4029              : 
    4030              : /** @brief Cancel delayable work.
    4031              :  *
    4032              :  * Similar to k_work_cancel() but for delayable work.  If the work is
    4033              :  * scheduled or submitted it is canceled.  This function does not wait for the
    4034              :  * cancellation to complete.
    4035              :  *
    4036              :  * @note The work may still be running when this returns.  Use
    4037              :  * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
    4038              :  * not running.
    4039              :  *
    4040              :  * @note Canceling delayable work does not prevent rescheduling it.  It does
    4041              :  * prevent submitting it until the cancellation completes.
    4042              :  *
    4043              :  * @funcprops \isr_ok
    4044              :  *
    4045              :  * @param dwork pointer to the delayable work item.
    4046              :  *
    4047              :  * @return the k_work_delayable_busy_get() status indicating the state of the
    4048              :  * item after all cancellation steps performed by this call are completed.
    4049              :  */
    4050            1 : int k_work_cancel_delayable(struct k_work_delayable *dwork);
    4051              : 
    4052              : /** @brief Cancel delayable work and wait.
    4053              :  *
    4054              :  * Like k_work_cancel_delayable() but waits until the work becomes idle.
    4055              :  *
    4056              :  * @note Canceling delayable work does not prevent rescheduling it.  It does
    4057              :  * prevent submitting it until the cancellation completes.
    4058              :  *
    4059              :  * @note Be careful of caller and work queue thread relative priority.  If
    4060              :  * this function sleeps it will not return until the work queue thread
    4061              :  * completes the tasks that allow this thread to resume.
    4062              :  *
    4063              :  * @note Behavior is undefined if this function is invoked on @p dwork from a
    4064              :  * work queue running @p dwork.
    4065              :  *
    4066              :  * @param dwork pointer to the delayable work item.
    4067              :  *
    4068              :  * @param sync pointer to an opaque item containing state related to the
    4069              :  * pending cancellation.  The object must persist until the call returns, and
    4070              :  * be accessible from both the caller thread and the work queue thread.  The
    4071              :  * object must not be used for any other flush or cancel operation until this
    4072              :  * one completes.  On architectures with CONFIG_KERNEL_COHERENCE the object
    4073              :  * must be allocated in coherent memory.
    4074              :  *
    4075              :  * @retval true if work was not idle (call had to wait for cancellation of a
    4076              :  * running handler to complete, or scheduled or submitted operations were
    4077              :  * cancelled);
    4078              :  * @retval false otherwise
    4079              :  */
    4080            1 : bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
    4081              :                                   struct k_work_sync *sync);
    4082              : 
enum {
/**
 * @cond INTERNAL_HIDDEN
 */

	/* The atomic API is used for all work and queue flags fields to
	 * enforce sequential consistency in SMP environments.
	 */

	/* Bits that represent the work item states.  At least nine of the
	 * combinations are distinct valid stable states.
	 */
	K_WORK_RUNNING_BIT = 0,
	K_WORK_CANCELING_BIT = 1,
	K_WORK_QUEUED_BIT = 2,
	K_WORK_DELAYED_BIT = 3,
	K_WORK_FLUSHING_BIT = 4,

	/* Mask covering every state bit above; a work item whose flags
	 * intersect this mask is busy in some way.
	 */
	K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
		| BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),

	/* Static work flags (set at initialization, never cleared) */
	K_WORK_DELAYABLE_BIT = 8,
	K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),

	/* Dynamic work queue flags (note: these index a queue's flags field,
	 * so their values may legitimately overlap the work item bits above)
	 */
	K_WORK_QUEUE_STARTED_BIT = 0,
	K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
	K_WORK_QUEUE_BUSY_BIT = 1,
	K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
	K_WORK_QUEUE_DRAIN_BIT = 2,
	K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
	K_WORK_QUEUE_PLUGGED_BIT = 3,
	K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
	K_WORK_QUEUE_STOP_BIT = 4,
	K_WORK_QUEUE_STOP = BIT(K_WORK_QUEUE_STOP_BIT),

	/* Static work queue flags */
	K_WORK_QUEUE_NO_YIELD_BIT = 8,
	K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),

/**
 * INTERNAL_HIDDEN @endcond
 */
	/* Transient work flags */

	/** @brief Flag indicating a work item that is running under a work
	 * queue thread.
	 *
	 * Accessed via k_work_busy_get().  May co-occur with other flags.
	 */
	K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),

	/** @brief Flag indicating a work item that is being canceled.
	 *
	 * Accessed via k_work_busy_get().  May co-occur with other flags.
	 */
	K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),

	/** @brief Flag indicating a work item that has been submitted to a
	 * queue but has not started running.
	 *
	 * Accessed via k_work_busy_get().  May co-occur with other flags.
	 */
	K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),

	/** @brief Flag indicating a delayed work item that is scheduled for
	 * submission to a queue.
	 *
	 * Accessed via k_work_busy_get().  May co-occur with other flags.
	 */
	K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),

	/** @brief Flag indicating a synced work item that is being flushed.
	 *
	 * Accessed via k_work_busy_get().  May co-occur with other flags.
	 */
	K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
};
    4162              : 
/** @brief A structure used to submit work. */
struct k_work {
	/* All fields are protected by the work module spinlock.  No fields
	 * are to be accessed except through kernel API.
	 */

	/* Node to link into k_work_q pending list. */
	sys_snode_t node;

	/* The function to be invoked by the work queue thread. */
	k_work_handler_t handler;

	/* The queue on which the work item was last submitted. */
	struct k_work_q *queue;

	/* State of the work item.
	 *
	 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
	 *
	 * It can be RUNNING and CANCELING simultaneously.
	 */
	uint32_t flags;
};

/* Private initializer for a k_work item: sets only the handler and leaves
 * every other field zeroed (idle, not linked to any queue).
 */
#define Z_WORK_INITIALIZER(work_handler) { \
	.handler = (work_handler), \
}
    4190              : 
/** @brief A structure used to submit work after a delay. */
struct k_work_delayable {
	/* The work item. */
	struct k_work work;

	/* Timeout used to submit work after a delay. */
	struct _timeout timeout;

	/* The queue to which the work should be submitted. */
	struct k_work_q *queue;
};

/* Private initializer for a delayable work item: sets the handler and marks
 * the embedded k_work as delayable; the timeout and queue start zeroed.
 */
#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
	.work = { \
		.handler = (work_handler), \
		.flags = K_WORK_DELAYABLE, \
	}, \
}

/**
 * @brief Initialize a statically-defined delayable work item.
 *
 * This macro can be used to initialize a statically-defined delayable
 * work item, prior to its first use. For example,
 *
 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
 *
 * Note that if the runtime dependencies support initialization with
 * k_work_init_delayable() using that will eliminate the initialized
 * object in ROM that is produced by this macro and copied in at
 * system startup.
 *
 * @param work Symbol name for delayable work item object
 * @param work_handler Function to invoke each time work item is processed.
 */
#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
	struct k_work_delayable work \
	  = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
    4229              : 
/**
 * @cond INTERNAL_HIDDEN
 */

/* Record used to wait for work to flush.
 *
 * The work item is inserted into the queue that will process (or is
 * processing) the item, and will be processed as soon as the item
 * completes.  When the flusher is processed the semaphore will be
 * signaled, releasing the thread waiting for the flush.
 */
struct z_work_flusher {
	struct k_work work;	/* Sentinel item queued behind the flushed work. */
	struct k_sem sem;	/* Signaled when the sentinel item runs. */
};

/* Record used to wait for work to complete a cancellation.
 *
 * The work item is inserted into a global queue of pending cancels.
 * When a cancelling work item goes idle any matching waiters are
 * removed from pending_cancels and are woken.
 */
struct z_work_canceller {
	sys_snode_t node;	/* Link in the global pending-cancels list. */
	struct k_work *work;	/* The item whose cancellation is awaited. */
	struct k_sem sem;	/* Signaled when the awaited item goes idle. */
};

/**
 * INTERNAL_HIDDEN @endcond
 */
    4261              : 
/** @brief A structure holding internal state for a pending synchronous
 * operation on a work item or queue.
 *
 * Instances of this type are provided by the caller for invocation of
 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs.  A
 * referenced object must persist until the call returns, and be accessible
 * from both the caller thread and the work queue thread.
 *
 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
 * coherent memory; see arch_mem_coherent().  The stack on these architectures
 * is generally not coherent, so the object must not be stack-allocated.
 * Violations are detected by runtime assertion.
 */
struct k_work_sync {
	union {
		struct z_work_flusher flusher;
		struct z_work_canceller canceller;
	};
};
    4281              : 
/** @brief A structure holding optional configuration items for a work
 * queue.
 *
 * This structure, and values it references, are not retained by
 * k_work_queue_start().
 */
struct k_work_queue_config {
	/** The name to be given to the work queue thread.
	 *
	 * If left null the thread will not have a name.
	 */
	const char *name;

	/** Control whether the work queue thread should yield between
	 * items.
	 *
	 * Yielding between items helps guarantee the work queue
	 * thread does not starve other threads, including cooperative
	 * ones released by a work item.  This is the default behavior.
	 *
	 * Set this to @c true to prevent the work queue thread from
	 * yielding between items.  This may be appropriate when a
	 * sequence of items should complete without yielding
	 * control.
	 */
	bool no_yield;

	/** Control whether the work queue thread should be marked as
	 * essential thread.
	 */
	bool essential;

	/** Controls whether the work queue monitors work timeouts.
	 *
	 * If non-zero, and CONFIG_WORKQUEUE_WORK_TIMEOUT is enabled,
	 * the work queue will monitor the duration of each work item.
	 * If the work item handler takes longer than the specified
	 * time to execute, the work queue thread will be aborted, and
	 * an error will be logged if CONFIG_LOG is enabled.  A value
	 * of zero leaves per-item monitoring disabled.
	 */
	uint32_t work_timeout_ms;
};
    4324              : 
/** @brief A structure used to hold work until it can be processed. */
struct k_work_q {
	/* The thread that animates the work. */
	struct k_thread thread;

	/* The thread ID that animates the work. This may be an external thread
	 * if k_work_queue_run() is used.
	 */
	k_tid_t thread_id;

	/* All the following fields must be accessed only while the
	 * work module spinlock is held.
	 */

	/* List of k_work items to be worked. */
	sys_slist_t pending;

	/* Wait queue for idle work thread. */
	_wait_q_t notifyq;

	/* Wait queue for threads waiting for the queue to drain. */
	_wait_q_t drainq;

	/* Flags describing queue state (K_WORK_QUEUE_* bits). */
	uint32_t flags;

#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
	/* Timeout record used to detect a work item exceeding its deadline. */
	struct _timeout work_timeout_record;
	/* Work item whose runtime is being monitored (see work_timeout_ms
	 * in struct k_work_queue_config).
	 */
	struct k_work *work;
	/* Per-item processing deadline configured for this queue. */
	k_timeout_t work_timeout;
#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
};
    4357              : 
    4358              : /* Provide the implementation for inline functions declared above */
    4359              : 
    4360            1 : static inline bool k_work_is_pending(const struct k_work *work)
    4361              : {
    4362              :         return k_work_busy_get(work) != 0;
    4363              : }
    4364              : 
/* Recover the enclosing delayable item from a pointer to its embedded
 * struct k_work.  Valid only when @p work really is the `work` member of a
 * struct k_work_delayable.
 */
static inline struct k_work_delayable *
k_work_delayable_from_work(struct k_work *work)
{
	return CONTAINER_OF(work, struct k_work_delayable, work);
}
    4370              : 
    4371            1 : static inline bool k_work_delayable_is_pending(
    4372              :         const struct k_work_delayable *dwork)
    4373              : {
    4374              :         return k_work_delayable_busy_get(dwork) != 0;
    4375              : }
    4376              : 
/* Absolute tick count at which the item's timeout expires, read from its
 * embedded timeout record.
 */
static inline k_ticks_t k_work_delayable_expires_get(
	const struct k_work_delayable *dwork)
{
	return z_timeout_expires(&dwork->timeout);
}
    4382              : 
/* Ticks remaining until the item's timeout expires, read from its embedded
 * timeout record.
 */
static inline k_ticks_t k_work_delayable_remaining_get(
	const struct k_work_delayable *dwork)
{
	return z_timeout_remaining(&dwork->timeout);
}
    4388              : 
/* Accessor for the thread servicing the queue.  Note this may identify an
 * external thread when k_work_queue_run() is used (see thread_id field).
 */
static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
{
	return queue->thread_id;
}
    4393              : 
    4394              : /** @} */
    4395              : 
    4396              : struct k_work_user;
    4397              : 
    4398              : /**
    4399              :  * @addtogroup workqueue_apis
    4400              :  * @{
    4401              :  */
    4402              : 
/**
 * @typedef k_work_user_handler_t
 * @brief Work item handler function type for user work queues.
 *
 * A work item's handler function is executed in the context of a user
 * workqueue's thread when the work item is processed by that workqueue.
 *
 * @param work Address of the work item being processed.
 */
typedef void (*k_work_user_handler_t)(struct k_work_user *work);
    4413              : 
/**
 * @cond INTERNAL_HIDDEN
 */

/* A user-mode work queue: the k_queue carrying submitted items plus the
 * thread that processes them.
 */
struct k_work_user_q {
	struct k_queue queue;
	struct k_thread thread;
};

enum {
	K_WORK_USER_STATE_PENDING,	/* Work item pending state */
};

struct k_work_user {
	void *_reserved;		/* Used by k_queue implementation. */
	k_work_user_handler_t handler;	/* Invoked when the item is processed. */
	atomic_t flags;			/* Bit K_WORK_USER_STATE_PENDING tracks
					 * whether the item is queued.
					 */
};
    4432              : 
    4433              : /**
    4434              :  * INTERNAL_HIDDEN @endcond
    4435              :  */
    4436              : 
/* Designated initializers are only available in C++ from C++20 on, so when
 * this header is compiled as older C++ fall back to positional form.
 */
#if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
#define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
#else
#define Z_WORK_USER_INITIALIZER(work_handler) \
	{ \
	._reserved = NULL, \
	.handler = (work_handler), \
	.flags = 0 \
	}
#endif
    4447              : 
/**
 * @brief Initialize a statically-defined user work item.
 *
 * This macro can be used to initialize a statically-defined user work
 * item, prior to its first use. For example,
 *
 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
 *
 * For runtime initialization of an already-allocated item see
 * k_work_user_init().
 *
 * @param work Symbol name for work item object
 * @param work_handler Function to invoke each time work item is processed.
 */
#define K_WORK_USER_DEFINE(work, work_handler) \
	struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
    4461              : 
    4462              : /**
    4463              :  * @brief Initialize a userspace work item.
    4464              :  *
    4465              :  * This routine initializes a user workqueue work item, prior to its
    4466              :  * first use.
    4467              :  *
    4468              :  * @param work Address of work item.
    4469              :  * @param handler Function to invoke each time work item is processed.
    4470              :  */
    4471            1 : static inline void k_work_user_init(struct k_work_user *work,
    4472              :                                     k_work_user_handler_t handler)
    4473              : {
    4474              :         *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
    4475              : }
    4476              : 
    4477              : /**
    4478              :  * @brief Check if a userspace work item is pending.
    4479              :  *
    4480              :  * This routine indicates if user work item @a work is pending in a workqueue's
    4481              :  * queue.
    4482              :  *
    4483              :  * @note Checking if the work is pending gives no guarantee that the
    4484              :  *       work will still be pending when this information is used. It is up to
    4485              :  *       the caller to make sure that this information is used in a safe manner.
    4486              :  *
    4487              :  * @funcprops \isr_ok
    4488              :  *
    4489              :  * @param work Address of work item.
    4490              :  *
    4491              :  * @return true if work item is pending, or false if it is not pending.
    4492              :  */
    4493            1 : static inline bool k_work_user_is_pending(struct k_work_user *work)
    4494              : {
    4495              :         return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
    4496              : }
    4497              : 
    4498              : /**
    4499              :  * @brief Submit a work item to a user mode workqueue
    4500              :  *
    4501              :  * Submits a work item to a workqueue that runs in user mode. A temporary
    4502              :  * memory allocation is made from the caller's resource pool which is freed
    4503              :  * once the worker thread consumes the k_work item. The workqueue
    4504              :  * thread must have memory access to the k_work item being submitted. The caller
    4505              :  * must have permission granted on the work_q parameter's queue object.
    4506              :  *
    4507              :  * @funcprops \isr_ok
    4508              :  *
    4509              :  * @param work_q Address of workqueue.
    4510              :  * @param work Address of work item.
    4511              :  *
    4512              :  * @retval -EBUSY if the work item was already in some workqueue
    4513              :  * @retval -ENOMEM if no memory for thread resource pool allocation
    4514              :  * @retval 0 Success
    4515              :  */
    4516            1 : static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
    4517              :                                               struct k_work_user *work)
    4518              : {
    4519              :         int ret = -EBUSY;
    4520              : 
    4521              :         if (!atomic_test_and_set_bit(&work->flags,
    4522              :                                      K_WORK_USER_STATE_PENDING)) {
    4523              :                 ret = k_queue_alloc_append(&work_q->queue, work);
    4524              : 
    4525              :                 /* Couldn't insert into the queue. Clear the pending bit
    4526              :                  * so the work item can be submitted again
    4527              :                  */
    4528              :                 if (ret != 0) {
    4529              :                         atomic_clear_bit(&work->flags,
    4530              :                                          K_WORK_USER_STATE_PENDING);
    4531              :                 }
    4532              :         }
    4533              : 
    4534              :         return ret;
    4535              : }
    4536              : 
    4537              : /**
    4538              :  * @brief Start a workqueue in user mode
    4539              :  *
    4540              :  * This works identically to k_work_queue_start() except it is callable from
    4541              :  * user mode, and the worker thread created will run in user mode.  The caller
    4542              :  * must have permissions granted on both the work_q parameter's thread and
    4543              :  * queue objects, and the same restrictions on priority apply as
    4544              :  * k_thread_create().
    4545              :  *
    4546              :  * @param work_q Address of workqueue.
    4547              :  * @param stack Pointer to work queue thread's stack space, as defined by
    4548              :  *              K_THREAD_STACK_DEFINE()
    4549              :  * @param stack_size Size of the work queue thread's stack (in bytes), which
    4550              :  *              should either be the same constant passed to
    4551              :  *              K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
    4552              :  * @param prio Priority of the work queue's thread.
    4553              :  * @param name optional thread name.  If not null a copy is made into the
    4554              :  *              thread's name buffer.
    4555              :  */
    4556            1 : void k_work_user_queue_start(struct k_work_user_q *work_q,
    4557              :                                     k_thread_stack_t *stack,
    4558              :                                     size_t stack_size, int prio,
    4559              :                                     const char *name);
    4560              : 
    4561              : /**
    4562              :  * @brief Access the user mode thread that animates a work queue.
    4563              :  *
    4564              :  * This is necessary to grant a user mode work queue thread access to things
    4565              :  * the work items it will process are expected to use.
    4566              :  *
    4567              :  * @param work_q pointer to the user mode queue structure.
    4568              :  *
    4569              :  * @return the user mode thread associated with the work queue.
    4570              :  */
static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
{
        /* Return the address of the queue's embedded worker thread object
         * so the caller can grant it permissions on kernel objects.
         */
        return &work_q->thread;
}
    4575              : 
    4576              : /** @} */
    4577              : 
    4578              : /**
    4579              :  * @cond INTERNAL_HIDDEN
    4580              :  */
    4581              : 
struct k_work_poll {
        /* Underlying work item submitted once an event triggers */
        struct k_work work;
        /* Target workqueue recorded at submission time */
        struct k_work_q *workq;
        /* Internal poller watching the caller's event array */
        struct z_poller poller;
        /* Caller-provided event array; must remain valid until the
         * handler runs or the work is cancelled (see submit docs)
         */
        struct k_poll_event *events;
        /* Number of entries in events[] */
        int num_events;
        /* Handler to invoke once triggered or timed out */
        k_work_handler_t real_handler;
        /* Kernel timeout used when no event fires in time */
        struct _timeout timeout;
        /* Result of the poll operation */
        int poll_result;
};
    4592              : 
    4593              : /**
    4594              :  * INTERNAL_HIDDEN @endcond
    4595              :  */
    4596              : 
    4597              : /**
    4598              :  * @addtogroup workqueue_apis
    4599              :  * @{
    4600              :  */
    4601              : 
/**
 * @brief Define and initialize a statically-allocated work item.
 *
 * This macro defines a struct k_work object with the given name and
 * initializes it with the given handler, prior to its first use.
 * For example,
 *
 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
 *
 * @param work Symbol name for work item object
 * @param work_handler Function to invoke each time work item is processed.
 */
#define K_WORK_DEFINE(work, work_handler) \
        struct k_work work = Z_WORK_INITIALIZER(work_handler)
    4615              : 
    4616              : /**
    4617              :  * @brief Initialize a triggered work item.
    4618              :  *
    4619              :  * This routine initializes a workqueue triggered work item, prior to
    4620              :  * its first use.
    4621              :  *
    4622              :  * @param work Address of triggered work item.
    4623              :  * @param handler Function to invoke each time work item is processed.
    4624              :  */
    4625            1 : void k_work_poll_init(struct k_work_poll *work,
    4626              :                              k_work_handler_t handler);
    4627              : 
    4628              : /**
    4629              :  * @brief Submit a triggered work item.
    4630              :  *
    4631              :  * This routine schedules work item @a work to be processed by workqueue
    4632              :  * @a work_q when one of the given @a events is signaled. The routine
    4633              :  * initiates internal poller for the work item and then returns to the caller.
    4634              :  * Only when one of the watched events happen the work item is actually
    4635              :  * submitted to the workqueue and becomes pending.
    4636              :  *
    4637              :  * Submitting a previously submitted triggered work item that is still
    4638              :  * waiting for the event cancels the existing submission and reschedules it
 * using the new event list. Note that this behavior is inherently subject
    4640              :  * to race conditions with the pre-existing triggered work item and work queue,
    4641              :  * so care must be taken to synchronize such resubmissions externally.
    4642              :  *
    4643              :  * @funcprops \isr_ok
    4644              :  *
    4645              :  * @warning
    4646              :  * Provided array of events as well as a triggered work item must be placed
    4647              :  * in persistent memory (valid until work handler execution or work
    4648              :  * cancellation) and cannot be modified after submission.
    4649              :  *
    4650              :  * @param work_q Address of workqueue.
    4651              :  * @param work Address of delayed work item.
    4652              :  * @param events An array of events which trigger the work.
    4653              :  * @param num_events The number of events in the array.
    4654              :  * @param timeout Timeout after which the work will be scheduled
    4655              :  *                for execution even if not triggered.
    4656              :  *
    4657              :  *
    4658              :  * @retval 0 Work item started watching for events.
    4659              :  * @retval -EINVAL Work item is being processed or has completed its work.
    4660              :  * @retval -EADDRINUSE Work item is pending on a different workqueue.
    4661              :  */
    4662            1 : int k_work_poll_submit_to_queue(struct k_work_q *work_q,
    4663              :                                        struct k_work_poll *work,
    4664              :                                        struct k_poll_event *events,
    4665              :                                        int num_events,
    4666              :                                        k_timeout_t timeout);
    4667              : 
    4668              : /**
    4669              :  * @brief Submit a triggered work item to the system workqueue.
    4670              :  *
    4671              :  * This routine schedules work item @a work to be processed by system
    4672              :  * workqueue when one of the given @a events is signaled. The routine
    4673              :  * initiates internal poller for the work item and then returns to the caller.
    4674              :  * Only when one of the watched events happen the work item is actually
    4675              :  * submitted to the workqueue and becomes pending.
    4676              :  *
    4677              :  * Submitting a previously submitted triggered work item that is still
    4678              :  * waiting for the event cancels the existing submission and reschedules it
 * using the new event list. Note that this behavior is inherently subject
    4680              :  * to race conditions with the pre-existing triggered work item and work queue,
    4681              :  * so care must be taken to synchronize such resubmissions externally.
    4682              :  *
    4683              :  * @funcprops \isr_ok
    4684              :  *
    4685              :  * @warning
    4686              :  * Provided array of events as well as a triggered work item must not be
    4687              :  * modified until the item has been processed by the workqueue.
    4688              :  *
    4689              :  * @param work Address of delayed work item.
    4690              :  * @param events An array of events which trigger the work.
    4691              :  * @param num_events The number of events in the array.
    4692              :  * @param timeout Timeout after which the work will be scheduled
    4693              :  *                for execution even if not triggered.
    4694              :  *
    4695              :  * @retval 0 Work item started watching for events.
    4696              :  * @retval -EINVAL Work item is being processed or has completed its work.
    4697              :  * @retval -EADDRINUSE Work item is pending on a different workqueue.
    4698              :  */
    4699            1 : int k_work_poll_submit(struct k_work_poll *work,
    4700              :                                      struct k_poll_event *events,
    4701              :                                      int num_events,
    4702              :                                      k_timeout_t timeout);
    4703              : 
    4704              : /**
    4705              :  * @brief Cancel a triggered work item.
    4706              :  *
    4707              :  * This routine cancels the submission of triggered work item @a work.
    4708              :  * A triggered work item can only be canceled if no event triggered work
    4709              :  * submission.
    4710              :  *
    4711              :  * @funcprops \isr_ok
    4712              :  *
    4713              :  * @param work Address of delayed work item.
    4714              :  *
    4715              :  * @retval 0 Work item canceled.
    4716              :  * @retval -EINVAL Work item is being processed or has completed its work.
    4717              :  */
    4718            1 : int k_work_poll_cancel(struct k_work_poll *work);
    4719              : 
    4720              : /** @} */
    4721              : 
    4722              : /**
    4723              :  * @defgroup msgq_apis Message Queue APIs
    4724              :  * @ingroup kernel_apis
    4725              :  * @{
    4726              :  */
    4727              : 
    4728              : /**
    4729              :  * @brief Message Queue Structure
    4730              :  */
struct k_msgq {
        /** Message queue wait queue */
        _wait_q_t wait_q;
        /** Lock protecting queue state */
        struct k_spinlock lock;
        /** Message size (in bytes) */
        size_t msg_size;
        /** Maximal number of messages */
        uint32_t max_msgs;
        /** Start of message buffer */
        char *buffer_start;
        /** End of message buffer */
        char *buffer_end;
        /** Read pointer */
        char *read_ptr;
        /** Write pointer */
        char *write_ptr;
        /** Number of used messages */
        uint32_t used_msgs;

        Z_DECL_POLL_EVENT

        /** Message queue flags (e.g. K_MSGQ_FLAG_ALLOC) */
        uint8_t flags;

        SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)

#ifdef CONFIG_OBJ_CORE_MSGQ
        struct k_obj_core  obj_core;
#endif
};
    4762              : /**
    4763              :  * @cond INTERNAL_HIDDEN
    4764              :  */
    4765              : 
    4766              : 
/* Static initializer for a struct k_msgq backed by caller-supplied storage
 * of q_max_msgs messages of q_msg_size bytes each. The queue starts empty,
 * with the read and write pointers both at the start of the buffer.
 */
#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
        { \
        .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
        .lock = {}, \
        .msg_size = q_msg_size, \
        .max_msgs = q_max_msgs, \
        .buffer_start = q_buffer, \
        .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
        .read_ptr = q_buffer, \
        .write_ptr = q_buffer, \
        .used_msgs = 0, \
        Z_POLL_EVENT_OBJ_INIT(obj) \
        .flags = 0, \
        }
    4781              : 
    4782              : /**
    4783              :  * INTERNAL_HIDDEN @endcond
    4784              :  */
    4785              : 
    4786              : 
    4787            0 : #define K_MSGQ_FLAG_ALLOC       BIT(0)
    4788              : 
    4789              : /**
    4790              :  * @brief Message Queue Attributes
    4791              :  */
struct k_msgq_attrs {
        /** Message size (in bytes) */
        size_t msg_size;
        /** Maximal number of messages */
        uint32_t max_msgs;
        /** Number of used messages */
        uint32_t used_msgs;
};
    4800              : 
    4801              : 
/**
 * @brief Statically define and initialize a message queue.
 *
 * The message queue's ring buffer contains space for @a q_max_msgs messages,
 * each of which is @a q_msg_size bytes long. Alignment of the message queue's
 * ring buffer is not necessary; setting @a q_align to 1 is sufficient.
 *
 * @note The ring buffer is placed in @c __noinit storage, so its initial
 * contents are indeterminate until messages are queued.
 *
 * The message queue can be accessed outside the module where it is defined
 * using:
 *
 * @code extern struct k_msgq <name>; @endcode
 *
 * @param q_name Name of the message queue.
 * @param q_msg_size Message size (in bytes).
 * @param q_max_msgs Maximum number of messages that can be queued.
 * @param q_align Alignment of the message queue's ring buffer (power of 2).
 *
 */
#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align)          \
        static char __noinit __aligned(q_align)                         \
                _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)];      \
        STRUCT_SECTION_ITERABLE(k_msgq, q_name) =                       \
               Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
                                  (q_msg_size), (q_max_msgs))
    4826              : 
    4827              : /**
    4828              :  * @brief Initialize a message queue.
    4829              :  *
    4830              :  * This routine initializes a message queue object, prior to its first use.
    4831              :  *
    4832              :  * The message queue's ring buffer must contain space for @a max_msgs messages,
    4833              :  * each of which is @a msg_size bytes long. Alignment of the message queue's
    4834              :  * ring buffer is not necessary.
    4835              :  *
    4836              :  * @param msgq Address of the message queue.
    4837              :  * @param buffer Pointer to ring buffer that holds queued messages.
    4838              :  * @param msg_size Message size (in bytes).
    4839              :  * @param max_msgs Maximum number of messages that can be queued.
    4840              :  */
    4841            1 : void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
    4842              :                  uint32_t max_msgs);
    4843              : 
    4844              : /**
    4845              :  * @brief Initialize a message queue.
    4846              :  *
    4847              :  * This routine initializes a message queue object, prior to its first use,
    4848              :  * allocating its internal ring buffer from the calling thread's resource
    4849              :  * pool.
    4850              :  *
    4851              :  * Memory allocated for the ring buffer can be released by calling
    4852              :  * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
    4853              :  * all of its references.
    4854              :  *
    4855              :  * @param msgq Address of the message queue.
    4856              :  * @param msg_size Message size (in bytes).
    4857              :  * @param max_msgs Maximum number of messages that can be queued.
    4858              :  *
    4859              :  * @return 0 on success, -ENOMEM if there was insufficient memory in the
    4860              :  *      thread's resource pool, or -EINVAL if the size parameters cause
    4861              :  *      an integer overflow.
    4862              :  */
    4863            1 : __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
    4864              :                                 uint32_t max_msgs);
    4865              : 
    4866              : /**
    4867              :  * @brief Release allocated buffer for a queue
    4868              :  *
    4869              :  * Releases memory allocated for the ring buffer.
    4870              :  *
    4871              :  * @param msgq message queue to cleanup
    4872              :  *
    4873              :  * @retval 0 on success
    4874              :  * @retval -EBUSY Queue not empty
    4875              :  */
    4876            1 : int k_msgq_cleanup(struct k_msgq *msgq);
    4877              : 
    4878              : /**
    4879              :  * @brief Send a message to the end of a message queue.
    4880              :  *
 * This routine sends a message to message queue @a msgq.
    4882              :  *
    4883              :  * @note The message content is copied from @a data into @a msgq and the @a data
    4884              :  * pointer is not retained, so the message content will not be modified
    4885              :  * by this function.
    4886              :  *
    4887              :  * @funcprops \isr_ok
    4888              :  *
    4889              :  * @param msgq Address of the message queue.
    4890              :  * @param data Pointer to the message.
    4891              :  * @param timeout Waiting period to add the message, or one of the special
    4892              :  *                values K_NO_WAIT and K_FOREVER.
    4893              :  *
    4894              :  * @retval 0 Message sent.
    4895              :  * @retval -ENOMSG Returned without waiting or queue purged.
    4896              :  * @retval -EAGAIN Waiting period timed out.
    4897              :  */
    4898            1 : __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
    4899              : 
    4900              : /**
    4901              :  * @brief Send a message to the front of a message queue.
    4902              :  *
 * This routine sends a message to the beginning (head) of message queue @a msgq.
    4904              :  * Messages sent with this method will be retrieved before any pre-existing
    4905              :  * messages in the queue.
    4906              :  *
    4907              :  * @note if there is no space in the message queue, this function will
    4908              :  * behave the same as k_msgq_put.
    4909              :  *
    4910              :  * @note The message content is copied from @a data into @a msgq and the @a data
    4911              :  * pointer is not retained, so the message content will not be modified
    4912              :  * by this function.
    4913              :  *
    4914              :  * @funcprops \isr_ok
    4915              :  *
    4916              :  * @param msgq Address of the message queue.
    4917              :  * @param data Pointer to the message.
    4918              :  * @param timeout Waiting period to add the message, or one of the special
    4919              :  *                values K_NO_WAIT and K_FOREVER.
    4920              :  *
    4921              :  * @retval 0 Message sent.
    4922              :  * @retval -ENOMSG Returned without waiting or queue purged.
    4923              :  * @retval -EAGAIN Waiting period timed out.
    4924              :  */
    4925            1 : __syscall int k_msgq_put_front(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
    4926              : 
    4927              : /**
    4928              :  * @brief Receive a message from a message queue.
    4929              :  *
 * This routine receives a message from message queue @a msgq in a "first in,
    4931              :  * first out" manner.
    4932              :  *
    4933              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    4934              :  *
    4935              :  * @funcprops \isr_ok
    4936              :  *
    4937              :  * @param msgq Address of the message queue.
    4938              :  * @param data Address of area to hold the received message.
    4939              :  * @param timeout Waiting period to receive the message,
    4940              :  *                or one of the special values K_NO_WAIT and
    4941              :  *                K_FOREVER.
    4942              :  *
    4943              :  * @retval 0 Message received.
    4944              :  * @retval -ENOMSG Returned without waiting or queue purged.
    4945              :  * @retval -EAGAIN Waiting period timed out.
    4946              :  */
    4947            1 : __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
    4948              : 
    4949              : /**
    4950              :  * @brief Peek/read a message from a message queue.
    4951              :  *
 * This routine reads a message from message queue @a msgq in a "first in,
    4953              :  * first out" manner and leaves the message in the queue.
    4954              :  *
    4955              :  * @funcprops \isr_ok
    4956              :  *
    4957              :  * @param msgq Address of the message queue.
    4958              :  * @param data Address of area to hold the message read from the queue.
    4959              :  *
    4960              :  * @retval 0 Message read.
    4961              :  * @retval -ENOMSG Returned when the queue has no message.
    4962              :  */
    4963            1 : __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
    4964              : 
    4965              : /**
    4966              :  * @brief Peek/read a message from a message queue at the specified index
    4967              :  *
    4968              :  * This routine reads a message from message queue at the specified index
    4969              :  * and leaves the message in the queue.
    4970              :  * k_msgq_peek_at(msgq, data, 0) is equivalent to k_msgq_peek(msgq, data)
    4971              :  *
    4972              :  * @funcprops \isr_ok
    4973              :  *
    4974              :  * @param msgq Address of the message queue.
    4975              :  * @param data Address of area to hold the message read from the queue.
    4976              :  * @param idx Message queue index at which to peek
    4977              :  *
    4978              :  * @retval 0 Message read.
    4979              :  * @retval -ENOMSG Returned when the queue has no message at index.
    4980              :  */
    4981            1 : __syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
    4982              : 
    4983              : /**
    4984              :  * @brief Purge a message queue.
    4985              :  *
    4986              :  * This routine discards all unreceived messages in a message queue's ring
    4987              :  * buffer. Any threads that are blocked waiting to send a message to the
    4988              :  * message queue are unblocked and see an -ENOMSG error code.
    4989              :  *
    4990              :  * @param msgq Address of the message queue.
    4991              :  */
    4992            1 : __syscall void k_msgq_purge(struct k_msgq *msgq);
    4993              : 
    4994              : /**
    4995              :  * @brief Get the amount of free space in a message queue.
    4996              :  *
    4997              :  * This routine returns the number of unused entries in a message queue's
    4998              :  * ring buffer.
    4999              :  *
    5000              :  * @param msgq Address of the message queue.
    5001              :  *
    5002              :  * @return Number of unused ring buffer entries.
    5003              :  */
    5004            1 : __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
    5005              : 
    5006              : /**
    5007              :  * @brief Get basic attributes of a message queue.
    5008              :  *
    5009              :  * This routine fetches basic attributes of message queue into attr argument.
    5010              :  *
    5011              :  * @param msgq Address of the message queue.
    5012              :  * @param attrs pointer to message queue attribute structure.
    5013              :  */
    5014            1 : __syscall void  k_msgq_get_attrs(struct k_msgq *msgq,
    5015              :                                  struct k_msgq_attrs *attrs);
    5016              : 
    5017              : 
/* Implementation of k_msgq_num_free_get(): free entries are the queue's
 * total capacity minus the messages currently stored.
 * NOTE(review): the fields are read without taking msgq->lock, so the
 * result may be stale by the time the caller acts on it — confirm callers
 * treat it as a hint only.
 */
static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
{
        return msgq->max_msgs - msgq->used_msgs;
}
    5022              : 
    5023              : /**
    5024              :  * @brief Get the number of messages in a message queue.
    5025              :  *
    5026              :  * This routine returns the number of messages in a message queue's ring buffer.
    5027              :  *
    5028              :  * @param msgq Address of the message queue.
    5029              :  *
    5030              :  * @return Number of messages.
    5031              :  */
    5032            1 : __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
    5033              : 
/* Implementation of k_msgq_num_used_get(): returns the current message
 * count. NOTE(review): unlocked snapshot — may change immediately after
 * being read.
 */
static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
{
        return msgq->used_msgs;
}
    5038              : 
    5039              : /** @} */
    5040              : 
    5041              : /**
    5042              :  * @defgroup mailbox_apis Mailbox APIs
    5043              :  * @ingroup kernel_apis
    5044              :  * @{
    5045              :  */
    5046              : 
    5047              : /**
    5048              :  * @brief Mailbox Message Structure
    5049              :  *
    5050              :  */
struct k_mbox_msg {
        /** Size of message (in bytes) */
        size_t size;
        /** Application-defined information value */
        uint32_t info;
        /** Sender's message data buffer */
        void *tx_data;
        /** Source thread id */
        k_tid_t rx_source_thread;
        /** Target thread id */
        k_tid_t tx_target_thread;
        /** internal use only - thread waiting on send (may be a dummy) */
        k_tid_t _syncing_thread;
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
        /** internal use only - semaphore used during asynchronous send */
        struct k_sem *_async_sem;
#endif
};
    5069              : /**
    5070              :  * @brief Mailbox Structure
    5071              :  *
    5072              :  */
struct k_mbox {
        /** Transmit messages queue */
        _wait_q_t tx_msg_queue;
        /** Receive message queue */
        _wait_q_t rx_msg_queue;
        /** Lock */
        struct k_spinlock lock;

        SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)

#ifdef CONFIG_OBJ_CORE_MAILBOX
        struct k_obj_core  obj_core;
#endif
};
    5086              : /**
    5087              :  * @cond INTERNAL_HIDDEN
    5088              :  */
    5089              : 
/* Static initializer for a struct k_mbox: both wait queues start empty;
 * the remaining members (lock, optional tracing/obj_core fields) are
 * implicitly zero-initialized by the designated initializer.
 */
#define Z_MBOX_INITIALIZER(obj) \
        { \
        .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
        .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
        }
    5095              : 
    5096              : /**
    5097              :  * INTERNAL_HIDDEN @endcond
    5098              :  */
    5099              : 
    5100              : /**
    5101              :  * @brief Statically define and initialize a mailbox.
    5102              :  *
    5103              :  * The mailbox is to be accessed outside the module where it is defined using:
    5104              :  *
    5105              :  * @code extern struct k_mbox <name>; @endcode
    5106              :  *
    5107              :  * @param name Name of the mailbox.
    5108              :  */
    5109            1 : #define K_MBOX_DEFINE(name) \
    5110              :         STRUCT_SECTION_ITERABLE(k_mbox, name) = \
    5111              :                 Z_MBOX_INITIALIZER(name) \
    5112              : 
    5113              : /**
    5114              :  * @brief Initialize a mailbox.
    5115              :  *
    5116              :  * This routine initializes a mailbox object, prior to its first use.
    5117              :  *
    5118              :  * @param mbox Address of the mailbox.
    5119              :  */
    5120            1 : void k_mbox_init(struct k_mbox *mbox);
    5121              : 
    5122              : /**
    5123              :  * @brief Send a mailbox message in a synchronous manner.
    5124              :  *
    5125              :  * This routine sends a message to @a mbox and waits for a receiver to both
    5126              :  * receive and process it. The message data may be in a buffer or non-existent
    5127              :  * (i.e. an empty message).
    5128              :  *
    5129              :  * @param mbox Address of the mailbox.
    5130              :  * @param tx_msg Address of the transmit message descriptor.
    5131              :  * @param timeout Waiting period for the message to be received,
    5132              :  *                or one of the special values K_NO_WAIT
    5133              :  *                and K_FOREVER. Once the message has been received,
    5134              :  *                this routine waits as long as necessary for the message
    5135              :  *                to be completely processed.
    5136              :  *
    5137              :  * @retval 0 Message sent.
    5138              :  * @retval -ENOMSG Returned without waiting.
    5139              :  * @retval -EAGAIN Waiting period timed out.
    5140              :  */
    5141            1 : int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
    5142              :                       k_timeout_t timeout);
    5143              : 
    5144              : /**
    5145              :  * @brief Send a mailbox message in an asynchronous manner.
    5146              :  *
    5147              :  * This routine sends a message to @a mbox without waiting for a receiver
    5148              :  * to process it. The message data may be in a buffer or non-existent
    5149              :  * (i.e. an empty message). Optionally, the semaphore @a sem will be given
    5150              :  * when the message has been both received and completely processed by
    5151              :  * the receiver.
    5152              :  *
    5153              :  * @param mbox Address of the mailbox.
    5154              :  * @param tx_msg Address of the transmit message descriptor.
    5155              :  * @param sem Address of a semaphore, or NULL if none is needed.
    5156              :  */
    5157            1 : void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
    5158              :                              struct k_sem *sem);
    5159              : 
    5160              : /**
    5161              :  * @brief Receive a mailbox message.
    5162              :  *
    5163              :  * This routine receives a message from @a mbox, then optionally retrieves
    5164              :  * its data and disposes of the message.
    5165              :  *
    5166              :  * @param mbox Address of the mailbox.
    5167              :  * @param rx_msg Address of the receive message descriptor.
    5168              :  * @param buffer Address of the buffer to receive data, or NULL to defer data
    5169              :  *               retrieval and message disposal until later.
    5170              :  * @param timeout Waiting period for a message to be received,
    5171              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    5172              :  *
    5173              :  * @retval 0 Message received.
    5174              :  * @retval -ENOMSG Returned without waiting.
    5175              :  * @retval -EAGAIN Waiting period timed out.
    5176              :  */
    5177            1 : int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
    5178              :                       void *buffer, k_timeout_t timeout);
    5179              : 
    5180              : /**
    5181              :  * @brief Retrieve mailbox message data into a buffer.
    5182              :  *
    5183              :  * This routine completes the processing of a received message by retrieving
    5184              :  * its data into a buffer, then disposing of the message.
    5185              :  *
    5186              :  * Alternatively, this routine can be used to dispose of a received message
    5187              :  * without retrieving its data.
    5188              :  *
    5189              :  * @param rx_msg Address of the receive message descriptor.
    5190              :  * @param buffer Address of the buffer to receive data, or NULL to discard
    5191              :  *               the data.
    5192              :  */
    5193            1 : void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
    5194              : 
    5195              : /** @} */
    5196              : 
    5197              : /**
    5198              :  * @defgroup pipe_apis Pipe APIs
    5199              :  * @ingroup kernel_apis
    5200              :  * @{
    5201              :  */
    5202              : 
    5203              : /**
    5204              :  * @brief initialize a pipe
    5205              :  *
    5206              :  * This routine initializes a pipe object, prior to its first use.
    5207              :  *
    5208              :  * @param pipe Address of the pipe.
    5209              :  * @param buffer Address of the pipe's buffer, or NULL if no ring buffer is used.
    5210              :  * @param buffer_size Size of the pipe's buffer, or zero if no ring buffer is used.
    5211              :  */
    5212            1 : __syscall void k_pipe_init(struct k_pipe *pipe, uint8_t *buffer, size_t buffer_size);
    5213              : 
/* Internal state bits kept in struct k_pipe::flags. */
enum pipe_flags {
        PIPE_FLAG_OPEN = BIT(0),  /* pipe is open (cleared by k_pipe_close()) */
        PIPE_FLAG_RESET = BIT(1), /* reset in progress; waiters return -ECANCELED */
};
    5218              : 
/* Kernel pipe object: a byte-stream conduit backed by an optional ring
 * buffer, with wait queues for blocked readers and writers.
 */
struct k_pipe {
        size_t waiting;          /* waiter bookkeeping — presumably bytes or
                                  * thread count pending; TODO confirm against
                                  * pipe implementation
                                  */
        struct ring_buf buf;     /* backing ring buffer (may be zero-sized) */
        struct k_spinlock lock;  /* serializes access to pipe state */
        _wait_q_t data;          /* readers blocked waiting for data */
        _wait_q_t space;         /* writers blocked waiting for free space */
        uint8_t flags;           /* bitmask of enum pipe_flags */

        Z_DECL_POLL_EVENT
#ifdef CONFIG_OBJ_CORE_PIPE
        struct k_obj_core  obj_core;
#endif
        SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
};
    5233              : 
    5234              : /**
    5235              :  * @cond INTERNAL_HIDDEN
    5236              :  */
    5237              : #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size)  \
    5238              : {                                                               \
    5239              :         .waiting = 0,                                           \
    5240              :         .buf = RING_BUF_INIT(pipe_buffer, pipe_buffer_size),    \
    5241              :         .data = Z_WAIT_Q_INIT(&obj.data),                   \
    5242              :         .space = Z_WAIT_Q_INIT(&obj.space),                 \
    5243              :         .flags = PIPE_FLAG_OPEN,                                \
    5244              :         Z_POLL_EVENT_OBJ_INIT(obj)                              \
    5245              : }
    5246              : /**
    5247              :  * INTERNAL_HIDDEN @endcond
    5248              :  */
    5249              : 
    5250              : /**
    5251              :  * @brief Statically define and initialize a pipe.
    5252              :  *
    5253              :  * The pipe can be accessed outside the module where it is defined using:
    5254              :  *
    5255              :  * @code extern struct k_pipe <name>; @endcode
    5256              :  *
    5257              :  * @param name Name of the pipe.
    5258              :  * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes)
    5259              :  *                         or zero if no ring buffer is used.
    5260              :  * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
    5261              :  *
    5262              :  */
/* Backing storage is placed in __noinit (uninitialized) memory, so the
 * buffer contents are indeterminate until written through the pipe API.
 */
#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align)               \
        static unsigned char __noinit __aligned(pipe_align)             \
                _k_pipe_buf_##name[pipe_buffer_size];                   \
        STRUCT_SECTION_ITERABLE(k_pipe, name) =                         \
                Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
    5268              : 
    5269              : 
    5270              : /**
    5271              :  * @brief Write data to a pipe
    5272              :  *
    5273              :  * This routine writes up to @a len bytes of data to @a pipe.
    5274              :  * If the pipe is full, the routine will block until the data can be written or the timeout expires.
    5275              :  *
    5276              :  * @param pipe Address of the pipe.
    5277              :  * @param data Address of data to write.
    5278              :  * @param len Size of data (in bytes).
    5279              :  * @param timeout Waiting period to wait for the data to be written.
    5280              :  *
    5281              :  * @retval number of bytes written on success
    5282              :  * @retval -EAGAIN if no data could be written before the timeout expired
    5283              :  * @retval -ECANCELED if the write was interrupted by k_pipe_reset(..)
    5284              :  * @retval -EPIPE if the pipe was closed
    5285              :  */
    5286            1 : __syscall int k_pipe_write(struct k_pipe *pipe, const uint8_t *data, size_t len,
    5287              :                            k_timeout_t timeout);
    5288              : 
    5289              : /**
 * @brief Read data from a pipe
 *
 * This routine reads up to @a len bytes of data from @a pipe.
    5292              :  * If the pipe is empty, the routine will block until the data can be read or the timeout expires.
    5293              :  *
    5294              :  * @param pipe Address of the pipe.
    5295              :  * @param data Address to place the data read from pipe.
    5296              :  * @param len Requested number of bytes to read.
    5297              :  * @param timeout Waiting period to wait for the data to be read.
    5298              :  *
    5299              :  * @retval number of bytes read on success
    5300              :  * @retval -EAGAIN if no data could be read before the timeout expired
    5301              :  * @retval -ECANCELED if the read was interrupted by k_pipe_reset(..)
    5302              :  * @retval -EPIPE if the pipe was closed
    5303              :  */
    5304            1 : __syscall int k_pipe_read(struct k_pipe *pipe, uint8_t *data, size_t len,
    5305              :                           k_timeout_t timeout);
    5306              : 
    5307              : /**
 * @brief Reset a pipe
 *
 * This routine resets the pipe, discarding any unread data and unblocking any threads waiting to
    5310              :  * write or read, causing the waiting threads to return with -ECANCELED. Calling k_pipe_read(..) or
    5311              :  * k_pipe_write(..) when the pipe is resetting but not yet reset will return -ECANCELED.
    5312              :  * The pipe is left open after a reset and can be used as normal.
    5313              :  *
    5314              :  * @param pipe Address of the pipe.
    5315              :  */
    5316            1 : __syscall void k_pipe_reset(struct k_pipe *pipe);
    5317              : 
    5318              : /**
    5319              :  * @brief Close a pipe
    5320              :  *
    5321              :  * This routine closes a pipe. Any threads that were blocked on the pipe
    5322              :  * will be unblocked and receive an error code.
    5323              :  *
    5324              :  * @param pipe Address of the pipe.
    5325              :  */
    5326            1 : __syscall void k_pipe_close(struct k_pipe *pipe);
    5327              : /** @} */
    5328              : 
    5329              : /**
    5330              :  * @cond INTERNAL_HIDDEN
    5331              :  */
/* Sizing and usage bookkeeping for a memory slab. */
struct k_mem_slab_info {
        uint32_t num_blocks;  /* total number of blocks in the slab */
        size_t   block_size;  /* size of each block, in bytes */
        uint32_t num_used;    /* blocks currently allocated */
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
        uint32_t max_used;    /* high-water mark of num_used */
#endif
};
    5340              : 
/* Kernel memory slab: fixed-size block allocator over a caller-supplied
 * buffer, with a wait queue for threads blocked on allocation.
 */
struct k_mem_slab {
        _wait_q_t wait_q;            /* threads waiting for a free block */
        struct k_spinlock lock;      /* serializes slab state updates */
        char *buffer;                /* backing storage for all blocks */
        char *free_list;             /* head of free-block chain — presumably an
                                      * intrusive list threaded through free
                                      * blocks; confirm in mem_slab.c
                                      */
        struct k_mem_slab_info info; /* block counts and sizing */

        SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)

#ifdef CONFIG_OBJ_CORE_MEM_SLAB
        struct k_obj_core  obj_core;
#endif
};
    5354              : 
    5355              : #define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
    5356              :                                _slab_num_blocks)                      \
    5357              :         {                                                             \
    5358              :         .wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q),                     \
    5359              :         .lock = {},                                                   \
    5360              :         .buffer = _slab_buffer,                                       \
    5361              :         .free_list = NULL,                                            \
    5362              :         .info = {_slab_num_blocks, _slab_block_size, 0}               \
    5363              :         }
    5364              : 
    5365              : 
    5366              : /**
    5367              :  * INTERNAL_HIDDEN @endcond
    5368              :  */
    5369              : 
    5370              : /**
    5371              :  * @defgroup mem_slab_apis Memory Slab APIs
    5372              :  * @ingroup kernel_apis
    5373              :  * @{
    5374              :  */
    5375              : 
    5376              : /**
    5377              :  * @brief Statically define and initialize a memory slab in a user-provided memory section with
    5378              :  * public (non-static) scope.
    5379              :  *
    5380              :  * The memory slab's buffer contains @a slab_num_blocks memory blocks
    5381              :  * that are @a slab_block_size bytes long. The buffer is aligned to a
    5382              :  * @a slab_align -byte boundary. To ensure that each memory block is similarly
    5383              :  * aligned to this boundary, @a slab_block_size must also be a multiple of
    5384              :  * @a slab_align.
    5385              :  *
    5386              :  * The memory slab can be accessed outside the module where it is defined
    5387              :  * using:
    5388              :  *
    5389              :  * @code extern struct k_mem_slab <name>; @endcode
    5390              :  *
    5391              :  * @note This macro cannot be used together with a static keyword.
    5392              :  *       If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_IN_SECT_STATIC
    5393              :  *       instead.
    5394              :  *
    5395              :  * @param name Name of the memory slab.
    5396              :  * @param in_section Section attribute specifier such as Z_GENERIC_SECTION.
    5397              :  * @param slab_block_size Size of each memory block (in bytes).
 * @param slab_num_blocks Number of memory blocks.
    5399              :  * @param slab_align Alignment of the memory slab's buffer (power of 2).
    5400              :  */
/* NOTE: slab_block_size, slab_num_blocks and slab_align are each
 * expanded more than once below; pass constant expressions without
 * side effects.
 */
#define K_MEM_SLAB_DEFINE_IN_SECT(name, in_section, slab_block_size, slab_num_blocks, slab_align)  \
        BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0,                                      \
                     "slab_block_size must be a multiple of slab_align");                          \
        BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0),                                   \
                     "slab_align must be a power of 2");                                           \
        char in_section __aligned(WB_UP(                                                           \
                slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)];   \
        STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER(                        \
                name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
    5410              : 
    5411              : /**
    5412              :  * @brief Statically define and initialize a memory slab in a public (non-static) scope.
    5413              :  *
    5414              :  * The memory slab's buffer contains @a slab_num_blocks memory blocks
    5415              :  * that are @a slab_block_size bytes long. The buffer is aligned to a
    5416              :  * @a slab_align -byte boundary. To ensure that each memory block is similarly
    5417              :  * aligned to this boundary, @a slab_block_size must also be a multiple of
    5418              :  * @a slab_align.
    5419              :  *
    5420              :  * The memory slab can be accessed outside the module where it is defined
    5421              :  * using:
    5422              :  *
    5423              :  * @code extern struct k_mem_slab <name>; @endcode
    5424              :  *
    5425              :  * @note This macro cannot be used together with a static keyword.
    5426              :  *       If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
    5427              :  *       instead.
    5428              :  *
    5429              :  * @param name Name of the memory slab.
    5430              :  * @param slab_block_size Size of each memory block (in bytes).
 * @param slab_num_blocks Number of memory blocks.
    5432              :  * @param slab_align Alignment of the memory slab's buffer (power of 2).
    5433              :  */
/* The slab buffer is placed in a dedicated __noinit section named after
 * the slab, so its contents are not zeroed at boot.
 */
#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align)                      \
        K_MEM_SLAB_DEFINE_IN_SECT(name, __noinit_named(k_mem_slab_buf_##name), slab_block_size,    \
                                  slab_num_blocks, slab_align)
    5437              : 
    5438              : /**
    5439              :  * @brief Statically define and initialize a memory slab in a user-provided memory section with
    5440              :  * private (static) scope.
    5441              :  *
    5442              :  * The memory slab's buffer contains @a slab_num_blocks memory blocks
    5443              :  * that are @a slab_block_size bytes long. The buffer is aligned to a
    5444              :  * @a slab_align -byte boundary. To ensure that each memory block is similarly
    5445              :  * aligned to this boundary, @a slab_block_size must also be a multiple of
    5446              :  * @a slab_align.
    5447              :  *
    5448              :  * @param name Name of the memory slab.
    5449              :  * @param in_section Section attribute specifier such as Z_GENERIC_SECTION.
    5450              :  * @param slab_block_size Size of each memory block (in bytes).
 * @param slab_num_blocks Number of memory blocks.
    5452              :  * @param slab_align Alignment of the memory slab's buffer (power of 2).
    5453              :  */
/* Static-scope variant of K_MEM_SLAB_DEFINE_IN_SECT. Size/alignment
 * arguments are expanded more than once; pass constant expressions
 * without side effects.
 */
#define K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, in_section, slab_block_size, slab_num_blocks,       \
                                         slab_align)                                               \
        BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0,                                      \
                     "slab_block_size must be a multiple of slab_align");                          \
        BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0),                                   \
                     "slab_align must be a power of 2");                                           \
        static char in_section __aligned(WB_UP(                                                    \
                slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)];   \
        static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER(                 \
                name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
    5464              : 
    5465              : /**
    5466              :  * @brief Statically define and initialize a memory slab in a private (static) scope.
    5467              :  *
    5468              :  * The memory slab's buffer contains @a slab_num_blocks memory blocks
    5469              :  * that are @a slab_block_size bytes long. The buffer is aligned to a
    5470              :  * @a slab_align -byte boundary. To ensure that each memory block is similarly
    5471              :  * aligned to this boundary, @a slab_block_size must also be a multiple of
    5472              :  * @a slab_align.
    5473              :  *
    5474              :  * @param name Name of the memory slab.
    5475              :  * @param slab_block_size Size of each memory block (in bytes).
 * @param slab_num_blocks Number of memory blocks.
    5477              :  * @param slab_align Alignment of the memory slab's buffer (power of 2).
    5478              :  */
/* Static-scope counterpart of K_MEM_SLAB_DEFINE; buffer placed in a
 * dedicated __noinit section named after the slab.
 */
#define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align)               \
        K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, __noinit_named(k_mem_slab_buf_##name),              \
                                         slab_block_size, slab_num_blocks, slab_align)
    5482              : 
    5483              : /**
    5484              :  * @brief Initialize a memory slab.
    5485              :  *
    5486              :  * Initializes a memory slab, prior to its first use.
    5487              :  *
    5488              :  * The memory slab's buffer contains @a slab_num_blocks memory blocks
    5489              :  * that are @a slab_block_size bytes long. The buffer must be aligned to an
    5490              :  * N-byte boundary matching a word boundary, where N is a power of 2
    5491              :  * (i.e. 4 on 32-bit systems, 8, 16, ...).
    5492              :  * To ensure that each memory block is similarly aligned to this boundary,
    5493              :  * @a slab_block_size must also be a multiple of N.
    5494              :  *
    5495              :  * @param slab Address of the memory slab.
    5496              :  * @param buffer Pointer to buffer used for the memory blocks.
    5497              :  * @param block_size Size of each memory block (in bytes).
    5498              :  * @param num_blocks Number of memory blocks.
    5499              :  *
    5500              :  * @retval 0 on success
    5501              :  * @retval -EINVAL invalid data supplied
    5502              :  *
    5503              :  */
    5504            1 : int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
    5505              :                            size_t block_size, uint32_t num_blocks);
    5506              : 
    5507              : /**
    5508              :  * @brief Allocate memory from a memory slab.
    5509              :  *
    5510              :  * This routine allocates a memory block from a memory slab.
    5511              :  *
    5512              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5513              :  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
    5514              :  *
    5515              :  * @funcprops \isr_ok
    5516              :  *
    5517              :  * @param slab Address of the memory slab.
    5518              :  * @param mem Pointer to block address area.
    5519              :  * @param timeout Waiting period to wait for operation to complete.
    5520              :  *        Use K_NO_WAIT to return without waiting,
    5521              :  *        or K_FOREVER to wait as long as necessary.
    5522              :  *
    5523              :  * @retval 0 Memory allocated. The block address area pointed at by @a mem
    5524              :  *         is set to the starting address of the memory block.
    5525              :  * @retval -ENOMEM Returned without waiting.
    5526              :  * @retval -EAGAIN Waiting period timed out.
    5527              :  * @retval -EINVAL Invalid data supplied
    5528              :  */
    5529            1 : int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
    5530              :                             k_timeout_t timeout);
    5531              : 
    5532              : /**
    5533              :  * @brief Free memory allocated from a memory slab.
    5534              :  *
    5535              :  * This routine releases a previously allocated memory block back to its
    5536              :  * associated memory slab.
    5537              :  *
    5538              :  * @param slab Address of the memory slab.
    5539              :  * @param mem Pointer to the memory block (as returned by k_mem_slab_alloc()).
    5540              :  */
    5541            1 : void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
    5542              : 
    5543              : /**
    5544              :  * @brief Get the number of used blocks in a memory slab.
    5545              :  *
    5546              :  * This routine gets the number of memory blocks that are currently
    5547              :  * allocated in @a slab.
    5548              :  *
    5549              :  * @param slab Address of the memory slab.
    5550              :  *
    5551              :  * @return Number of allocated memory blocks.
    5552              :  */
    5553            1 : static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
    5554              : {
    5555              :         return slab->info.num_used;
    5556              : }
    5557              : 
    5558              : /**
    5559              :  * @brief Get the number of maximum used blocks so far in a memory slab.
    5560              :  *
    5561              :  * This routine gets the maximum number of memory blocks that were
    5562              :  * allocated in @a slab.
    5563              :  *
    5564              :  * @param slab Address of the memory slab.
    5565              :  *
    5566              :  * @return Maximum number of allocated memory blocks.
    5567              :  */
    5568            1 : static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
    5569              : {
    5570              : #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
    5571              :         return slab->info.max_used;
    5572              : #else
    5573              :         ARG_UNUSED(slab);
    5574              :         return 0;
    5575              : #endif
    5576              : }
    5577              : 
    5578              : /**
    5579              :  * @brief Get the number of unused blocks in a memory slab.
    5580              :  *
    5581              :  * This routine gets the number of memory blocks that are currently
    5582              :  * unallocated in @a slab.
    5583              :  *
    5584              :  * @param slab Address of the memory slab.
    5585              :  *
    5586              :  * @return Number of unallocated memory blocks.
    5587              :  */
    5588            1 : static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
    5589              : {
    5590              :         return slab->info.num_blocks - slab->info.num_used;
    5591              : }
    5592              : 
    5593              : /**
    5594              :  * @brief Get the memory stats for a memory slab
    5595              :  *
    5596              :  * This routine gets the runtime memory usage stats for the slab @a slab.
    5597              :  *
    5598              :  * @param slab Address of the memory slab
    5599              :  * @param stats Pointer to memory into which to copy memory usage statistics
    5600              :  *
    5601              :  * @retval 0 Success
    5602              :  * @retval -EINVAL Any parameter points to NULL
    5603              :  */
    5604              : 
    5605            1 : int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
    5606              : 
    5607              : /**
    5608              :  * @brief Reset the maximum memory usage for a slab
    5609              :  *
    5610              :  * This routine resets the maximum memory usage for the slab @a slab to its
    5611              :  * current usage.
    5612              :  *
    5613              :  * @param slab Address of the memory slab
    5614              :  *
    5615              :  * @retval 0 Success
    5616              :  * @retval -EINVAL Memory slab is NULL
    5617              :  */
    5618            1 : int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
    5619              : 
    5620              : /** @} */
    5621              : 
    5622              : /**
    5623              :  * @addtogroup heap_apis
    5624              :  * @{
    5625              :  */
    5626              : 
    5627              : /* kernel synchronized heap struct */
    5628              : 
struct k_heap {
        struct sys_heap heap;   /* underlying (unsynchronized) sys_heap */
        _wait_q_t wait_q;       /* threads blocked waiting for memory */
        struct k_spinlock lock; /* serializes access to the heap */
};
    5634              : 
    5635              : /**
    5636              :  * @brief Initialize a k_heap
    5637              :  *
    5638              :  * This constructs a synchronized k_heap object over a memory region
    5639              :  * specified by the user.  Note that while any alignment and size can
    5640              :  * be passed as valid parameters, internal alignment restrictions
    5641              :  * inside the inner sys_heap mean that not all bytes may be usable as
    5642              :  * allocated memory.
    5643              :  *
    5644              :  * @param h Heap struct to initialize
    5645              :  * @param mem Pointer to memory.
    5646              :  * @param bytes Size of memory region, in bytes
    5647              :  */
    5648            1 : void k_heap_init(struct k_heap *h, void *mem,
    5649              :                 size_t bytes) __attribute_nonnull(1);
    5650              : 
    5651              : /**
    5652              :  * @brief Allocate aligned memory from a k_heap
    5653              :  *
    5654              :  * Behaves in all ways like k_heap_alloc(), except that the returned
    5655              :  * memory (if available) will have a starting address in memory which
    5656              :  * is a multiple of the specified power-of-two alignment value in
    5657              :  * bytes.  The resulting memory can be returned to the heap using
    5658              :  * k_heap_free().
    5659              :  *
    5660              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5661              :  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
    5662              :  *
    5663              :  * @funcprops \isr_ok
    5664              :  *
    5665              :  * @param h Heap from which to allocate
    5666              :  * @param align Alignment in bytes, must be a power of two
    5667              :  * @param bytes Number of bytes requested
    5668              :  * @param timeout How long to wait, or K_NO_WAIT
     5669              :  * @return Pointer to memory the caller can now use, or NULL on failure
    5670              :  */
    5671            1 : void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
    5672              :                         k_timeout_t timeout) __attribute_nonnull(1);
    5673              : 
    5674              : /**
    5675              :  * @brief Allocate memory from a k_heap
    5676              :  *
    5677              :  * Allocates and returns a memory buffer from the memory region owned
    5678              :  * by the heap.  If no memory is available immediately, the call will
    5679              :  * block for the specified timeout (constructed via the standard
    5680              :  * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
    5681              :  * freed.  If the allocation cannot be performed by the expiration of
    5682              :  * the timeout, NULL will be returned.
    5683              :  * Allocated memory is aligned on a multiple of pointer sizes.
    5684              :  *
    5685              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5686              :  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
    5687              :  *
    5688              :  * @funcprops \isr_ok
    5689              :  *
    5690              :  * @param h Heap from which to allocate
    5691              :  * @param bytes Desired size of block to allocate
    5692              :  * @param timeout How long to wait, or K_NO_WAIT
    5693              :  * @return A pointer to valid heap memory, or NULL
    5694              :  */
    5695            1 : void *k_heap_alloc(struct k_heap *h, size_t bytes,
    5696              :                 k_timeout_t timeout) __attribute_nonnull(1);
    5697              : 
    5698              : /**
    5699              :  * @brief Allocate and initialize memory for an array of objects from a k_heap
    5700              :  *
    5701              :  * Allocates memory for an array of num objects of size and initializes all
    5702              :  * bytes in the allocated storage to zero.  If no memory is available
    5703              :  * immediately, the call will block for the specified timeout (constructed
    5704              :  * via the standard timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory
    5705              :  * to be freed.  If the allocation cannot be performed by the expiration of
    5706              :  * the timeout, NULL will be returned.
    5707              :  * Allocated memory is aligned on a multiple of pointer sizes.
    5708              :  *
    5709              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5710              :  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
    5711              :  *
    5712              :  * @funcprops \isr_ok
    5713              :  *
    5714              :  * @param h Heap from which to allocate
    5715              :  * @param num Number of objects to allocate
    5716              :  * @param size Desired size of each object to allocate
    5717              :  * @param timeout How long to wait, or K_NO_WAIT
    5718              :  * @return A pointer to valid heap memory, or NULL
    5719              :  */
    5720            1 : void *k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
    5721              :         __attribute_nonnull(1);
    5722              : 
    5723              : /**
    5724              :  * @brief Reallocate memory from a k_heap
    5725              :  *
    5726              :  * Reallocates and returns a memory buffer from the memory region owned
    5727              :  * by the heap.  If no memory is available immediately, the call will
    5728              :  * block for the specified timeout (constructed via the standard
    5729              :  * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
    5730              :  * freed.  If the allocation cannot be performed by the expiration of
    5731              :  * the timeout, NULL will be returned.
    5732              :  * Reallocated memory is aligned on a multiple of pointer sizes.
    5733              :  *
    5734              :  * @note @a timeout must be set to K_NO_WAIT if called from ISR.
    5735              :  * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
    5736              :  *
    5737              :  * @funcprops \isr_ok
    5738              :  *
    5739              :  * @param h Heap from which to allocate
    5740              :  * @param ptr Original pointer returned from a previous allocation
    5741              :  * @param bytes Desired size of block to allocate
    5742              :  * @param timeout How long to wait, or K_NO_WAIT
    5743              :  *
    5744              :  * @return Pointer to memory the caller can now use, or NULL
    5745              :  */
    5746            1 : void *k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
    5747              :         __attribute_nonnull(1);
    5748              : 
    5749              : /**
    5750              :  * @brief Free memory allocated by k_heap_alloc()
    5751              :  *
    5752              :  * Returns the specified memory block, which must have been returned
    5753              :  * from k_heap_alloc(), to the heap for use by other callers.  Passing
    5754              :  * a NULL block is legal, and has no effect.
    5755              :  *
    5756              :  * @param h Heap to which to return the memory
    5757              :  * @param mem A valid memory block, or NULL
    5758              :  */
    5759            1 : void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
    5760              : 
    5761              : /* Hand-calculated minimum heap sizes needed to return a successful
    5762              :  * 1-byte allocation.  See details in lib/os/heap.[ch]
    5763              :  */
    5764              : #define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 56 : 44)
    5765              : 
    5766              : /**
    5767              :  * @brief Define a static k_heap in the specified linker section
    5768              :  *
    5769              :  * This macro defines and initializes a static memory region and
    5770              :  * k_heap of the requested size in the specified linker section.
    5771              :  * After kernel start, &name can be used as if k_heap_init() had
    5772              :  * been called.
    5773              :  *
    5774              :  * Note that this macro enforces a minimum size on the memory region
    5775              :  * to accommodate metadata requirements.  Very small heaps will be
    5776              :  * padded to fit.
    5777              :  *
    5778              :  * @param name Symbol name for the struct k_heap object
    5779              :  * @param bytes Size of memory region, in bytes
    5780              :  * @param in_section Section attribute specifier such as Z_GENERIC_SECTION.
    5781              :  */
    5782              : #define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section)          \
    5783              :         char in_section                                         \
    5784              :              __aligned(8) /* CHUNK_UNIT */                      \
    5785              :              kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)];         \
    5786              :         STRUCT_SECTION_ITERABLE(k_heap, name) = {               \
    5787              :                 .heap = {                                       \
    5788              :                         .init_mem = kheap_##name,               \
    5789              :                         .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
    5790              :                  },                                             \
    5791              :         }
    5792              : 
    5793              : /**
    5794              :  * @brief Define a static k_heap
    5795              :  *
    5796              :  * This macro defines and initializes a static memory region and
    5797              :  * k_heap of the requested size.  After kernel start, &name can be
    5798              :  * used as if k_heap_init() had been called.
    5799              :  *
    5800              :  * Note that this macro enforces a minimum size on the memory region
    5801              :  * to accommodate metadata requirements.  Very small heaps will be
    5802              :  * padded to fit.
    5803              :  *
    5804              :  * @param name Symbol name for the struct k_heap object
    5805              :  * @param bytes Size of memory region, in bytes
    5806              :  */
    5807            1 : #define K_HEAP_DEFINE(name, bytes)                              \
    5808              :         Z_HEAP_DEFINE_IN_SECT(name, bytes,                      \
    5809              :                               __noinit_named(kheap_buf_##name))
    5810              : 
    5811              : /**
    5812              :  * @brief Define a static k_heap in uncached memory
    5813              :  *
    5814              :  * This macro defines and initializes a static memory region and
    5815              :  * k_heap of the requested size in uncached memory.  After kernel
    5816              :  * start, &name can be used as if k_heap_init() had been called.
    5817              :  *
    5818              :  * Note that this macro enforces a minimum size on the memory region
    5819              :  * to accommodate metadata requirements.  Very small heaps will be
    5820              :  * padded to fit.
    5821              :  *
    5822              :  * @param name Symbol name for the struct k_heap object
    5823              :  * @param bytes Size of memory region, in bytes
    5824              :  */
    5825            1 : #define K_HEAP_DEFINE_NOCACHE(name, bytes)                      \
    5826              :         Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
    5827              : 
    5828              : /** @brief Get the array of statically defined heaps
    5829              :  *
    5830              :  * Returns the pointer to the start of the static heap array.
    5831              :  * Static heaps are those declared through one of the `K_HEAP_DEFINE`
    5832              :  * macros.
    5833              :  *
    5834              :  * @param heap Pointer to location where heap array address is written
    5835              :  * @return Number of static heaps
    5836              :  */
    5837            1 : int k_heap_array_get(struct k_heap **heap);
    5838              : 
    5839              : /**
    5840              :  * @}
    5841              :  */
    5842              : 
    5843              : /**
    5844              :  * @defgroup heap_apis Heap APIs
    5845              :  * @brief Memory allocation from the Heap
    5846              :  * @ingroup kernel_apis
    5847              :  * @{
    5848              :  */
    5849              : 
    5850              : /**
    5851              :  * @brief Allocate memory from the heap with a specified alignment.
    5852              :  *
    5853              :  * This routine provides semantics similar to aligned_alloc(); memory is
    5854              :  * allocated from the heap with a specified alignment. However, one minor
    5855              :  * difference is that k_aligned_alloc() accepts any non-zero @p size,
    5856              :  * whereas aligned_alloc() only accepts a @p size that is an integral
    5857              :  * multiple of @p align.
    5858              :  *
    5859              :  * Above, aligned_alloc() refers to:
    5860              :  * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
    5861              :  * The aligned_alloc function (p: 347-348)
    5862              :  *
    5863              :  * @param align Alignment of memory requested (in bytes).
    5864              :  * @param size Amount of memory requested (in bytes).
    5865              :  *
    5866              :  * @return Address of the allocated memory if successful; otherwise NULL.
    5867              :  */
    5868            1 : void *k_aligned_alloc(size_t align, size_t size);
    5869              : 
    5870              : /**
    5871              :  * @brief Allocate memory from the heap.
    5872              :  *
    5873              :  * This routine provides traditional malloc() semantics. Memory is
    5874              :  * allocated from the heap memory pool.
    5875              :  * Allocated memory is aligned on a multiple of pointer sizes.
    5876              :  *
    5877              :  * @param size Amount of memory requested (in bytes).
    5878              :  *
    5879              :  * @return Address of the allocated memory if successful; otherwise NULL.
    5880              :  */
    5881            1 : void *k_malloc(size_t size);
    5882              : 
    5883              : /**
    5884              :  * @brief Free memory allocated from heap.
    5885              :  *
    5886              :  * This routine provides traditional free() semantics. The memory being
    5887              :  * returned must have been allocated from the heap memory pool.
    5888              :  *
    5889              :  * If @a ptr is NULL, no operation is performed.
    5890              :  *
    5891              :  * @param ptr Pointer to previously allocated memory.
    5892              :  */
    5893            1 : void k_free(void *ptr);
    5894              : 
    5895              : /**
    5896              :  * @brief Allocate memory from heap, array style
    5897              :  *
    5898              :  * This routine provides traditional calloc() semantics. Memory is
    5899              :  * allocated from the heap memory pool and zeroed.
    5900              :  *
    5901              :  * @param nmemb Number of elements in the requested array
    5902              :  * @param size Size of each array element (in bytes).
    5903              :  *
    5904              :  * @return Address of the allocated memory if successful; otherwise NULL.
    5905              :  */
    5906            1 : void *k_calloc(size_t nmemb, size_t size);
    5907              : 
    5908              : /** @brief Expand the size of an existing allocation
    5909              :  *
    5910              :  * Returns a pointer to a new memory region with the same contents,
    5911              :  * but a different allocated size.  If the new allocation can be
    5912              :  * expanded in place, the pointer returned will be identical.
     5913              :  * Otherwise the data will be copied to a new block and the old one
    5914              :  * will be freed as per sys_heap_free().  If the specified size is
    5915              :  * smaller than the original, the block will be truncated in place and
    5916              :  * the remaining memory returned to the heap.  If the allocation of a
    5917              :  * new block fails, then NULL will be returned and the old block will
    5918              :  * not be freed or modified.
    5919              :  *
    5920              :  * @param ptr Original pointer returned from a previous allocation
    5921              :  * @param size Amount of memory requested (in bytes).
    5922              :  *
    5923              :  * @return Pointer to memory the caller can now use, or NULL.
    5924              :  */
    5925            1 : void *k_realloc(void *ptr, size_t size);
    5926              : 
    5927              : /** @} */
    5928              : 
    5929              : /* polling API - PRIVATE */
    5930              : 
    5931              : #ifdef CONFIG_POLL
    5932              : #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
    5933              : #else
    5934              : #define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
    5935              : #endif
    5936              : 
    5937              : /* private - types bit positions */
    5938              : enum _poll_types_bits {
    5939              :         /* can be used to ignore an event */
    5940              :         _POLL_TYPE_IGNORE,
    5941              : 
    5942              :         /* to be signaled by k_poll_signal_raise() */
    5943              :         _POLL_TYPE_SIGNAL,
    5944              : 
    5945              :         /* semaphore availability */
    5946              :         _POLL_TYPE_SEM_AVAILABLE,
    5947              : 
    5948              :         /* queue/FIFO/LIFO data availability */
    5949              :         _POLL_TYPE_DATA_AVAILABLE,
    5950              : 
    5951              :         /* msgq data availability */
    5952              :         _POLL_TYPE_MSGQ_DATA_AVAILABLE,
    5953              : 
    5954              :         /* pipe data availability */
    5955              :         _POLL_TYPE_PIPE_DATA_AVAILABLE,
    5956              : 
    5957              :         _POLL_NUM_TYPES
    5958              : };
    5959              : 
    5960              : #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
    5961              : 
    5962              : /* private - states bit positions */
    5963              : enum _poll_states_bits {
    5964              :         /* default state when creating event */
    5965              :         _POLL_STATE_NOT_READY,
    5966              : 
    5967              :         /* signaled by k_poll_signal_raise() */
    5968              :         _POLL_STATE_SIGNALED,
    5969              : 
    5970              :         /* semaphore is available */
    5971              :         _POLL_STATE_SEM_AVAILABLE,
    5972              : 
    5973              :         /* data is available to read on queue/FIFO/LIFO */
    5974              :         _POLL_STATE_DATA_AVAILABLE,
    5975              : 
    5976              :         /* queue/FIFO/LIFO wait was cancelled */
    5977              :         _POLL_STATE_CANCELLED,
    5978              : 
    5979              :         /* data is available to read on a message queue */
    5980              :         _POLL_STATE_MSGQ_DATA_AVAILABLE,
    5981              : 
    5982              :         /* data is available to read from a pipe */
    5983              :         _POLL_STATE_PIPE_DATA_AVAILABLE,
    5984              : 
    5985              :         _POLL_NUM_STATES
    5986              : };
    5987              : 
    5988              : #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
    5989              : 
    5990              : #define _POLL_EVENT_NUM_UNUSED_BITS \
    5991              :         (32 - (0 \
    5992              :                + 8 /* tag */ \
    5993              :                + _POLL_NUM_TYPES \
    5994              :                + _POLL_NUM_STATES \
    5995              :                + 1 /* modes */ \
    5996              :               ))
    5997              : 
    5998              : /* end of polling API - PRIVATE */
    5999              : 
    6000              : 
    6001              : /**
    6002              :  * @defgroup poll_apis Async polling APIs
    6003              :  * @brief An API to wait concurrently for any one of multiple conditions to be
    6004              :  *        fulfilled
    6005              :  * @ingroup kernel_apis
    6006              :  * @{
    6007              :  */
    6008              : 
    6009              : /* Public polling API */
    6010              : 
    6011              : /* public - values for k_poll_event.type bitfield */
    6012            0 : #define K_POLL_TYPE_IGNORE 0
    6013            0 : #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
    6014            0 : #define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
    6015            0 : #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
    6016            0 : #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
    6017            0 : #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
    6018            0 : #define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
    6019              : 
    6020              : /* public - polling modes */
    6021            0 : enum k_poll_modes {
    6022              :         /* polling thread does not take ownership of objects when available */
    6023              :         K_POLL_MODE_NOTIFY_ONLY = 0,
    6024              : 
    6025              :         K_POLL_NUM_MODES
    6026              : };
    6027              : 
    6028              : /* public - values for k_poll_event.state bitfield */
    6029            0 : #define K_POLL_STATE_NOT_READY 0
    6030            0 : #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
    6031            0 : #define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
    6032            0 : #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
    6033            0 : #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
    6034            0 : #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
    6035            0 : #define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
    6036            0 : #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
    6037              : 
    6038              : /* public - poll signal object */
    6039            0 : struct k_poll_signal {
    6040              :         /** PRIVATE - DO NOT TOUCH */
    6041            1 :         sys_dlist_t poll_events;
    6042              : 
    6043              :         /**
    6044              :          * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
    6045              :          * user resets it to 0.
    6046              :          */
    6047            1 :         unsigned int signaled;
    6048              : 
    6049              :         /** custom result value passed to k_poll_signal_raise() if needed */
    6050            1 :         int result;
    6051              : };
    6052              : 
    6053            0 : #define K_POLL_SIGNAL_INITIALIZER(obj) \
    6054              :         { \
    6055              :         .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
    6056              :         .signaled = 0, \
    6057              :         .result = 0, \
    6058              :         }
    6059              : /**
    6060              :  * @brief Poll Event
    6061              :  *
    6062              :  */
    6063            1 : struct k_poll_event {
    6064              :         /** PRIVATE - DO NOT TOUCH */
    6065              :         sys_dnode_t _node;
    6066              : 
    6067              :         /** PRIVATE - DO NOT TOUCH */
    6068            1 :         struct z_poller *poller;
    6069              : 
    6070              :         /** optional user-specified tag, opaque, untouched by the API */
    6071            1 :         uint32_t tag:8;
    6072              : 
    6073              :         /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
    6074            1 :         uint32_t type:_POLL_NUM_TYPES;
    6075              : 
    6076              :         /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
    6077            1 :         uint32_t state:_POLL_NUM_STATES;
    6078              : 
    6079              :         /** mode of operation, from enum k_poll_modes */
    6080            1 :         uint32_t mode:1;
    6081              : 
    6082              :         /** unused bits in 32-bit word */
    6083            1 :         uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
    6084              : 
    6085              :         /** per-type data */
    6086              :         union {
    6087              :                 /* The typed_* fields below are used by K_POLL_EVENT_*INITIALIZER() macros to ensure
    6088              :                  * type safety of polled objects.
    6089              :                  */
    6090            0 :                 void *obj, *typed_K_POLL_TYPE_IGNORE;
    6091            0 :                 struct k_poll_signal *signal, *typed_K_POLL_TYPE_SIGNAL;
    6092            0 :                 struct k_sem *sem, *typed_K_POLL_TYPE_SEM_AVAILABLE;
    6093            0 :                 struct k_fifo *fifo, *typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE;
    6094            0 :                 struct k_queue *queue, *typed_K_POLL_TYPE_DATA_AVAILABLE;
    6095            0 :                 struct k_msgq *msgq, *typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE;
    6096            0 :                 struct k_pipe *pipe, *typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE;
    6097            1 :         };
    6098              : };
    6099              : 
    6100            0 : #define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
    6101              :         { \
    6102              :         .poller = NULL, \
    6103              :         .type = _event_type, \
    6104              :         .state = K_POLL_STATE_NOT_READY, \
    6105              :         .mode = _event_mode, \
    6106              :         .unused = 0, \
    6107              :         { \
    6108              :                 .typed_##_event_type = _event_obj, \
    6109              :         }, \
    6110              :         }
    6111              : 
    6112              : #define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
    6113            0 :                                         event_tag) \
    6114              :         { \
    6115              :         .tag = event_tag, \
    6116              :         .type = _event_type, \
    6117              :         .state = K_POLL_STATE_NOT_READY, \
    6118              :         .mode = _event_mode, \
    6119              :         .unused = 0, \
    6120              :         { \
    6121              :                 .typed_##_event_type = _event_obj, \
    6122              :         }, \
    6123              :         }
    6124              : 
    6125              : /**
    6126              :  * @brief Initialize one struct k_poll_event instance
    6127              :  *
     6128              :  * After this routine is called on a poll event, the event is ready to be
    6129              :  * placed in an event array to be passed to k_poll().
    6130              :  *
    6131              :  * @param event The event to initialize.
    6132              :  * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
    6133              :  *             values. Only values that apply to the same object being polled
    6134              :  *             can be used together. Choosing K_POLL_TYPE_IGNORE disables the
    6135              :  *             event.
    6136              :  * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
    6137              :  * @param obj Kernel object or poll signal.
    6138              :  */
    6139              : 
    6140            1 : void k_poll_event_init(struct k_poll_event *event, uint32_t type,
    6141              :                               int mode, void *obj);
    6142              : 
    6143              : /**
    6144              :  * @brief Wait for one or many of multiple poll events to occur
    6145              :  *
    6146              :  * This routine allows a thread to wait concurrently for one or many of
    6147              :  * multiple poll events to have occurred. Such events can be a kernel object
    6148              :  * being available, like a semaphore, or a poll signal event.
    6149              :  *
    6150              :  * When an event notifies that a kernel object is available, the kernel object
    6151              :  * is not "given" to the thread calling k_poll(): it merely signals the fact
    6152              :  * that the object was available when the k_poll() call was in effect. Also,
    6153              :  * all threads trying to acquire an object the regular way, i.e. by pending on
    6154              :  * the object, have precedence over the thread polling on the object. This
    6155              :  * means that the polling thread will never get the poll event on an object
    6156              :  * until the object becomes available and its pend queue is empty. For this
    6157              :  * reason, the k_poll() call is more effective when the objects being polled
    6158              :  * only have one thread, the polling thread, trying to acquire them.
    6159              :  *
    6160              :  * When k_poll() returns 0, the caller should loop on all the events that were
    6161              :  * passed to k_poll() and check the state field for the values that were
    6162              :  * expected and take the associated actions.
    6163              :  *
    6164              :  * Before being reused for another call to k_poll(), the user has to reset the
    6165              :  * state field to K_POLL_STATE_NOT_READY.
    6166              :  *
    6167              :  * When called from user mode, a temporary memory allocation is required from
    6168              :  * the caller's resource pool.
    6169              :  *
    6170              :  * @param events An array of events to be polled for.
    6171              :  * @param num_events The number of events in the array.
    6172              :  * @param timeout Waiting period for an event to be ready,
    6173              :  *                or one of the special values K_NO_WAIT and K_FOREVER.
    6174              :  *
    6175              :  * @retval 0 One or more events are ready.
    6176              :  * @retval -EAGAIN Waiting period timed out.
    6177              :  * @retval -EINTR Polling has been interrupted, e.g. with
    6178              :  *         k_queue_cancel_wait(). All output events are still set and valid,
    6179              :  *         cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
    6180              :  *         words, -EINTR status means that at least one of output events is
    6181              :  *         K_POLL_STATE_CANCELLED.
    6182              :  * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
    6183              :  * @retval -EINVAL Bad parameters (user mode only)
    6184              :  */
    6185              : 
    6186            1 : __syscall int k_poll(struct k_poll_event *events, int num_events,
    6187              :                      k_timeout_t timeout);
    6188              : 
    6189              : /**
    6190              :  * @brief Initialize a poll signal object.
    6191              :  *
    6192              :  * Ready a poll signal object to be signaled via k_poll_signal_raise().
    6193              :  *
    6194              :  * @param sig A poll signal.
    6195              :  */
    6196              : 
    6197            1 : __syscall void k_poll_signal_init(struct k_poll_signal *sig);
    6198              : 
    6199              : /**
    6200              :  * @brief Reset a poll signal object's state to unsignaled.
    6201              :  *
    6202              :  * @param sig A poll signal object
    6203              :  */
    6204            1 : __syscall void k_poll_signal_reset(struct k_poll_signal *sig);
    6205              : 
    6206              : /**
    6207              :  * @brief Fetch the signaled state and result value of a poll signal
    6208              :  *
    6209              :  * @param sig A poll signal object
    6210              :  * @param signaled An integer buffer which will be written nonzero if the
    6211              :  *                 object was signaled
    6212              :  * @param result An integer destination buffer which will be written with the
    6213              :  *                 result value if the object was signaled, or an undefined
    6214              :  *                 value if it was not.
    6215              :  */
    6216            1 : __syscall void k_poll_signal_check(struct k_poll_signal *sig,
    6217              :                                    unsigned int *signaled, int *result);
    6218              : 
    6219              : /**
    6220              :  * @brief Signal a poll signal object.
    6221              :  *
    6222              :  * This routine makes ready a poll signal, which is basically a poll event of
    6223              :  * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
    6224              :  * made ready to run. A @a result value can be specified.
    6225              :  *
    6226              :  * The poll signal contains a 'signaled' field that, when set by
    6227              :  * k_poll_signal_raise(), stays set until the user sets it back to 0 with
    6228              :  * k_poll_signal_reset(). It thus has to be reset by the user before being
    6229              :  * passed again to k_poll() or k_poll() will consider it being signaled, and
    6230              :  * will return immediately.
    6231              :  *
    6232              :  * @note The result is stored and the 'signaled' field is set even if
    6233              :  * this function returns an error indicating that an expiring poll was
    6234              :  * not notified.  The next k_poll() will detect the missed raise.
    6235              :  *
    6236              :  * @param sig A poll signal.
    6237              :  * @param result The value to store in the result field of the signal.
    6238              :  *
    6239              :  * @retval 0 The signal was delivered successfully.
    6240              :  * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
    6241              :  */
    6242              : 
    6243            1 : __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
    6244              : 
    6245              : /** @} */
    6246              : 
    6247              : /**
    6248              :  * @defgroup cpu_idle_apis CPU Idling APIs
    6249              :  * @ingroup kernel_apis
    6250              :  * @{
    6251              :  */
    6252              : /**
    6253              :  * @brief Make the CPU idle.
    6254              :  *
    6255              :  * This function makes the CPU idle until an event wakes it up.
    6256              :  *
    6257              :  * In a regular system, the idle thread should be the only thread responsible
    6258              :  * for making the CPU idle and triggering any type of power management.
    6259              :  * However, in some more constrained systems, such as a single-threaded system,
    6260              :  * the only thread would be responsible for this if needed.
    6261              :  *
    6262              :  * @note In some architectures, before returning, the function unmasks interrupts
    6263              :  * unconditionally.
    6264              :  */
     6265            1 : static inline void k_cpu_idle(void)
     6266              : {
     6267              :         /* Delegate to the arch-specific idle routine; returns once an
     6268              :          * event wakes the CPU (interrupts may be unmasked on return on
     6269              :          * some architectures -- see @note above).
     6270              :          */
     6271              :         arch_cpu_idle();
     6272              : }
    6269              : 
    6270              : /**
    6271              :  * @brief Make the CPU idle in an atomic fashion.
    6272              :  *
    6273              :  * Similar to k_cpu_idle(), but must be called with interrupts locked.
    6274              :  *
    6275              :  * Enabling interrupts and entering a low-power mode will be atomic,
    6276              :  * i.e. there will be no period of time where interrupts are enabled before
    6277              :  * the processor enters a low-power mode.
    6278              :  *
    6279              :  * After waking up from the low-power mode, the interrupt lockout state will
    6280              :  * be restored as if by irq_unlock(key).
    6281              :  *
    6282              :  * @param key Interrupt locking key obtained from irq_lock().
    6283              :  */
     6284            1 : static inline void k_cpu_atomic_idle(unsigned int key)
     6285              : {
     6286              :         /* Atomically re-enable interrupts and enter low power; on wakeup
     6287              :          * the interrupt lockout state is restored as if by irq_unlock(key).
     6288              :          */
     6289              :         arch_cpu_atomic_idle(key);
     6290              : }
    6288              : 
    6289              : /**
    6290              :  * @}
    6291              :  */
    6292              : 
    6293              : /**
    6294              :  * @cond INTERNAL_HIDDEN
    6295              :  * @internal
    6296              :  */
     6297              : #ifdef ARCH_EXCEPT
     6298              : /* This architecture has direct support for triggering a CPU exception */
     6299              : #define z_except_reason(reason) ARCH_EXCEPT(reason)
     6300              : #else
     6301              : 
     6302              : #if !defined(CONFIG_ASSERT_NO_FILE_INFO)
     6303              : #define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
     6304              : #else
     6305              : #define __EXCEPT_LOC()
     6306              : #endif
     6307              : 
     6308              : /* NOTE: This is the implementation for arches that do not implement
     6309              :  * ARCH_EXCEPT() to generate a real CPU exception.
     6310              :  *
     6311              :  * We won't have a real exception frame to determine the PC value when
     6312              :  * the oops occurred, so print file and line number before we jump into
     6313              :  * the fatal error handler.
     6314              :  */
     6315              : #define z_except_reason(reason) do { \
     6316              :                 __EXCEPT_LOC();              \
     6317              :                 z_fatal_error(reason, NULL); \
     6318              :         } while (false)
     6319              : 
     6320              : #endif /* ARCH_EXCEPT */
    6321              : /**
    6322              :  * INTERNAL_HIDDEN @endcond
    6323              :  */
    6324              : 
    6325              : /**
    6326              :  * @brief Fatally terminate a thread
    6327              :  *
    6328              :  * This should be called when a thread has encountered an unrecoverable
    6329              :  * runtime condition and needs to terminate. What this ultimately
    6330              :  * means is determined by the _fatal_error_handler() implementation, which
    6331              :  * will be called with reason code K_ERR_KERNEL_OOPS.
    6332              :  *
    6333              :  * If this is called from ISR context, the default system fatal error handler
    6334              :  * will treat it as an unrecoverable system error, just like k_panic().
    6335              :  */
    6336            1 : #define k_oops()        z_except_reason(K_ERR_KERNEL_OOPS)
    6337              : 
    6338              : /**
    6339              :  * @brief Fatally terminate the system
    6340              :  *
    6341              :  * This should be called when the Zephyr kernel has encountered an
    6342              :  * unrecoverable runtime condition and needs to terminate. What this ultimately
    6343              :  * means is determined by the _fatal_error_handler() implementation, which
    6344              :  * will be called with reason code K_ERR_KERNEL_PANIC.
    6345              :  */
    6346            1 : #define k_panic()       z_except_reason(K_ERR_KERNEL_PANIC)
    6347              : 
    6348              : /**
    6349              :  * @cond INTERNAL_HIDDEN
    6350              :  */
    6351              : 
    6352              : /*
    6353              :  * private APIs that are utilized by one or more public APIs
    6354              :  */
    6355              : 
    6356              : /**
    6357              :  * @internal
    6358              :  */
    6359              : void z_timer_expiration_handler(struct _timeout *timeout);
    6360              : /**
    6361              :  * INTERNAL_HIDDEN @endcond
    6362              :  */
    6363              : 
    6364              : #ifdef CONFIG_PRINTK
    6365              : /**
    6366              :  * @brief Emit a character buffer to the console device
    6367              :  *
    6368              :  * @param c String of characters to print
    6369              :  * @param n The length of the string
    6370              :  *
    6371              :  */
    6372              : __syscall void k_str_out(char *c, size_t n);
    6373              : #endif
    6374              : 
    6375              : /**
    6376              :  * @defgroup float_apis Floating Point APIs
    6377              :  * @ingroup kernel_apis
    6378              :  * @{
    6379              :  */
    6380              : 
    6381              : /**
    6382              :  * @brief Disable preservation of floating point context information.
    6383              :  *
    6384              :  * This routine informs the kernel that the specified thread
    6385              :  * will no longer be using the floating point registers.
    6386              :  *
    6387              :  * @warning
    6388              :  * Some architectures apply restrictions on how the disabling of floating
    6389              :  * point preservation may be requested, see arch_float_disable.
    6390              :  *
    6391              :  * @warning
    6392              :  * This routine should only be used to disable floating point support for
    6393              :  * a thread that currently has such support enabled.
    6394              :  *
    6395              :  * @param thread ID of thread.
    6396              :  *
    6397              :  * @retval 0        On success.
    6398              :  * @retval -ENOTSUP If the floating point disabling is not implemented.
     6399              :  * @retval -EINVAL  If the floating point disabling could not be performed.
    6400              :  */
    6401            1 : __syscall int k_float_disable(struct k_thread *thread);
    6402              : 
    6403              : /**
    6404              :  * @brief Enable preservation of floating point context information.
    6405              :  *
    6406              :  * This routine informs the kernel that the specified thread
    6407              :  * will use the floating point registers.
     6408              :  *
    6409              :  * Invoking this routine initializes the thread's floating point context info
    6410              :  * to that of an FPU that has been reset. The next time the thread is scheduled
    6411              :  * by z_swap() it will either inherit an FPU that is guaranteed to be in a
    6412              :  * "sane" state (if the most recent user of the FPU was cooperatively swapped
    6413              :  * out) or the thread's own floating point context will be loaded (if the most
    6414              :  * recent user of the FPU was preempted, or if this thread is the first user
    6415              :  * of the FPU). Thereafter, the kernel will protect the thread's FP context
    6416              :  * so that it is not altered during a preemptive context switch.
    6417              :  *
    6418              :  * The @a options parameter indicates which floating point register sets will
    6419              :  * be used by the specified thread.
    6420              :  *
    6421              :  * For x86 options:
    6422              :  *
    6423              :  * - K_FP_REGS  indicates x87 FPU and MMX registers only
    6424              :  * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
    6425              :  *
    6426              :  * @warning
    6427              :  * Some architectures apply restrictions on how the enabling of floating
    6428              :  * point preservation may be requested, see arch_float_enable.
    6429              :  *
    6430              :  * @warning
     6431              :  * This routine should only be used to enable floating point support for
     6432              :  * a thread that does not currently have such support enabled.
    6433              :  *
    6434              :  * @param thread  ID of thread.
    6435              :  * @param options architecture dependent options
    6436              :  *
    6437              :  * @retval 0        On success.
    6438              :  * @retval -ENOTSUP If the floating point enabling is not implemented.
     6439              :  * @retval -EINVAL  If the floating point enabling could not be performed.
    6440              :  */
    6441            1 : __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
    6442              : 
    6443              : /**
    6444              :  * @}
    6445              :  */
    6446              : 
    6447              : /**
    6448              :  * @brief Get the runtime statistics of a thread
    6449              :  *
    6450              :  * @param thread ID of thread.
    6451              :  * @param stats Pointer to struct to copy statistics into.
    6452              :  * @return -EINVAL if null pointers, otherwise 0
    6453              :  */
    6454            1 : int k_thread_runtime_stats_get(k_tid_t thread,
    6455              :                                k_thread_runtime_stats_t *stats);
    6456              : 
    6457              : /**
    6458              :  * @brief Get the runtime statistics of all threads
    6459              :  *
    6460              :  * @param stats Pointer to struct to copy statistics into.
    6461              :  * @return -EINVAL if null pointers, otherwise 0
    6462              :  */
    6463            1 : int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
    6464              : 
    6465              : /**
    6466              :  * @brief Get the runtime statistics of all threads on specified cpu
    6467              :  *
    6468              :  * @param cpu The cpu number
    6469              :  * @param stats Pointer to struct to copy statistics into.
    6470              :  * @return -EINVAL if null pointers, otherwise 0
    6471              :  */
    6472            1 : int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats);
    6473              : 
    6474              : /**
    6475              :  * @brief Enable gathering of runtime statistics for specified thread
    6476              :  *
    6477              :  * This routine enables the gathering of runtime statistics for the specified
    6478              :  * thread.
    6479              :  *
    6480              :  * @param thread ID of thread
    6481              :  * @return -EINVAL if invalid thread ID, otherwise 0
    6482              :  */
    6483            1 : int k_thread_runtime_stats_enable(k_tid_t thread);
    6484              : 
    6485              : /**
    6486              :  * @brief Disable gathering of runtime statistics for specified thread
    6487              :  *
    6488              :  * This routine disables the gathering of runtime statistics for the specified
    6489              :  * thread.
    6490              :  *
    6491              :  * @param thread ID of thread
    6492              :  * @return -EINVAL if invalid thread ID, otherwise 0
    6493              :  */
    6494            1 : int k_thread_runtime_stats_disable(k_tid_t thread);
    6495              : 
    6496              : /**
    6497              :  * @brief Enable gathering of system runtime statistics
    6498              :  *
    6499              :  * This routine enables the gathering of system runtime statistics. Note that
    6500              :  * it does not affect the gathering of similar statistics for individual
    6501              :  * threads.
    6502              :  */
    6503            1 : void k_sys_runtime_stats_enable(void);
    6504              : 
    6505              : /**
    6506              :  * @brief Disable gathering of system runtime statistics
    6507              :  *
    6508              :  * This routine disables the gathering of system runtime statistics. Note that
    6509              :  * it does not affect the gathering of similar statistics for individual
    6510              :  * threads.
    6511              :  */
    6512            1 : void k_sys_runtime_stats_disable(void);
    6513              : 
    6514              : #ifdef __cplusplus
    6515              : }
    6516              : #endif
    6517              : 
    6518              : #include <zephyr/tracing/tracing.h>
    6519              : #include <zephyr/syscalls/kernel.h>
    6520              : 
    6521              : #endif /* !_ASMLANGUAGE */
    6522              : 
    6523              : #endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
        

Generated by: LCOV version 2.0-1