Line data Source code
1 1 : /*
2 : * Copyright (c) 2016, Wind River Systems, Inc.
3 : *
4 : * SPDX-License-Identifier: Apache-2.0
5 : */
6 :
7 : /**
8 : * @file
9 : *
10 : * @brief Public kernel APIs.
11 : */
12 :
13 : #ifndef ZEPHYR_INCLUDE_KERNEL_H_
14 : #define ZEPHYR_INCLUDE_KERNEL_H_
15 :
16 : #if !defined(_ASMLANGUAGE)
17 : #include <zephyr/kernel_includes.h>
18 : #include <errno.h>
19 : #include <limits.h>
20 : #include <stdbool.h>
21 : #include <zephyr/toolchain.h>
22 : #include <zephyr/tracing/tracing_macros.h>
23 : #include <zephyr/sys/mem_stats.h>
24 : #include <zephyr/sys/iterable_sections.h>
25 :
26 : #ifdef __cplusplus
27 : extern "C" {
28 : #endif
29 :
30 : /*
31 : * Zephyr currently assumes the size of a couple standard types to simplify
32 : * print string formats. Let's make sure this doesn't change without notice.
33 : */
34 : BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
35 : BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
36 : BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));
37 :
38 : /**
39 : * @brief Kernel APIs
40 : * @defgroup kernel_apis Kernel APIs
41 : * @since 1.0
42 : * @version 1.0.0
43 : * @{
44 : * @}
45 : */
46 :
47 0 : #define K_ANY NULL
48 :
49 : #if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
50 : #error Zero available thread priorities defined!
51 : #endif
52 :
53 0 : #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
54 0 : #define K_PRIO_PREEMPT(x) (x)
55 :
56 0 : #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
57 0 : #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
58 0 : #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
59 0 : #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
60 0 : #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
61 :
62 : #ifdef CONFIG_POLL
63 : #define Z_POLL_EVENT_OBJ_INIT(obj) \
64 : .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
65 : #define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
66 : #else
67 : #define Z_POLL_EVENT_OBJ_INIT(obj)
68 : #define Z_DECL_POLL_EVENT
69 : #endif
70 :
71 : struct k_thread;
72 : struct k_mutex;
73 : struct k_sem;
74 : struct k_msgq;
75 : struct k_mbox;
76 : struct k_pipe;
77 : struct k_queue;
78 : struct k_fifo;
79 : struct k_lifo;
80 : struct k_stack;
81 : struct k_mem_slab;
82 : struct k_timer;
83 : struct k_poll_event;
84 : struct k_poll_signal;
85 : struct k_mem_domain;
86 : struct k_mem_partition;
87 : struct k_futex;
88 : struct k_event;
89 :
90 0 : enum execution_context_types {
91 : K_ISR = 0,
92 : K_COOP_THREAD,
93 : K_PREEMPT_THREAD,
94 : };
95 :
96 : /* private, used by k_poll and k_work_poll */
97 : struct k_work_poll;
98 : typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
99 :
100 : /**
101 : * @addtogroup thread_apis
102 : * @{
103 : */
104 :
105 0 : typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
106 : void *user_data);
107 :
108 : /**
109 : * @brief Iterate over all the threads in the system.
110 : *
111 : * This routine iterates over all the threads in the system and
112 : * calls the user_cb function for each thread.
113 : *
114 : * @param user_cb Pointer to the user callback function.
115 : * @param user_data Pointer to user data.
116 : *
117 : * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
118 : * to be effective.
119 : * @note This API uses @ref k_spin_lock to protect the _kernel.threads
120 : * list which means creation of new threads and terminations of existing
121 : * threads are blocked until this API returns.
122 : */
123 1 : void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
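      :
      : /*
      :  * Example: counting all threads in the system. A minimal sketch;
      :  * the callback and counter names are hypothetical.
      :  *
      :  * @code
      :  * static void count_cb(const struct k_thread *thread, void *user_data)
      :  * {
      :  *     int *count = user_data;
      :  *
      :  *     ARG_UNUSED(thread);
      :  *     (*count)++;
      :  * }
      :  *
      :  * int count = 0;
      :  *
      :  * k_thread_foreach(count_cb, &count);
      :  * printk("%d threads\n", count);
      :  * @endcode
      :  */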
124 :
125 : /**
126 : * @brief Iterate over all the threads running on the specified CPU.
127 : *
128 : * This function does the same thing as k_thread_foreach(), but it
129 : * only loops through the threads running on the specified CPU.
130 : * If CONFIG_SMP is not defined, the implementation is the same as
131 : * k_thread_foreach(), with an assert requiring cpu == 0.
132 : *
133 : * @param cpu The filtered cpu number
134 : * @param user_cb Pointer to the user callback function.
135 : * @param user_data Pointer to user data.
136 : *
137 : * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
138 : * to be effective.
139 : * @note This API uses @ref k_spin_lock to protect the _kernel.threads
140 : * list which means creation of new threads and terminations of existing
141 : * threads are blocked until this API returns.
142 : */
143 : #ifdef CONFIG_SMP
144 1 : void k_thread_foreach_filter_by_cpu(unsigned int cpu,
145 : k_thread_user_cb_t user_cb, void *user_data);
146 : #else
147 : static inline
148 : void k_thread_foreach_filter_by_cpu(unsigned int cpu,
149 : k_thread_user_cb_t user_cb, void *user_data)
150 : {
151 : __ASSERT(cpu == 0, "cpu filter out of bounds");
152 : ARG_UNUSED(cpu);
153 : k_thread_foreach(user_cb, user_data);
154 : }
155 : #endif
156 :
157 : /**
158 : * @brief Iterate over all the threads in the system without locking.
159 : *
160 : * This routine works exactly the same as @ref k_thread_foreach,
161 : * but unlocks interrupts while user_cb is executed.
162 : *
163 : * @param user_cb Pointer to the user callback function.
164 : * @param user_data Pointer to user data.
165 : *
166 : * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
167 : * to be effective.
168 : * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
169 : * queue elements. It unlocks it during user callback function processing.
170 : * If a new thread is created while this @c foreach function is in
171 : * progress, the new thread will not be included in the enumeration.
172 : * If a thread is aborted during this enumeration, there is a race and
173 : * the aborted thread may still be included in the enumeration.
174 : * @note If a thread is aborted and the memory occupied by its @c k_thread
175 : * structure is reused while @c k_thread_foreach_unlocked is in progress,
176 : * the system may behave unpredictably.
177 : * This function may even fail to return, as it would follow the @c next
178 : * thread pointers, treating the given pointer as a pointer to a k_thread
179 : * structure when it is actually something else entirely.
180 : * Do not reuse the memory that was occupied by the k_thread structure of
181 : * an aborted thread if it was aborted after this function was called, in
182 : * any context.
183 : */
184 1 : void k_thread_foreach_unlocked(
185 : k_thread_user_cb_t user_cb, void *user_data);
186 :
187 : /**
188 : * @brief Iterate over the threads running on the specified CPU, without locking.
189 : *
190 : * This function does the same thing as
191 : * k_thread_foreach_unlocked(), but it only loops through the threads
192 : * running on the specified CPU. If CONFIG_SMP is not defined, the
193 : * implementation is the same as k_thread_foreach_unlocked(), with an
194 : * assert requiring cpu == 0.
195 : *
196 : * @param cpu The filtered cpu number
197 : * @param user_cb Pointer to the user callback function.
198 : * @param user_data Pointer to user data.
199 : *
200 : * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
201 : * to be effective.
202 : * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
203 : * queue elements. It unlocks it during user callback function processing.
204 : * If a new thread is created while this @c foreach function is in
205 : * progress, the new thread will not be included in the enumeration.
206 : * If a thread is aborted during this enumeration, there is a race and
207 : * the aborted thread may still be included in the enumeration.
208 : * @note If a thread is aborted and the memory occupied by its @c k_thread
209 : * structure is reused while @c k_thread_foreach_unlocked is in progress,
210 : * the system may behave unpredictably.
211 : * This function may even fail to return, as it would follow the @c next
212 : * thread pointers, treating the given pointer as a pointer to a k_thread
213 : * structure when it is actually something else entirely.
214 : * Do not reuse the memory that was occupied by the k_thread structure of
215 : * an aborted thread if it was aborted after this function was called, in
216 : * any context.
217 : */
218 : #ifdef CONFIG_SMP
219 1 : void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
220 : k_thread_user_cb_t user_cb, void *user_data);
221 : #else
222 : static inline
223 : void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
224 : k_thread_user_cb_t user_cb, void *user_data)
225 : {
226 : __ASSERT(cpu == 0, "cpu filter out of bounds");
227 : ARG_UNUSED(cpu);
228 : k_thread_foreach_unlocked(user_cb, user_data);
229 : }
230 : #endif
231 :
232 : /** @} */
233 :
234 : /**
235 : * @defgroup thread_apis Thread APIs
236 : * @ingroup kernel_apis
237 : * @{
238 : */
239 :
240 : #endif /* !_ASMLANGUAGE */
241 :
242 :
243 : /*
244 : * Thread user options. May be needed by assembly code. Common part uses low
245 : * bits, arch-specific use high bits.
246 : */
247 :
248 : /**
249 : * @brief system thread that must not abort
250 : */
251 1 : #define K_ESSENTIAL (BIT(0))
252 :
253 0 : #define K_FP_IDX 1
254 : /**
255 : * @brief FPU registers are managed by context switch
256 : *
257 : * @details
258 : * This option indicates that the thread uses the CPU's floating point
259 : * registers. This instructs the kernel to take additional steps to save
260 : * and restore the contents of these registers when scheduling the thread.
261 : * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
262 : */
263 1 : #define K_FP_REGS (BIT(K_FP_IDX))
264 :
265 : /**
266 : * @brief user mode thread
267 : *
268 : * This thread has dropped from supervisor mode to user mode and consequently
269 : * has additional restrictions
270 : */
271 1 : #define K_USER (BIT(2))
272 :
273 : /**
274 : * @brief Inherit Permissions
275 : *
276 : * @details
277 : * Indicates that the thread being created should inherit all kernel object
278 : * permissions from the thread that created it. No effect if
279 : * @kconfig{CONFIG_USERSPACE} is not enabled.
280 : */
281 1 : #define K_INHERIT_PERMS (BIT(3))
282 :
283 : /**
284 : * @brief Callback item state
285 : *
286 : * @details
287 : * This is a single bit of state reserved for "callback manager"
288 : * utilities (p4wq initially) that need to track operations invoked
289 : * from within a user-provided callback they are executing.
290 : * Effectively it serves as a tiny bit of zero-overhead TLS data.
291 : */
292 1 : #define K_CALLBACK_STATE (BIT(4))
293 :
294 : /**
295 : * @brief DSP registers are managed by context switch
296 : *
297 : * @details
298 : * This option indicates that the thread uses the CPU's DSP registers.
299 : * This instructs the kernel to take additional steps to save and
300 : * restore the contents of these registers when scheduling the thread.
301 : * No effect if @kconfig{CONFIG_DSP_SHARING} is not enabled.
302 : */
303 1 : #define K_DSP_IDX 6
304 0 : #define K_DSP_REGS (BIT(K_DSP_IDX))
305 :
306 : /**
307 : * @brief AGU registers are managed by context switch
308 : *
309 : * @details
310 : * This option indicates that the thread uses the ARC processor's XY
311 : * memory and DSP feature. Often used with @kconfig{CONFIG_ARC_AGU_SHARING}.
312 : * No effect if @kconfig{CONFIG_ARC_AGU_SHARING} is not enabled.
313 : */
314 1 : #define K_AGU_IDX 7
315 0 : #define K_AGU_REGS (BIT(K_AGU_IDX))
316 :
317 : /**
318 : * @brief FP and SSE registers are managed by context switch on x86
319 : *
320 : * @details
321 : * This option indicates that the thread uses the x86 CPU's floating point
322 : * and SSE registers. This instructs the kernel to take additional steps to
323 : * save and restore the contents of these registers when scheduling
324 : * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
325 : */
326 1 : #define K_SSE_REGS (BIT(7))
327 :
328 : /* end - thread options */
329 :
330 : #if !defined(_ASMLANGUAGE)
331 : /**
332 : * @brief Dynamically allocate a thread stack.
333 : *
334 : * Relevant stack creation flags include:
335 : * - @ref K_USER allocate a userspace thread (requires `CONFIG_USERSPACE=y`)
336 : *
337 : * @param size Stack size in bytes.
338 : * @param flags Stack creation flags, or 0.
339 : *
340 : * @retval the allocated thread stack on success.
341 : * @retval NULL on failure.
342 : *
343 : * @see CONFIG_DYNAMIC_THREAD
344 : */
345 1 : __syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);
346 :
347 : /**
348 : * @brief Free a dynamically allocated thread stack.
349 : *
350 : * @param stack Pointer to the thread stack.
351 : *
352 : * @retval 0 on success.
353 : * @retval -EBUSY if the thread stack is in use.
354 : * @retval -EINVAL if @p stack is invalid.
355 : * @retval -ENOSYS if dynamic thread stack allocation is disabled
356 : *
357 : * @see CONFIG_DYNAMIC_THREAD
358 : */
359 1 : __syscall int k_thread_stack_free(k_thread_stack_t *stack);
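      :
      : /*
      :  * Example: full lifecycle of a dynamically allocated stack. A sketch
      :  * assuming CONFIG_DYNAMIC_THREAD=y; the entry function my_entry and
      :  * the 2048-byte size are hypothetical.
      :  *
      :  * @code
      :  * extern void my_entry(void *p1, void *p2, void *p3);
      :  *
      :  * k_thread_stack_t *stack = k_thread_stack_alloc(2048, 0);
      :  *
      :  * if (stack != NULL) {
      :  *     struct k_thread thread;
      :  *
      :  *     k_thread_create(&thread, stack, 2048, my_entry,
      :  *                     NULL, NULL, NULL, K_PRIO_PREEMPT(7), 0, K_NO_WAIT);
      :  *     k_thread_join(&thread, K_FOREVER);
      :  *     k_thread_stack_free(stack);
      :  * }
      :  * @endcode
      :  */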
360 :
361 : /**
362 : * @brief Create a thread.
363 : *
364 : * This routine initializes a thread, then schedules it for execution.
365 : *
366 : * The new thread may be scheduled for immediate execution or a delayed start.
367 : * If the newly spawned thread does not have a delayed start the kernel
368 : * scheduler may preempt the current thread to allow the new thread to
369 : * execute.
370 : *
371 : * Thread options are architecture-specific, and can include K_ESSENTIAL,
372 : * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
373 : * them using "|" (the logical OR operator).
374 : *
375 : * Stack objects passed to this function must be originally defined with
376 : * either of these macros in order to be portable:
377 : *
378 : * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
379 : * supervisor threads.
380 : * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
381 : * threads only. These stacks use less memory if CONFIG_USERSPACE is
382 : * enabled.
383 : *
384 : * The stack_size parameter has constraints. It must either be:
385 : *
386 : * - The original size value passed to K_THREAD_STACK_DEFINE() or
387 : * K_KERNEL_STACK_DEFINE()
388 : * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
389 : * defined with K_THREAD_STACK_DEFINE()
390 : * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
391 : * defined with K_KERNEL_STACK_DEFINE().
392 : *
393 : * Using other values, or sizeof(stack), may produce undefined behavior.
394 : *
395 : * @param new_thread Pointer to uninitialized struct k_thread
396 : * @param stack Pointer to the stack space.
397 : * @param stack_size Stack size in bytes.
398 : * @param entry Thread entry function.
399 : * @param p1 1st entry point parameter.
400 : * @param p2 2nd entry point parameter.
401 : * @param p3 3rd entry point parameter.
402 : * @param prio Thread priority.
403 : * @param options Thread options.
404 : * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
405 : *
406 : * @return ID of new thread.
407 : *
408 : */
409 1 : __syscall k_tid_t k_thread_create(struct k_thread *new_thread,
410 : k_thread_stack_t *stack,
411 : size_t stack_size,
412 : k_thread_entry_t entry,
413 : void *p1, void *p2, void *p3,
414 : int prio, uint32_t options, k_timeout_t delay);
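      :
      : /*
      :  * Example: creating a user-mode thread from a statically defined
      :  * stack. A sketch; my_entry, the stack size, and the priority are
      :  * illustrative.
      :  *
      :  * @code
      :  * #define MY_STACK_SIZE 1024
      :  *
      :  * K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
      :  * struct k_thread my_thread;
      :  *
      :  * extern void my_entry(void *p1, void *p2, void *p3);
      :  *
      :  * k_tid_t tid = k_thread_create(&my_thread, my_stack,
      :  *                               K_THREAD_STACK_SIZEOF(my_stack),
      :  *                               my_entry, NULL, NULL, NULL,
      :  *                               K_PRIO_PREEMPT(7),
      :  *                               K_USER | K_INHERIT_PERMS, K_NO_WAIT);
      :  * @endcode
      :  */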
415 :
416 : /**
417 : * @brief Drop a thread's privileges permanently to user mode
418 : *
419 : * This allows a supervisor thread to be re-used as a user thread.
420 : * This function does not return, but control will transfer to the provided
421 : * entry point as if this was a new user thread.
422 : *
423 : * The implementation ensures that the stack buffer contents are erased.
424 : * Any thread-local storage will be reverted to a pristine state.
425 : *
426 : * Memory domain membership, resource pool assignment, kernel object
427 : * permissions, priority, and thread options are preserved.
428 : *
429 : * A common use of this function is to re-use the main thread as a user thread
430 : * once all supervisor mode-only tasks have been completed.
431 : *
432 : * @param entry Function to start executing from
433 : * @param p1 1st entry point parameter
434 : * @param p2 2nd entry point parameter
435 : * @param p3 3rd entry point parameter
436 : */
437 1 : FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
438 : void *p1, void *p2,
439 : void *p3);
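      :
      : /*
      :  * Example: re-using the main thread as a user thread once
      :  * supervisor-only setup is done. A sketch assuming
      :  * CONFIG_USERSPACE=y; my_sem and user_entry are hypothetical.
      :  *
      :  * @code
      :  * k_thread_access_grant(k_current_get(), &my_sem);
      :  * k_thread_user_mode_enter(user_entry, NULL, NULL, NULL);
      :  * @endcode
      :  */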
440 :
441 : /**
442 : * @brief Grant a thread access to a set of kernel objects
443 : *
444 : * This is a convenience function. For the provided thread, grant access to
445 : * the remaining arguments, which must be pointers to kernel objects.
446 : *
447 : * The thread object must be initialized (i.e. running). The objects don't
448 : * need to be.
449 : * Note that NULL shouldn't be passed as an argument.
450 : *
451 : * @param thread Thread to grant access to objects
452 : * @param ... list of kernel object pointers
453 : */
454 1 : #define k_thread_access_grant(thread, ...) \
455 : FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)
456 :
457 : /**
458 : * @brief Assign a resource memory pool to a thread
459 : *
460 : * By default, threads have no resource pool assigned unless their parent
461 : * thread has a resource pool, in which case it is inherited. Multiple
462 : * threads may be assigned to the same memory pool.
463 : *
464 : * Changing a thread's resource pool will not migrate allocations from the
465 : * previous pool.
466 : *
467 : * @param thread Target thread to assign a memory pool for resource requests.
468 : * @param heap Heap object to use for resources,
469 : * or NULL if the thread should no longer have a memory pool.
470 : */
471 1 : static inline void k_thread_heap_assign(struct k_thread *thread,
472 : struct k_heap *heap)
473 : {
474 : thread->resource_pool = heap;
475 : }
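      :
      : /*
      :  * Example: giving a thread a private heap for its resource
      :  * requests. A sketch; the pool name and size are hypothetical.
      :  *
      :  * @code
      :  * K_HEAP_DEFINE(my_pool, 4096);
      :  *
      :  * k_thread_heap_assign(&my_thread, &my_pool);
      :  * @endcode
      :  */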
476 :
477 : #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
478 : /**
479 : * @brief Obtain stack usage information for the specified thread
480 : *
481 : * User threads will need to have permission on the target thread object.
482 : *
483 : * Some hardware may prevent inspection of a stack buffer currently in use.
484 : * If this API is called from supervisor mode, on the currently running thread,
485 : * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
486 : * error will be generated.
487 : *
488 : * @param thread Thread to inspect stack information
489 : * @param unused_ptr Output parameter, filled in with the unused stack space
490 : * of the target thread in bytes.
491 : * @return 0 on success
492 : * @return -EBADF Bad thread object (user mode only)
493 : * @return -EPERM No permissions on thread object (user mode only)
494 : * @return -ENOTSUP Forbidden by hardware policy
495 : * @return -EINVAL Thread is uninitialized or exited (user mode only)
496 : * @return -EFAULT Bad memory address for unused_ptr (user mode only)
497 : */
498 : __syscall int k_thread_stack_space_get(const struct k_thread *thread,
499 : size_t *unused_ptr);
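      :
      : /*
      :  * Example: logging the unused stack of the current thread. A sketch
      :  * assuming the two Kconfig options above are enabled.
      :  *
      :  * @code
      :  * size_t unused;
      :  *
      :  * if (k_thread_stack_space_get(k_current_get(), &unused) == 0) {
      :  *     printk("unused stack: %zu bytes\n", unused);
      :  * }
      :  * @endcode
      :  */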
500 : #endif
501 :
502 : #if (K_HEAP_MEM_POOL_SIZE > 0)
503 : /**
504 : * @brief Assign the system heap as a thread's resource pool
505 : *
506 : * Similar to k_thread_heap_assign(), but the thread will use
507 : * the kernel heap to draw memory.
508 : *
509 : * Use with caution, as a malicious thread could perform DoS attacks on the
510 : * kernel heap.
511 : *
512 : * @param thread Target thread to assign the system heap for resource requests
513 : *
514 : */
515 : void k_thread_system_pool_assign(struct k_thread *thread);
516 : #endif /* (K_HEAP_MEM_POOL_SIZE > 0) */
517 :
518 : /**
519 : * @brief Sleep until a thread exits
520 : *
521 : * The caller will be put to sleep until the target thread exits, either due
522 : * to being aborted, self-exiting, or taking a fatal error. This API returns
523 : * immediately if the thread isn't running.
524 : *
525 : * This API may only be called from ISRs with a K_NO_WAIT timeout,
526 : * where it can be useful as a predicate to detect when a thread has
527 : * aborted.
528 : *
529 : * @param thread Thread to wait to exit
530 : * @param timeout upper bound time to wait for the thread to exit.
531 : * @retval 0 success, target thread has exited or wasn't running
532 : * @retval -EBUSY returned without waiting
533 : * @retval -EAGAIN waiting period timed out
534 : * @retval -EDEADLK target thread is joining on the caller, or target thread
535 : * is the caller
536 : */
537 1 : __syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
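      :
      : /*
      :  * Example: waiting up to 100 ms for a worker thread to exit; the
      :  * thread object name is hypothetical.
      :  *
      :  * @code
      :  * int ret = k_thread_join(&worker_thread, K_MSEC(100));
      :  *
      :  * if (ret == -EAGAIN) {
      :  *     printk("worker is still running\n");
      :  * }
      :  * @endcode
      :  */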
538 :
539 : /**
540 : * @brief Put the current thread to sleep.
541 : *
542 : * This routine puts the current thread to sleep for @a duration,
543 : * specified as a k_timeout_t object.
544 : *
545 : * @param timeout Desired duration of sleep.
546 : *
547 : * @return Zero if the requested time has elapsed, or, if the thread was
548 : * woken up prematurely by \ref k_wakeup, the time left to sleep rounded
549 : * up to the nearest millisecond.
550 : */
551 1 : __syscall int32_t k_sleep(k_timeout_t timeout);
552 :
553 : /**
554 : * @brief Put the current thread to sleep.
555 : *
556 : * This routine puts the current thread to sleep for @a duration milliseconds.
557 : *
558 : * @param ms Number of milliseconds to sleep.
559 : *
560 : * @return Zero if the requested time has elapsed, or, if the thread was
561 : * woken up prematurely by \ref k_wakeup, the time left to sleep rounded
562 : * up to the nearest millisecond.
563 : */
564 1 : static inline int32_t k_msleep(int32_t ms)
565 : {
566 : return k_sleep(Z_TIMEOUT_MS(ms));
567 : }
568 :
569 : /**
570 : * @brief Put the current thread to sleep with microsecond resolution.
571 : *
572 : * This function is unlikely to work as expected without kernel tuning.
573 : * In particular, because the lower bound on the duration of a sleep is
574 : * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
575 : * adjusted to achieve the resolution desired. The implications of doing
576 : * this must be understood before attempting to use k_usleep(). Use with
577 : * caution.
578 : *
579 : * @param us Number of microseconds to sleep.
580 : *
581 : * @return Zero if the requested time has elapsed, or, if the thread was
582 : * woken up prematurely by \ref k_wakeup, the time left to sleep rounded
583 : * up to the nearest microsecond.
584 : */
585 1 : __syscall int32_t k_usleep(int32_t us);
586 :
587 : /**
588 : * @brief Cause the current thread to busy wait.
589 : *
590 : * This routine causes the current thread to execute a "do nothing" loop for
591 : * @a usec_to_wait microseconds.
592 : *
593 : * @note The clock used for the microsecond-resolution delay here may
594 : * be skewed relative to the clock used for system timeouts like
595 : * k_sleep(). For example k_busy_wait(1000) may take slightly more or
596 : * less time than k_sleep(K_MSEC(1)), with the offset dependent on
597 : * clock tolerances.
598 : *
599 : * @note In case when @kconfig{CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE} and
600 : * @kconfig{CONFIG_PM} options are enabled, this function may not work.
601 : * The timer/clock used for delay processing may be disabled/inactive.
602 : */
603 1 : __syscall void k_busy_wait(uint32_t usec_to_wait);
604 :
605 : /**
606 : * @brief Check whether it is possible to yield in the current context.
607 : *
608 : * This routine checks whether the kernel is in a state where it is possible to
609 : * yield or call blocking APIs. It should be used by code that needs to yield
610 : * to perform correctly, but can feasibly be called from contexts where that
611 : * is not possible. For example in the PRE_KERNEL initialization step, or when
612 : * being run from the idle thread.
613 : *
614 : * @return True if it is possible to yield in the current context, false otherwise.
615 : */
616 1 : bool k_can_yield(void);
617 :
618 : /**
619 : * @brief Yield the current thread.
620 : *
621 : * This routine causes the current thread to yield execution to another
622 : * thread of the same or higher priority. If there are no other ready threads
623 : * of the same or higher priority, the routine returns immediately.
624 : */
625 1 : __syscall void k_yield(void);
626 :
627 : /**
628 : * @brief Wake up a sleeping thread.
629 : *
630 : * This routine prematurely wakes up @a thread from sleeping.
631 : *
632 : * If @a thread is not currently sleeping, the routine has no effect.
633 : *
634 : * @param thread ID of thread to wake.
635 : */
636 1 : __syscall void k_wakeup(k_tid_t thread);
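      :
      : /*
      :  * Example: a thread that sleeps for up to 10 s; a nonzero return from
      :  * k_sleep() reveals it was woken early by k_wakeup() from elsewhere.
      :  * handle_early_wakeup() is hypothetical.
      :  *
      :  * @code
      :  * int32_t left = k_sleep(K_SECONDS(10));
      :  *
      :  * if (left > 0) {
      :  *     handle_early_wakeup(left);
      :  * }
      :  * @endcode
      :  */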
637 :
638 : /**
639 : * @brief Query thread ID of the current thread.
640 : *
641 : * This unconditionally queries the kernel via a system call.
642 : *
643 : * @note Use k_current_get() unless absolutely sure this is necessary.
644 : * This should only be used directly where the thread local
645 : * variable cannot be used or may contain invalid values
646 : * if thread local storage (TLS) is enabled. If TLS is not
647 : * enabled, this is the same as k_current_get().
648 : *
649 : * @return ID of current thread.
650 : */
651 : __attribute_const__
652 1 : __syscall k_tid_t k_sched_current_thread_query(void);
653 :
654 : /**
655 : * @brief Get thread ID of the current thread.
656 : *
657 : * @return ID of current thread.
658 : *
659 : */
660 : __attribute_const__
661 1 : static inline k_tid_t k_current_get(void)
662 : {
663 : #ifdef CONFIG_CURRENT_THREAD_USE_TLS
664 :
665 : /* Thread-local cache of current thread ID, set in z_thread_entry() */
666 : extern Z_THREAD_LOCAL k_tid_t z_tls_current;
667 :
668 : return z_tls_current;
669 : #else
670 : return k_sched_current_thread_query();
671 : #endif
672 : }
673 :
674 : /**
675 : * @brief Abort a thread.
676 : *
677 : * This routine permanently stops execution of @a thread. The thread is taken
678 : * off all kernel queues it is part of (i.e. the ready queue, the timeout
679 : * queue, or a kernel object wait queue). However, any kernel resources the
680 : * thread might currently own (such as mutexes or memory blocks) are not
681 : * released. It is the responsibility of the caller of this routine to ensure
682 : * all necessary cleanup is performed.
683 : *
684 : * After k_thread_abort() returns, the thread is guaranteed not to be
685 : * running or to become runnable anywhere on the system. Normally
686 : * this is done via blocking the caller (in the same manner as
687 : * k_thread_join()), but in interrupt context on SMP systems the
688 : * implementation is required to spin for threads that are running on
689 : * other CPUs.
690 : *
691 : * @param thread ID of thread to abort.
692 : */
693 1 : __syscall void k_thread_abort(k_tid_t thread);
694 :
695 : k_ticks_t z_timeout_expires(const struct _timeout *timeout);
696 : k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
697 :
698 : #ifdef CONFIG_SYS_CLOCK_EXISTS
699 :
700 : /**
701 : * @brief Get time when a thread wakes up, in system ticks
702 : *
703 : * This routine computes the system uptime when a waiting thread next
704 : * executes, in units of system ticks. If the thread is not waiting,
705 : * it returns current system time.
706 : */
707 1 : __syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread);
708 :
709 : static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
710 : const struct k_thread *thread)
711 : {
712 : return z_timeout_expires(&thread->base.timeout);
713 : }
714 :
715 : /**
716 : * @brief Get time remaining before a thread wakes up, in system ticks
717 : *
718 : * This routine computes the time remaining before a waiting thread
719 : * next executes, in units of system ticks. If the thread is not
720 : * waiting, it returns zero.
721 : */
722 1 : __syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread);
723 :
724 : static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
725 : const struct k_thread *thread)
726 : {
727 : return z_timeout_remaining(&thread->base.timeout);
728 : }
729 :
730 : #endif /* CONFIG_SYS_CLOCK_EXISTS */
731 :
732 : /**
733 : * @cond INTERNAL_HIDDEN
734 : */
735 :
736 : struct _static_thread_data {
737 : struct k_thread *init_thread;
738 : k_thread_stack_t *init_stack;
739 : unsigned int init_stack_size;
740 : k_thread_entry_t init_entry;
741 : void *init_p1;
742 : void *init_p2;
743 : void *init_p3;
744 : int init_prio;
745 : uint32_t init_options;
746 : const char *init_name;
747 : #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
748 : int32_t init_delay_ms;
749 : #else
750 : k_timeout_t init_delay;
751 : #endif
752 : };
753 :
754 : #ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
755 : #define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
756 : #define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
757 : #else
758 : #define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS(ms)
759 : #define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
760 : #endif
761 :
762 : #define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
763 : entry, p1, p2, p3, \
764 : prio, options, delay, tname) \
765 : { \
766 : .init_thread = (thread), \
767 : .init_stack = (stack), \
768 : .init_stack_size = (stack_size), \
769 : .init_entry = (k_thread_entry_t)entry, \
770 : .init_p1 = (void *)p1, \
771 : .init_p2 = (void *)p2, \
772 : .init_p3 = (void *)p3, \
773 : .init_prio = (prio), \
774 : .init_options = (options), \
775 : .init_name = STRINGIFY(tname), \
776 : Z_THREAD_INIT_DELAY_INITIALIZER(delay) \
777 : }
778 :
779 : /*
780 : * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
781 : * information on arguments.
782 : */
783 : #define Z_THREAD_COMMON_DEFINE(name, stack_size, \
784 : entry, p1, p2, p3, \
785 : prio, options, delay) \
786 : struct k_thread _k_thread_obj_##name; \
787 : STRUCT_SECTION_ITERABLE(_static_thread_data, \
788 : _k_thread_data_##name) = \
789 : Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
790 : _k_thread_stack_##name, stack_size,\
791 : entry, p1, p2, p3, prio, options, \
792 : delay, name); \
793 : const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
794 :
795 : /**
796 : * INTERNAL_HIDDEN @endcond
797 : */
798 :
799 : /**
800 : * @brief Statically define and initialize a thread.
801 : *
802 : * The thread may be scheduled for immediate execution or a delayed start.
803 : *
804 : * Thread options are architecture-specific, and can include K_ESSENTIAL,
805 : * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
806 : * them using "|" (the logical OR operator).
807 : *
808 : * The ID of the thread can be accessed using:
809 : *
810 : * @code extern const k_tid_t <name>; @endcode
811 : *
812 : * @param name Name of the thread.
813 : * @param stack_size Stack size in bytes.
814 : * @param entry Thread entry function.
815 : * @param p1 1st entry point parameter.
816 : * @param p2 2nd entry point parameter.
817 : * @param p3 3rd entry point parameter.
818 : * @param prio Thread priority.
819 : * @param options Thread options.
820 : * @param delay Scheduling delay (in milliseconds), zero for no delay.
821 : *
822 : * @note Static threads with zero delay should not normally have
823 : * MetaIRQ priority levels. This can preempt the system
824 : * initialization handling (depending on the priority of the main
825 : * thread) and cause surprising ordering side effects. It will not
826 : * affect anything in the OS per se, but consider it bad practice.
827 : * Use a SYS_INIT() callback if you need to run code before entrance
828 : * to the application main().
829 : */
830 : #define K_THREAD_DEFINE(name, stack_size, \
831 : entry, p1, p2, p3, \
832 1 : prio, options, delay) \
833 : K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
834 : Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
835 : prio, options, delay)
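      :
      : /*
      :  * Example: statically defining a thread that starts 500 ms after
      :  * boot. A sketch; blink_entry and the sizes are illustrative.
      :  *
      :  * @code
      :  * void blink_entry(void *p1, void *p2, void *p3);
      :  *
      :  * K_THREAD_DEFINE(blink, 1024, blink_entry, NULL, NULL, NULL,
      :  *                 K_PRIO_PREEMPT(7), 0, 500);
      :  * @endcode
      :  */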
836 :
837 : /**
838 : * @brief Statically define and initialize a thread intended to run only in kernel mode.
839 : *
840 : * The thread may be scheduled for immediate execution or a delayed start.
841 : *
842 : * Thread options are architecture-specific, and can include K_ESSENTIAL,
843 : * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
844 : * them using "|" (the logical OR operator).
845 : *
846 : * The ID of the thread can be accessed using:
847 : *
848 : * @code extern const k_tid_t <name>; @endcode
849 : *
850 : * @note Threads defined by this macro can only run in kernel mode, and
851 : * cannot be transformed into user threads via k_thread_user_mode_enter().
852 : *
853 : * @warning Depending on the architecture, the stack size (@p stack_size)
854 : * may need to be a multiple of CONFIG_MMU_PAGE_SIZE (with an MMU)
855 : * or a power of two (with an MPU).
856 : *
857 : * @param name Name of the thread.
858 : * @param stack_size Stack size in bytes.
859 : * @param entry Thread entry function.
860 : * @param p1 1st entry point parameter.
861 : * @param p2 2nd entry point parameter.
862 : * @param p3 3rd entry point parameter.
863 : * @param prio Thread priority.
864 : * @param options Thread options.
865 : * @param delay Scheduling delay (in milliseconds), zero for no delay.
866 : */
867 : #define K_KERNEL_THREAD_DEFINE(name, stack_size, \
868 : entry, p1, p2, p3, \
869 1 : prio, options, delay) \
870 : K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
871 : Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
872 : prio, options, delay)
873 :
874 : /**
875 : * @brief Get a thread's priority.
876 : *
877 : * This routine gets the priority of @a thread.
878 : *
879 : * @param thread ID of thread whose priority is needed.
880 : *
881 : * @return Priority of @a thread.
882 : */
883 1 : __syscall int k_thread_priority_get(k_tid_t thread);
884 :
885 : /**
886 : * @brief Set a thread's priority.
887 : *
888 : * This routine immediately changes the priority of @a thread.
889 : *
890 : * Rescheduling can occur immediately depending on the priority @a thread is
891 : * set to:
892 : *
893 : * - If its priority is raised above the priority of a currently scheduled
894 : * preemptible thread, @a thread will be scheduled in.
895 : *
896 : * - If the caller lowers the priority of a currently scheduled preemptible
897 : * thread below that of other threads in the system, the thread of the highest
898 : * priority will be scheduled in.
899 : *
900 : * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
901 : * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
902 : * highest priority.
903 : *
904 : * @param thread ID of thread whose priority is to be set.
905 : * @param prio New priority.
906 : *
907 : * @warning Changing the priority of a thread currently involved in mutex
908 : * priority inheritance may result in undefined behavior.
909 : */
910 1 : __syscall void k_thread_priority_set(k_tid_t thread, int prio);
911 :
912 :
913 : #ifdef CONFIG_SCHED_DEADLINE
914 : /**
915 : * @brief Set deadline expiration time for scheduler
916 : *
917 : * This sets the "deadline" expiration as a time delta from the
918 : * current time, in the same units used by k_cycle_get_32(). The
919 : * scheduler (when deadline scheduling is enabled) will choose the
920 : * next expiring thread when selecting between threads at the same
921 : * static priority. Threads at different priorities will be scheduled
922 : * according to their static priority.
923 : *
924 : * @note Deadlines are stored internally using 32 bit unsigned
925 : * integers. The number of cycles between the "first" deadline in the
926 : * scheduler queue and the "last" deadline must be less than 2^31 (i.e
927 : * a signed non-negative quantity). Failure to adhere to this rule
928 : * may result in scheduled threads running in an incorrect deadline
929 : * order.
930 : *
931 : * @note Despite the API naming, the scheduler makes no guarantees
932 : * the thread WILL be scheduled within that deadline, nor does it take
933 : * extra metadata (like e.g. the "runtime" and "period" parameters in
934 : * Linux sched_setattr()) that allows the kernel to validate the
935 : * scheduling for achievability. Such features could be implemented
936 : * above this call, which is simply input to the priority selection
937 : * logic.
938 : *
939 : * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
940 : * configuration.
941 : *
942 : * @param thread A thread on which to set the deadline
943 : * @param deadline A time delta, in cycle units
944 : *
945 : */
946 1 : __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
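      :
      : /*
      :  * Example: asking the scheduler to prefer the current thread for
      :  * roughly the next millisecond. A sketch assuming
      :  * CONFIG_SCHED_DEADLINE=y.
      :  *
      :  * @code
      :  * k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(1));
      :  * @endcode
      :  */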
947 : #endif
948 :
949 : #ifdef CONFIG_SCHED_CPU_MASK
950 : /**
951 : * @brief Sets all CPU enable masks to zero
952 : *
953 : * After this returns, the thread will no longer be schedulable on any
954 : * CPUs. The thread must not be currently runnable.
955 : *
956 : * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
957 : * configuration.
958 : *
959 : * @param thread Thread to operate upon
960 : * @return Zero on success, otherwise error code
961 : */
962 1 : int k_thread_cpu_mask_clear(k_tid_t thread);
963 :
964 : /**
965 : * @brief Sets all CPU enable masks to one
966 : *
967 : * After this returns, the thread will be schedulable on any CPU. The
968 : * thread must not be currently runnable.
969 : *
970 : * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
971 : * configuration.
972 : *
973 : * @param thread Thread to operate upon
974 : * @return Zero on success, otherwise error code
975 : */
976 1 : int k_thread_cpu_mask_enable_all(k_tid_t thread);
977 :
978 : /**
979 : * @brief Enable a thread to run on the specified CPU
980 : *
981 : * The thread must not be currently runnable.
982 : *
983 : * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
984 : * configuration.
985 : *
986 : * @param thread Thread to operate upon
987 : * @param cpu CPU index
988 : * @return Zero on success, otherwise error code
989 : */
990 1 : int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
991 :
992 : /**
993 : * @brief Prevent a thread from running on the specified CPU
994 : *
995 : * The thread must not be currently runnable.
996 : *
997 : * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
998 : * configuration.
999 : *
1000 : * @param thread Thread to operate upon
1001 : * @param cpu CPU index
1002 : * @return Zero on success, otherwise error code
1003 : */
1004 1 : int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
1005 :
1006 : /**
1007 : * @brief Pin a thread to a CPU
1008 : *
1009 : * Pin a thread to a CPU by first clearing the cpu mask and then enabling the
1010 : * thread on the selected CPU.
1011 : *
1012 : * @param thread Thread to operate upon
1013 : * @param cpu CPU index
1014 : * @return Zero on success, otherwise error code
1015 : */
1016 1 : int k_thread_cpu_pin(k_tid_t thread, int cpu);
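      :
      : /*
      :  * Example: pinning a thread to CPU 1 before making it runnable. A
      :  * sketch assuming CONFIG_SCHED_CPU_MASK=y and a thread created with
      :  * a K_FOREVER start delay; tid is hypothetical.
      :  *
      :  * @code
      :  * k_thread_cpu_pin(tid, 1);
      :  * k_thread_start(tid);
      :  * @endcode
      :  */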
1017 : #endif
1018 :
1019 : /**
1020 : * @brief Suspend a thread.
1021 : *
1022 : * This routine prevents the kernel scheduler from making @a thread
1023 : * the current thread. All other internal operations on @a thread are
1024 : * still performed; for example, kernel objects it is waiting on are
1025 : * still handed to it. Thread suspension does not impact any timeout
1026 : * upon which the thread may be waiting (such as a timeout from a call
1027 : * to k_sem_take() or k_sleep()). Thus if the timeout expires while the
1028 : * thread is suspended, it is still suspended until k_thread_resume()
1029 : * is called.
1030 : *
1031 : * When the target thread is active on another CPU, the caller will block until
1032 : * the target thread is halted (suspended or aborted). If the caller is in
1033 : * an interrupt context, however, it will spin waiting for the target
1034 : * thread running on another CPU to halt.
1035 : *
1036 : * If @a thread is already suspended, the routine has no effect.
1037 : *
1038 : * @param thread ID of thread to suspend.
1039 : */
1040 1 : __syscall void k_thread_suspend(k_tid_t thread);
1041 :
1042 : /**
1043 : * @brief Resume a suspended thread.
1044 : *
1045 : * This routine reverses the thread suspension from k_thread_suspend()
1046 : * and allows the kernel scheduler to make @a thread the current thread
1047 : * when it is next eligible for that role.
1048 : *
1049 : * If @a thread is not currently suspended, the routine has no effect.
1050 : *
1051 : * @param thread ID of thread to resume.
1052 : */
1053 1 : __syscall void k_thread_resume(k_tid_t thread);
1054 :
1055 : /**
1056 : * @brief Start an inactive thread
1057 : *
1058 : * If a thread was created with K_FOREVER in the delay parameter, it will
1059 : * not be added to the scheduling queue until this function is called
1060 : * on it.
1061 : *
1062 : * @note This is a legacy API for compatibility. Modern Zephyr
1063 : * threads are initialized in the "sleeping" state and do not need
1064 : * special handling for "start".
1065 : *
1066 : * @param thread thread to start
1067 : */
1068 1 : static inline void k_thread_start(k_tid_t thread)
1069 : {
1070 : k_wakeup(thread);
1071 : }
1072 :
1073 : /**
1074 : * @brief Set time-slicing period and scope.
1075 : *
1076 : * This routine specifies how the scheduler will perform time slicing of
1077 : * preemptible threads.
1078 : *
1079 : * To enable time slicing, @a slice must be non-zero. The scheduler
1080 : * ensures that no thread runs for more than the specified time limit
1081 : * before other threads of that priority are given a chance to execute.
1082 : * Any thread whose priority is higher than @a prio is exempted, and may
1083 : * execute as long as desired without being preempted due to time slicing.
1084 : *
1085 : * Time slicing only limits the maximum amount of time a thread may continuously
1086 : * execute. Once the scheduler selects a thread for execution, there is no
1087 : * minimum guaranteed time the thread will execute before threads of greater or
1088 : * equal priority are scheduled.
1089 : *
1090 : * When the current thread is the only one of that priority eligible
1091 : * for execution, this routine has no effect; the thread is immediately
1092 : * rescheduled after the slice period expires.
1093 : *
1094 : * To disable timeslicing, set both @a slice and @a prio to zero.
1095 : *
1096 : * @param slice Maximum time slice length (in milliseconds).
1097 : * @param prio Highest thread priority level eligible for time slicing.
1098 : */
1099 1 : void k_sched_time_slice_set(int32_t slice, int prio);
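      :
      : /*
      :  * Example: round-robin scheduling in 10 ms slices among preemptible
      :  * threads at priority 0 or lower.
      :  *
      :  * @code
      :  * k_sched_time_slice_set(10, 0);
      :  * @endcode
      :  */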
1100 :
1101 : /**
1102 : * @brief Set thread time slice
1103 : *
1104 : * As for k_sched_time_slice_set, but (when
1105 : * CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
1106 : * thread. When non-zero, this timeslice will take precedence over
1107 : * the global value.
1108 : *
1109 : * When such a thread's timeslice expires, the configured callback
1110 : * will be called before the thread is removed/re-added to the run
1111 : * queue. This callback will occur in interrupt context, and the
1112 : * specified thread is guaranteed to have been preempted by the
1113 : * currently-executing ISR. Such a callback is free to, for example,
1114 : * modify the thread priority or slice time for future execution,
1115 : * suspend the thread, etc...
1116 : *
1117 : * @note Unlike the older API, the time slice parameter here is
1118 : * specified in ticks, not milliseconds. Ticks have always been the
1119 : * internal unit, and not all platforms have integer conversions
1120 : * between the two.
1121 : *
1122 : * @note Threads with a non-zero slice time set will be timesliced
1123 : * always, even if they are higher priority than the maximum timeslice
1124 : * priority set via k_sched_time_slice_set().
1125 : *
1126 : * @note The callback notification for slice expiration happens, as it
1127 : * must, while the thread is still "current", and thus it happens
1128 : * before any registered timeouts at this tick. This has the somewhat
1129 : * confusing side effect that the tick time (c.f. k_uptime_get()) does
1130 : * not yet reflect the expired ticks. Applications wishing to make
1131 : * fine-grained timing decisions within this callback should use the
1132 : * cycle API, or derived facilities like k_thread_runtime_stats_get().
1133 : *
1134 : * @param th A valid, initialized thread
1135 : * @param slice_ticks Maximum timeslice, in ticks
1136 : * @param expired Callback function called on slice expiration
1137 : * @param data Parameter for the expiration handler
1138 : */
1139 1 : void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
1140 : k_thread_timeslice_fn_t expired, void *data);
1141 :
1142 : /** @} */
1143 :
1144 : /**
1145 : * @addtogroup isr_apis
1146 : * @{
1147 : */
1148 :
1149 : /**
1150 : * @brief Determine if code is running at interrupt level.
1151 : *
1152 : * This routine allows the caller to customize its actions, depending on
1153 : * whether it is a thread or an ISR.
1154 : *
1155 : * @funcprops \isr_ok
1156 : *
1157 : * @return false if invoked by a thread.
1158 : * @return true if invoked by an ISR.
1159 : */
1160 1 : bool k_is_in_isr(void);
1161 :
1162 : /**
1163 : * @brief Determine if code is running in a preemptible thread.
1164 : *
1165 : * This routine allows the caller to customize its actions, depending on
1166 : * whether it can be preempted by another thread. The routine returns a 'true'
1167 : * value if all of the following conditions are met:
1168 : *
1169 : * - The code is running in a thread, not at ISR.
1170 : * - The thread's priority is in the preemptible range.
1171 : * - The thread has not locked the scheduler.
1172 : *
1173 : * @funcprops \isr_ok
1174 : *
1175 : * @return 0 if invoked by an ISR or by a cooperative thread.
1176 : * @return Non-zero if invoked by a preemptible thread.
1177 : */
1178 1 : __syscall int k_is_preempt_thread(void);
1179 :
1180 : /**
1181 : * @brief Test whether startup is in the before-main-task phase.
1182 : *
1183 : * This routine allows the caller to customize its actions, depending on
1184 : * whether it is being invoked before the kernel is fully active.
1185 : *
1186 : * @funcprops \isr_ok
1187 : *
1188 : * @return true if invoked before post-kernel initialization
1189 : * @return false if invoked during/after post-kernel initialization
1190 : */
1191 1 : static inline bool k_is_pre_kernel(void)
1192 : {
1193 : extern bool z_sys_post_kernel; /* in init.c */
1194 :
1195 : return !z_sys_post_kernel;
1196 : }
1197 :
1198 : /**
1199 : * @}
1200 : */
1201 :
1202 : /**
1203 : * @addtogroup thread_apis
1204 : * @{
1205 : */
1206 :
1207 : /**
1208 : * @brief Lock the scheduler.
1209 : *
1210 : * This routine prevents the current thread from being preempted by another
1211 : * thread by instructing the scheduler to treat it as a cooperative thread.
1212 : * If the thread subsequently performs an operation that makes it unready,
1213 : * it will be context switched out in the normal manner. When the thread
1214 : * again becomes the current thread, its non-preemptible status is maintained.
1215 : *
1216 : * This routine can be called recursively.
1217 : *
1218 : * Owing to clever implementation details, scheduler locks are
1219 : * extremely fast for non-userspace threads (just one byte
1220 : * inc/decrement in the thread struct).
1221 : *
1222 : * @note This works by elevating the thread priority temporarily to a
1223 : * cooperative priority, allowing cheap synchronization vs. other
1224 : * preemptible or cooperative threads running on the current CPU. It
1225 : * does not prevent preemption or asynchrony of other types. It does
1226 : * not prevent threads from running on other CPUs when CONFIG_SMP=y.
1227 : * It does not prevent interrupts from happening, nor does it prevent
1228 : * threads with MetaIRQ priorities from preempting the current thread.
1229 : * In general this is a historical API not well-suited to modern
1230 : * applications, use with care.
1231 : */
1232 1 : void k_sched_lock(void);
1233 :
1234 : /**
1235 : * @brief Unlock the scheduler.
1236 : *
1237 : * This routine reverses the effect of a previous call to k_sched_lock().
1238 : * A thread must call the routine once for each time it called k_sched_lock()
1239 : * before the thread becomes preemptible.
1240 : */
1241 1 : void k_sched_unlock(void);
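      :
      : /*
      :  * Example: a short non-preemptible section on the current CPU;
      :  * update_shared_state() is hypothetical.
      :  *
      :  * @code
      :  * k_sched_lock();
      :  * update_shared_state();
      :  * k_sched_unlock();
      :  * @endcode
      :  */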
1242 :
1243 : /**
1244 : * @brief Set current thread's custom data.
1245 : *
1246 : * This routine sets the custom data for the current thread to @a value.
1247 : *
1248 : * Custom data is not used by the kernel itself, and is freely available
1249 : * for a thread to use as it sees fit. It can be used as a framework
1250 : * upon which to build thread-local storage.
1251 : *
1252 : * @param value New custom data value.
1253 : *
1254 : */
1255 1 : __syscall void k_thread_custom_data_set(void *value);
1256 :
1257 : /**
1258 : * @brief Get current thread's custom data.
1259 : *
1260 : * This routine returns the custom data for the current thread.
1261 : *
1262 : * @return Current custom data value.
1263 : */
1264 1 : __syscall void *k_thread_custom_data_get(void);
1265 :
1266 : /**
1267 : * @brief Set current thread name
1268 : *
1269 : * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
1270 : * is enabled for tracing and debugging.
1271 : *
1272 : * @param thread Thread to set name, or NULL to set the current thread
1273 : * @param str Name string
1274 : * @retval 0 on success
1275 : * @retval -EFAULT Memory access error with supplied string
1276 : * @retval -ENOSYS Thread name configuration option not enabled
1277 : * @retval -EINVAL Thread name too long
1278 : */
1279 1 : __syscall int k_thread_name_set(k_tid_t thread, const char *str);
1280 :
1281 : /**
1282 : * @brief Get thread name
1283 : *
1284 : * Get the name of a thread
1285 : *
1286 : * @param thread Thread ID
1287 : * @retval Thread name, or NULL if configuration not enabled
1288 : */
1289 1 : const char *k_thread_name_get(k_tid_t thread);
1290 :
1291 : /**
1292 : * @brief Copy the thread name into a supplied buffer
1293 : *
1294 : * @param thread Thread to obtain name information
1295 : * @param buf Destination buffer
1296 : * @param size Destination buffer size
1297 : * @retval -ENOSPC Destination buffer too small
1298 : * @retval -EFAULT Memory access error
1299 : * @retval -ENOSYS Thread name feature not enabled
1300 : * @retval 0 Success
1301 : */
1302 1 : __syscall int k_thread_name_copy(k_tid_t thread, char *buf,
1303 : size_t size);
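      :
      : /*
      :  * Example: naming the current thread, then reading the name back
      :  * into a buffer. A sketch assuming CONFIG_THREAD_NAME=y.
      :  *
      :  * @code
      :  * char buf[CONFIG_THREAD_MAX_NAME_LEN];
      :  *
      :  * k_thread_name_set(NULL, "worker");
      :  * k_thread_name_copy(k_current_get(), buf, sizeof(buf));
      :  * @endcode
      :  */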
1304 :
1305 : /**
1306 : * @brief Get thread state string
1307 : *
1308 : * This routine generates a human friendly string containing the thread's
1309 : * state, and copies as much of it as possible into @a buf.
1310 : *
1311 : * @param thread_id Thread ID
1312 : * @param buf Buffer into which to copy state strings
1313 : * @param buf_size Size of the buffer
1314 : *
1315 : * @retval Pointer to @a buf if data was copied, else a pointer to "".
1316 : */
1317 1 : const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
1318 :
1319 : /**
1320 : * @}
1321 : */
1322 :
1323 : /**
1324 : * @addtogroup clock_apis
1325 : * @{
1326 : */
1327 :
1328 : /**
1329 : * @brief Generate null timeout delay.
1330 : *
1331 : * This macro generates a timeout delay that instructs a kernel API
1332 : * not to wait if the requested operation cannot be performed immediately.
1333 : *
1334 : * @return Timeout delay value.
1335 : */
1336 1 : #define K_NO_WAIT Z_TIMEOUT_NO_WAIT
1337 :
1338 : /**
1339 : * @brief Generate timeout delay from nanoseconds.
1340 : *
1341 : * This macro generates a timeout delay that instructs a kernel API to
1342 : * wait up to @a t nanoseconds to perform the requested operation.
1343 : * Note that timer precision is limited to the tick rate, not the
1344 : * requested value.
1345 : *
1346 : * @param t Duration in nanoseconds.
1347 : *
1348 : * @return Timeout delay value.
1349 : */
1350 1 : #define K_NSEC(t) Z_TIMEOUT_NS(t)
1351 :
1352 : /**
1353 : * @brief Generate timeout delay from microseconds.
1354 : *
1355 : * This macro generates a timeout delay that instructs a kernel API
1356 : * to wait up to @a t microseconds to perform the requested operation.
1357 : * Note that timer precision is limited to the tick rate, not the
1358 : * requested value.
1359 : *
1360 : * @param t Duration in microseconds.
1361 : *
1362 : * @return Timeout delay value.
1363 : */
1364 1 : #define K_USEC(t) Z_TIMEOUT_US(t)
1365 :
1366 : /**
1367 : * @brief Generate timeout delay from cycles.
1368 : *
1369 : * This macro generates a timeout delay that instructs a kernel API
1370 : * to wait up to @a t cycles to perform the requested operation.
1371 : *
1372 : * @param t Duration in cycles.
1373 : *
1374 : * @return Timeout delay value.
1375 : */
1376 1 : #define K_CYC(t) Z_TIMEOUT_CYC(t)
1377 :
1378 : /**
1379 : * @brief Generate timeout delay from system ticks.
1380 : *
1381 : * This macro generates a timeout delay that instructs a kernel API
1382 : * to wait up to @a t ticks to perform the requested operation.
1383 : *
1384 : * @param t Duration in system ticks.
1385 : *
1386 : * @return Timeout delay value.
1387 : */
1388 1 : #define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1389 :
1390 : /**
1391 : * @brief Generate timeout delay from milliseconds.
1392 : *
1393 : * This macro generates a timeout delay that instructs a kernel API
1394 : * to wait up to @a ms milliseconds to perform the requested operation.
1395 : *
1396 : * @param ms Duration in milliseconds.
1397 : *
1398 : * @return Timeout delay value.
1399 : */
1400 1 : #define K_MSEC(ms) Z_TIMEOUT_MS(ms)
1401 :
1402 : /**
1403 : * @brief Generate timeout delay from seconds.
1404 : *
1405 : * This macro generates a timeout delay that instructs a kernel API
1406 : * to wait up to @a s seconds to perform the requested operation.
1407 : *
1408 : * @param s Duration in seconds.
1409 : *
1410 : * @return Timeout delay value.
1411 : */
1412 1 : #define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
1413 :
1414 : /**
1415 : * @brief Generate timeout delay from minutes.
1416 : *
1417 : * This macro generates a timeout delay that instructs a kernel API
1418 : * to wait up to @a m minutes to perform the requested operation.
1419 : *
1420 : * @param m Duration in minutes.
1421 : *
1422 : * @return Timeout delay value.
1423 : */
1424 1 : #define K_MINUTES(m) K_SECONDS((m) * 60)
1425 :
1426 : /**
1427 : * @brief Generate timeout delay from hours.
1428 : *
1429 : * This macro generates a timeout delay that instructs a kernel API
1430 : * to wait up to @a h hours to perform the requested operation.
1431 : *
1432 : * @param h Duration in hours.
1433 : *
1434 : * @return Timeout delay value.
1435 : */
1436 1 : #define K_HOURS(h) K_MINUTES((h) * 60)
1437 :
1438 : /**
1439 : * @brief Generate infinite timeout delay.
1440 : *
1441 : * This macro generates a timeout delay that instructs a kernel API
1442 : * to wait as long as necessary to perform the requested operation.
1443 : *
1444 : * @return Timeout delay value.
1445 : */
1446 1 : #define K_FOREVER Z_FOREVER
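      :
      : /*
      :  * Example: the same one-and-a-half-second sleep expressed with
      :  * different timeout units.
      :  *
      :  * @code
      :  * k_sleep(K_MSEC(1500));
      :  * k_sleep(K_USEC(1500000));
      :  * k_sleep(K_TICKS(k_ms_to_ticks_ceil32(1500)));
      :  * @endcode
      :  */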
1447 :
1448 : #ifdef CONFIG_TIMEOUT_64BIT
1449 :
1450 : /**
1451 : * @brief Generates an absolute/uptime timeout value from system ticks
1452 : *
1453 : * This macro generates a timeout delay that represents an expiration
1454 : * at the absolute uptime value specified, in system ticks. That is, the
1455 : * timeout will expire immediately after the system uptime reaches the
1456 : * specified tick count.
1457 : *
1458 : * @param t Tick uptime value
1459 : * @return Timeout delay value
1460 : */
1461 : #define K_TIMEOUT_ABS_TICKS(t) \
1462 : Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
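      :
      : /*
      :  * Example: a drift-free 100-tick periodic loop using absolute
      :  * timeouts. A sketch assuming CONFIG_TIMEOUT_64BIT=y;
      :  * do_periodic_work() is hypothetical.
      :  *
      :  * @code
      :  * int64_t next = k_uptime_ticks() + 100;
      :  *
      :  * while (true) {
      :  *     k_sleep(K_TIMEOUT_ABS_TICKS(next));
      :  *     do_periodic_work();
      :  *     next += 100;
      :  * }
      :  * @endcode
      :  */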
1463 :
1464 : /**
1465 : * @brief Generates an absolute/uptime timeout value from milliseconds
1466 : *
1467 : * This macro generates a timeout delay that represents an expiration
1468 : * at the absolute uptime value specified, in milliseconds. That is,
1469 : * the timeout will expire immediately after the system uptime reaches
1470 : * the specified time.
1471 : *
1472 : * @param t Millisecond uptime value
1473 : * @return Timeout delay value
1474 : */
1475 : #define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1476 :
1477 : /**
1478 : * @brief Generates an absolute/uptime timeout value from microseconds
1479 : *
1480 : * This macro generates a timeout delay that represents an expiration
1481 : * at the absolute uptime value specified, in microseconds. That is,
1482 : * the timeout will expire immediately after the system uptime reaches
1483 : * the specified time. Note that timer precision is limited by the
1484 : * system tick rate and not the requested timeout value.
1485 : *
1486 : * @param t Microsecond uptime value
1487 : * @return Timeout delay value
1488 : */
1489 : #define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1490 :
1491 : /**
1492 : * @brief Generates an absolute/uptime timeout value from nanoseconds
1493 : *
1494 : * This macro generates a timeout delay that represents an expiration
1495 : * at the absolute uptime value specified, in nanoseconds. That is,
1496 : * the timeout will expire immediately after the system uptime reaches
1497 : * the specified time. Note that timer precision is limited by the
1498 : * system tick rate and not the requested timeout value.
1499 : *
1500 : * @param t Nanosecond uptime value
1501 : * @return Timeout delay value
1502 : */
1503 : #define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1504 :
1505 : /**
1506 : * @brief Generates an absolute/uptime timeout value from system cycles
1507 : *
1508 : * This macro generates a timeout delay that represents an expiration
1509 : * at the absolute uptime value specified, in cycles. That is, the
1510 : * timeout will expire immediately after the system uptime reaches the
1511 : * specified time. Note that timer precision is limited by the system
1512 : * tick rate and not the requested timeout value.
1513 : *
1514 : * @param t Cycle uptime value
1515 : * @return Timeout delay value
1516 : */
1517 : #define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1518 :
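     : /*
     :  * Usage sketch: absolute timeouts pin an expiration to a point on the
     :  * uptime axis, so a periodic loop does not drift by its processing
     :  * time. A minimal example, assuming CONFIG_TIMEOUT_64BIT is enabled:
     :  *
     :  * @code
     :  * int64_t next_ms = k_uptime_get() + 1000;
     :  *
     :  * for (;;) {
     :  *         k_sleep(K_TIMEOUT_ABS_MS(next_ms));
     :  *         next_ms += 1000;
     :  *         // ... periodic work ...
     :  * }
     :  * @endcode
     :  */
     :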
1519 : #endif
1520 :
1521 : /**
1522 : * @}
1523 : */
1524 :
1525 : /**
1526 : * @cond INTERNAL_HIDDEN
1527 : */
1528 :
1529 : struct k_timer {
1530 : /*
1531 : * _timeout structure must be first here if we want to use
1532 : * dynamic timer allocation. timeout.node is used in the doubly-linked
1533 : * list of free timers.
1534 : */
1535 : struct _timeout timeout;
1536 :
1537 : /* wait queue for the (single) thread waiting on this timer */
1538 : _wait_q_t wait_q;
1539 :
1540 : /* runs in ISR context */
1541 : void (*expiry_fn)(struct k_timer *timer);
1542 :
1543 : /* runs in the context of the thread that calls k_timer_stop() */
1544 : void (*stop_fn)(struct k_timer *timer);
1545 :
1546 : /* timer period */
1547 : k_timeout_t period;
1548 :
1549 : /* timer status */
1550 : uint32_t status;
1551 :
1552 : /* user-specific data, also used to support legacy features */
1553 : void *user_data;
1554 :
1555 : SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
1556 :
1557 : #ifdef CONFIG_OBJ_CORE_TIMER
1558 : struct k_obj_core obj_core;
1559 : #endif
1560 : };
1561 :
1562 : #define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1563 : { \
1564 : .timeout = { \
1565 : .node = {},\
1566 : .fn = z_timer_expiration_handler, \
1567 : .dticks = 0, \
1568 : }, \
1569 : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1570 : .expiry_fn = expiry, \
1571 : .stop_fn = stop, \
1572 : .status = 0, \
1573 : .user_data = 0, \
1574 : }
1575 :
1576 : /**
1577 : * INTERNAL_HIDDEN @endcond
1578 : */
1579 :
1580 : /**
1581 : * @defgroup timer_apis Timer APIs
1582 : * @ingroup kernel_apis
1583 : * @{
1584 : */
1585 :
1586 : /**
1587 : * @typedef k_timer_expiry_t
1588 : * @brief Timer expiry function type.
1589 : *
1590 : * A timer's expiry function is executed by the system clock interrupt handler
1591 : * each time the timer expires. The expiry function is optional, and is only
1592 : * invoked if the timer has been initialized with one.
1593 : *
1594 : * @param timer Address of timer.
1595 : */
1596 1 : typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1597 :
1598 : /**
1599 : * @typedef k_timer_stop_t
1600 : * @brief Timer stop function type.
1601 : *
1602 : * A timer's stop function is executed if the timer is stopped prematurely.
1603 : * The function runs in the context of the call that stops the timer. As
1604 : * k_timer_stop() can be invoked from an ISR, the stop function must be
1605 : * callable from interrupt context (isr-ok).
1606 : *
1607 : * The stop function is optional, and is only invoked if the timer has been
1608 : * initialized with one.
1609 : *
1610 : * @param timer Address of timer.
1611 : */
1612 1 : typedef void (*k_timer_stop_t)(struct k_timer *timer);
1613 :
1614 : /**
1615 : * @brief Statically define and initialize a timer.
1616 : *
1617 : * The timer can be accessed outside the module where it is defined using:
1618 : *
1619 : * @code extern struct k_timer <name>; @endcode
1620 : *
1621 : * @param name Name of the timer variable.
1622 : * @param expiry_fn Function to invoke each time the timer expires.
1623 : * @param stop_fn Function to invoke if the timer is stopped while running.
1624 : */
1625 1 : #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1626 : STRUCT_SECTION_ITERABLE(k_timer, name) = \
1627 : Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1628 :
1629 : /**
1630 : * @brief Initialize a timer.
1631 : *
1632 : * This routine initializes a timer, prior to its first use.
1633 : *
1634 : * @param timer Address of timer.
1635 : * @param expiry_fn Function to invoke each time the timer expires.
1636 : * @param stop_fn Function to invoke if the timer is stopped while running.
1637 : */
1638 1 : void k_timer_init(struct k_timer *timer,
1639 : k_timer_expiry_t expiry_fn,
1640 : k_timer_stop_t stop_fn);
1641 :
1642 : /**
1643 : * @brief Start a timer.
1644 : *
1645 : * This routine starts a timer, and resets its status to zero. The timer
1646 : * begins counting down using the specified duration and period values.
1647 : *
1648 : * Attempting to start a timer that is already running is permitted.
1649 : * The timer's status is reset to zero and the timer begins counting down
1650 : * using the new duration and period values.
1651 : *
1652 : * @param timer Address of timer.
1653 : * @param duration Initial timer duration.
1654 : * @param period Timer period.
1655 : */
1656 1 : __syscall void k_timer_start(struct k_timer *timer,
1657 : k_timeout_t duration, k_timeout_t period);
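     :
     : /*
     :  * Usage sketch (hypothetical names): a periodic timer. The expiry
     :  * handler runs in ISR context, so it must not block.
     :  *
     :  * @code
     :  * static void my_expiry(struct k_timer *timer)
     :  * {
     :  *         // lightweight, ISR-safe work only
     :  * }
     :  *
     :  * K_TIMER_DEFINE(my_timer, my_expiry, NULL);
     :  *
     :  * // First expiry after 500 ms, then every 100 ms thereafter.
     :  * k_timer_start(&my_timer, K_MSEC(500), K_MSEC(100));
     :  * @endcode
     :  */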
1658 :
1659 : /**
1660 : * @brief Stop a timer.
1661 : *
1662 : * This routine stops a running timer prematurely. The timer's stop function,
1663 : * if one exists, is invoked by the caller.
1664 : *
1665 : * Attempting to stop a timer that is not running is permitted, but has no
1666 : * effect on the timer.
1667 : *
1668 : * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1669 : * be called from ISRs.
1670 : *
1671 : * @funcprops \isr_ok
1672 : *
1673 : * @param timer Address of timer.
1674 : */
1675 1 : __syscall void k_timer_stop(struct k_timer *timer);
1676 :
1677 : /**
1678 : * @brief Read timer status.
1679 : *
1680 : * This routine reads the timer's status, which indicates the number of times
1681 : * it has expired since its status was last read.
1682 : *
1683 : * Calling this routine resets the timer's status to zero.
1684 : *
1685 : * @param timer Address of timer.
1686 : *
1687 : * @return Timer status.
1688 : */
1689 1 : __syscall uint32_t k_timer_status_get(struct k_timer *timer);
1690 :
1691 : /**
1692 : * @brief Synchronize thread to timer expiration.
1693 : *
1694 : * This routine blocks the calling thread until the timer's status is non-zero
1695 : * (indicating that it has expired at least once since it was last examined)
1696 : * or the timer is stopped. If the timer status is already non-zero,
1697 : * or the timer is already stopped, the caller continues without waiting.
1698 : *
1699 : * Calling this routine resets the timer's status to zero.
1700 : *
1701 : * This routine must not be used by interrupt handlers, since they are not
1702 : * allowed to block.
1703 : *
1704 : * @param timer Address of timer.
1705 : *
1706 : * @return Timer status.
1707 : */
1708 1 : __syscall uint32_t k_timer_status_sync(struct k_timer *timer);
1709 :
1710 : #ifdef CONFIG_SYS_CLOCK_EXISTS
1711 :
1712 : /**
1713 : * @brief Get next expiration time of a timer, in system ticks
1714 : *
1715 : * This routine returns the future system uptime reached at the next
1716 : * time of expiration of the timer, in units of system ticks. If the
1717 : * timer is not running, the current system time is returned.
1718 : *
1719 : * @param timer The timer object
1720 : * @return Uptime of expiration, in ticks
1721 : */
1722 1 : __syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
1723 :
1724 : static inline k_ticks_t z_impl_k_timer_expires_ticks(
1725 : const struct k_timer *timer)
1726 : {
1727 : return z_timeout_expires(&timer->timeout);
1728 : }
1729 :
1730 : /**
1731 : * @brief Get time remaining before a timer next expires, in system ticks
1732 : *
1733 : * This routine computes the time remaining before a running timer
1734 : * next expires, in units of system ticks. If the timer is not
1735 : * running, it returns zero.
1736 : *
1737 : * @param timer The timer object
1738 : * @return Remaining time until expiration, in ticks
1739 : */
1740 1 : __syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
1741 :
1742 : static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1743 : const struct k_timer *timer)
1744 : {
1745 : return z_timeout_remaining(&timer->timeout);
1746 : }
1747 :
1748 : /**
1749 : * @brief Get time remaining before a timer next expires.
1750 : *
1751 : * This routine computes the (approximate) time remaining before a running
1752 : * timer next expires. If the timer is not running, it returns zero.
1753 : *
1754 : * @param timer Address of timer.
1755 : *
1756 : * @return Remaining time (in milliseconds).
1757 : */
1758 1 : static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
1759 : {
1760 : return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
1761 : }
1762 :
1763 : #endif /* CONFIG_SYS_CLOCK_EXISTS */
1764 :
1765 : /**
1766 : * @brief Associate user-specific data with a timer.
1767 : *
1768 : * This routine records the @a user_data with the @a timer, to be retrieved
1769 : * later.
1770 : *
1771 : * It can be used e.g. in a timer handler shared across multiple subsystems to
1772 : * retrieve data specific to the subsystem this timer is associated with.
1773 : *
1774 : * @param timer Address of timer.
1775 : * @param user_data User data to associate with the timer.
1776 : */
1777 1 : __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1778 :
1779 : /**
1780 : * @internal
1781 : */
1782 : static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
1783 : void *user_data)
1784 : {
1785 : timer->user_data = user_data;
1786 : }
1787 :
1788 : /**
1789 : * @brief Retrieve the user-specific data from a timer.
1790 : *
1791 : * @param timer Address of timer.
1792 : *
1793 : * @return The user data.
1794 : */
1795 1 : __syscall void *k_timer_user_data_get(const struct k_timer *timer);
1796 :
1797 : static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
1798 : {
1799 : return timer->user_data;
1800 : }
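     :
     : /*
     :  * Usage sketch (hypothetical names): one expiry handler shared across
     :  * subsystems, with each timer carrying its own context pointer.
     :  *
     :  * @code
     :  * static void shared_expiry(struct k_timer *timer)
     :  * {
     :  *         struct my_ctx *ctx = k_timer_user_data_get(timer);
     :  *         // ... act on the subsystem-specific context ...
     :  * }
     :  *
     :  * k_timer_user_data_set(&my_timer, &my_ctx_instance);
     :  * @endcode
     :  */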
1801 :
1802 : /** @} */
1803 :
1804 : /**
1805 : * @addtogroup clock_apis
1806 : * @ingroup kernel_apis
1807 : * @{
1808 : */
1809 :
1810 : /**
1811 : * @brief Get system uptime, in system ticks.
1812 : *
1813 : * This routine returns the elapsed time since the system booted, in
1814 : * ticks (cf. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
1815 : * fundamental unit of resolution of kernel timekeeping.
1816 : *
1817 : * @return Current uptime in ticks.
1818 : */
1819 1 : __syscall int64_t k_uptime_ticks(void);
1820 :
1821 : /**
1822 : * @brief Get system uptime.
1823 : *
1824 : * This routine returns the elapsed time since the system booted,
1825 : * in milliseconds.
1826 : *
1827 : * @note
1828 : * While this function returns time in milliseconds, it does
1829 : * not mean it has millisecond resolution. The actual resolution depends on
1830 : * the @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1831 : *
1832 : * @return Current uptime in milliseconds.
1833 : */
1834 1 : static inline int64_t k_uptime_get(void)
1835 : {
1836 : return k_ticks_to_ms_floor64(k_uptime_ticks());
1837 : }
1838 :
1839 : /**
1840 : * @brief Get system uptime (32-bit version).
1841 : *
1842 : * This routine returns the lower 32 bits of the system uptime in
1843 : * milliseconds.
1844 : *
1845 : * Because correct conversion requires full precision of the system
1846 : * clock, there is no benefit to using this over k_uptime_get() unless
1847 : * you know the application will never run long enough for the system
1848 : * clock to approach 2^32 ticks. Calls to this function may involve
1849 : * interrupt blocking and 64-bit math.
1850 : *
1851 : * @note
1852 : * While this function returns time in milliseconds, it does
1853 : * not mean it has millisecond resolution. The actual resolution depends on
1854 : * the @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
1855 : *
1856 : * @return The low 32 bits of the current uptime, in milliseconds.
1857 : */
1858 1 : static inline uint32_t k_uptime_get_32(void)
1859 : {
1860 : return (uint32_t)k_uptime_get();
1861 : }
1862 :
1863 : /**
1864 : * @brief Get system uptime in seconds.
1865 : *
1866 : * This routine returns the elapsed time since the system booted,
1867 : * in seconds.
1868 : *
1869 : * @return Current uptime in seconds.
1870 : */
1871 1 : static inline uint32_t k_uptime_seconds(void)
1872 : {
1873 : return k_ticks_to_sec_floor32(k_uptime_ticks());
1874 : }
1875 :
1876 : /**
1877 : * @brief Get elapsed time.
1878 : *
1879 : * This routine computes the elapsed time between the current system uptime
1880 : * and an earlier reference time, in milliseconds.
1881 : *
1882 : * @param reftime Pointer to a reference time, which is updated to the current
1883 : * uptime upon return.
1884 : *
1885 : * @return Elapsed time.
1886 : */
1887 1 : static inline int64_t k_uptime_delta(int64_t *reftime)
1888 : {
1889 : int64_t uptime, delta;
1890 :
1891 : uptime = k_uptime_get();
1892 : delta = uptime - *reftime;
1893 : *reftime = uptime;
1894 :
1895 : return delta;
1896 : }
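     :
     : /*
     :  * Usage sketch: measuring the elapsed time around an operation;
     :  * "do_work()" stands in for arbitrary application code.
     :  *
     :  * @code
     :  * int64_t ref = k_uptime_get();
     :  *
     :  * do_work();
     :  *
     :  * int64_t elapsed_ms = k_uptime_delta(&ref); // also resets ref to now
     :  * @endcode
     :  */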
1897 :
1898 : /**
1899 : * @brief Read the hardware clock.
1900 : *
1901 : * This routine returns the current time, as measured by the system's hardware
1902 : * clock.
1903 : *
1904 : * @return Current hardware clock up-counter (in cycles).
1905 : */
1906 1 : static inline uint32_t k_cycle_get_32(void)
1907 : {
1908 : return arch_k_cycle_get_32();
1909 : }
1910 :
1911 : /**
1912 : * @brief Read the 64-bit hardware clock.
1913 : *
1914 : * This routine returns the current time in 64-bits, as measured by the
1915 : * system's hardware clock, if available.
1916 : *
1917 : * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
1918 : *
1919 : * @return Current hardware clock up-counter (in cycles).
1920 : */
1921 1 : static inline uint64_t k_cycle_get_64(void)
1922 : {
1923 : if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
1924 : __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
1925 : "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
1926 : return 0;
1927 : }
1928 :
1929 : return arch_k_cycle_get_64();
1930 : }
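     :
     : /*
     :  * Usage sketch: fine-grained timing with the hardware cycle counter.
     :  * The unsigned subtraction tolerates a single 32-bit wraparound, and
     :  * k_cyc_to_ns_floor64() converts the delta to nanoseconds.
     :  *
     :  * @code
     :  * uint32_t start = k_cycle_get_32();
     :  *
     :  * do_work(); // hypothetical workload
     :  *
     :  * uint64_t ns = k_cyc_to_ns_floor64(k_cycle_get_32() - start);
     :  * @endcode
     :  */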
1931 :
1932 : /**
1933 : * @}
1934 : */
1935 :
1936 0 : struct k_queue {
1937 0 : sys_sflist_t data_q;
1938 0 : struct k_spinlock lock;
1939 0 : _wait_q_t wait_q;
1940 :
1941 : Z_DECL_POLL_EVENT
1942 :
1943 : SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
1944 : };
1945 :
1946 : /**
1947 : * @cond INTERNAL_HIDDEN
1948 : */
1949 :
1950 : #define Z_QUEUE_INITIALIZER(obj) \
1951 : { \
1952 : .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
1953 : .lock = { }, \
1954 : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1955 : Z_POLL_EVENT_OBJ_INIT(obj) \
1956 : }
1957 :
1958 : /**
1959 : * INTERNAL_HIDDEN @endcond
1960 : */
1961 :
1962 : /**
1963 : * @defgroup queue_apis Queue APIs
1964 : * @ingroup kernel_apis
1965 : * @{
1966 : */
1967 :
1968 : /**
1969 : * @brief Initialize a queue.
1970 : *
1971 : * This routine initializes a queue object, prior to its first use.
1972 : *
1973 : * @param queue Address of the queue.
1974 : */
1975 1 : __syscall void k_queue_init(struct k_queue *queue);
1976 :
1977 : /**
1978 : * @brief Cancel waiting on a queue.
1979 : *
1980 : * This routine causes the first thread pending on @a queue, if any, to
1981 : * return from its k_queue_get() call with NULL (as if its timeout expired).
1982 : * If the queue is being waited on by k_poll(), it will return with
1983 : * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1984 : * k_queue_get() will return NULL).
1985 : *
1986 : * @funcprops \isr_ok
1987 : *
1988 : * @param queue Address of the queue.
1989 : */
1990 1 : __syscall void k_queue_cancel_wait(struct k_queue *queue);
1991 :
1992 : /**
1993 : * @brief Append an element to the end of a queue.
1994 : *
1995 : * This routine appends a data item to @a queue. A queue data item must be
1996 : * aligned on a word boundary, and the first word of the item is reserved
1997 : * for the kernel's use.
1998 : *
1999 : * @funcprops \isr_ok
2000 : *
2001 : * @param queue Address of the queue.
2002 : * @param data Address of the data item.
2003 : */
2004 1 : void k_queue_append(struct k_queue *queue, void *data);
2005 :
2006 : /**
2007 : * @brief Append an element to a queue.
2008 : *
2009 : * This routine appends a data item to @a queue. There is an implicit memory
2010 : * allocation to create an additional temporary bookkeeping data structure from
2011 : * the calling thread's resource pool, which is automatically freed when the
2012 : * item is removed. The data itself is not copied.
2013 : *
2014 : * @funcprops \isr_ok
2015 : *
2016 : * @param queue Address of the queue.
2017 : * @param data Address of the data item.
2018 : *
2019 : * @retval 0 on success
2020 : * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2021 : */
2022 1 : __syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
2023 :
2024 : /**
2025 : * @brief Prepend an element to a queue.
2026 : *
2027 : * This routine prepends a data item to @a queue. A queue data item must be
2028 : * aligned on a word boundary, and the first word of the item is reserved
2029 : * for the kernel's use.
2030 : *
2031 : * @funcprops \isr_ok
2032 : *
2033 : * @param queue Address of the queue.
2034 : * @param data Address of the data item.
2035 : */
2036 1 : void k_queue_prepend(struct k_queue *queue, void *data);
2037 :
2038 : /**
2039 : * @brief Prepend an element to a queue.
2040 : *
2041 : * This routine prepends a data item to @a queue. There is an implicit memory
2042 : * allocation to create an additional temporary bookkeeping data structure from
2043 : * the calling thread's resource pool, which is automatically freed when the
2044 : * item is removed. The data itself is not copied.
2045 : *
2046 : * @funcprops \isr_ok
2047 : *
2048 : * @param queue Address of the queue.
2049 : * @param data Address of the data item.
2050 : *
2051 : * @retval 0 on success
2052 : * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2053 : */
2054 1 : __syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
2055 :
2056 : /**
2057 : * @brief Insert an element into a queue.
2058 : *
2059 : * This routine inserts a data item into @a queue after a previous item. A queue
2060 : * data item must be aligned on a word boundary, and the first word of
2061 : * the item is reserved for the kernel's use.
2062 : *
2063 : * @funcprops \isr_ok
2064 : *
2065 : * @param queue Address of the queue.
2066 : * @param prev Address of the previous data item.
2067 : * @param data Address of the data item.
2068 : */
2069 1 : void k_queue_insert(struct k_queue *queue, void *prev, void *data);
2070 :
2071 : /**
2072 : * @brief Atomically append a list of elements to a queue.
2073 : *
2074 : * This routine adds a list of data items to @a queue in one operation.
2075 : * The data items must be in a singly-linked list, with the first word
2076 : * in each data item pointing to the next data item; the list must be
2077 : * NULL-terminated.
2078 : *
2079 : * @funcprops \isr_ok
2080 : *
2081 : * @param queue Address of the queue.
2082 : * @param head Pointer to first node in singly-linked list.
2083 : * @param tail Pointer to last node in singly-linked list.
2084 : *
2085 : * @retval 0 on success
2086 : * @retval -EINVAL on invalid supplied data
2087 : *
2088 : */
2089 1 : int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
2090 :
2091 : /**
2092 : * @brief Atomically add a list of elements to a queue.
2093 : *
2094 : * This routine adds a list of data items to @a queue in one operation.
2095 : * The data items must be in a singly-linked list implemented using a
2096 : * sys_slist_t object. Upon completion, the original list is empty.
2097 : *
2098 : * @funcprops \isr_ok
2099 : *
2100 : * @param queue Address of the queue.
2101 : * @param list Pointer to sys_slist_t object.
2102 : *
2103 : * @retval 0 on success
2104 : * @retval -EINVAL on invalid data
2105 : */
2106 1 : int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
2107 :
2108 : /**
2109 : * @brief Get an element from a queue.
2110 : *
2111 : * This routine removes the first data item from @a queue. The first word of the
2112 : * data item is reserved for the kernel's use.
2113 : *
2114 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2115 : *
2116 : * @funcprops \isr_ok
2117 : *
2118 : * @param queue Address of the queue.
2119 : * @param timeout Waiting period to obtain a data item, or one of the special
2120 : * values K_NO_WAIT and K_FOREVER.
2121 : *
2122 : * @return Address of the data item if successful; NULL if returned
2123 : * without waiting, or waiting period timed out.
2124 : */
2125 1 : __syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
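     :
     : /*
     :  * Usage sketch (hypothetical names): queue items must reserve their
     :  * first word for the kernel; "my_queue" is an initialized k_queue.
     :  *
     :  * @code
     :  * struct qitem {
     :  *         void *reserved; // first word: for kernel use
     :  *         uint32_t payload;
     :  * };
     :  *
     :  * static struct qitem item = { .payload = 42 };
     :  *
     :  * k_queue_append(&my_queue, &item);
     :  * struct qitem *rx = k_queue_get(&my_queue, K_FOREVER);
     :  * @endcode
     :  */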
2126 :
2127 : /**
2128 : * @brief Remove an element from a queue.
2129 : *
2130 : * This routine removes a data item from @a queue. The first word of the
2131 : * data item is reserved for the kernel's use. Removing elements from a k_queue
2132 : * relies on sys_slist_find_and_remove, which is not a constant-time operation.
2133 : *
2134 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2135 : *
2136 : * @funcprops \isr_ok
2137 : *
2138 : * @param queue Address of the queue.
2139 : * @param data Address of the data item.
2140 : *
2141 : * @return true if data item was removed
2142 : */
2143 1 : bool k_queue_remove(struct k_queue *queue, void *data);
2144 :
2145 : /**
2146 : * @brief Append an element to a queue only if it's not present already.
2147 : *
2148 : * This routine appends a data item to @a queue. The first word of the data
2149 : * item is reserved for the kernel's use. Appending elements to a k_queue
2150 : * relies on sys_slist_is_node_in_list, which is not a constant-time operation.
2151 : *
2152 : * @funcprops \isr_ok
2153 : *
2154 : * @param queue Address of the queue.
2155 : * @param data Address of the data item.
2156 : *
2157 : * @return true if data item was added, false if not
2158 : */
2159 1 : bool k_queue_unique_append(struct k_queue *queue, void *data);
2160 :
2161 : /**
2162 : * @brief Query a queue to see if it has data available.
2163 : *
2164 : * Note that the data might already be gone by the time this function returns
2165 : * if other threads are also trying to read from the queue.
2166 : *
2167 : * @funcprops \isr_ok
2168 : *
2169 : * @param queue Address of the queue.
2170 : *
2171 : * @return Non-zero if the queue is empty.
2172 : * @return 0 if data is available.
2173 : */
2174 1 : __syscall int k_queue_is_empty(struct k_queue *queue);
2175 :
2176 : static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
2177 : {
2178 : return sys_sflist_is_empty(&queue->data_q) ? 1 : 0;
2179 : }
2180 :
2181 : /**
2182 : * @brief Peek element at the head of queue.
2183 : *
2184 : * Return element from the head of queue without removing it.
2185 : *
2186 : * @param queue Address of the queue.
2187 : *
2188 : * @return Head element, or NULL if queue is empty.
2189 : */
2190 1 : __syscall void *k_queue_peek_head(struct k_queue *queue);
2191 :
2192 : /**
2193 : * @brief Peek element at the tail of queue.
2194 : *
2195 : * Return element from the tail of queue without removing it.
2196 : *
2197 : * @param queue Address of the queue.
2198 : *
2199 : * @return Tail element, or NULL if queue is empty.
2200 : */
2201 1 : __syscall void *k_queue_peek_tail(struct k_queue *queue);
2202 :
2203 : /**
2204 : * @brief Statically define and initialize a queue.
2205 : *
2206 : * The queue can be accessed outside the module where it is defined using:
2207 : *
2208 : * @code extern struct k_queue <name>; @endcode
2209 : *
2210 : * @param name Name of the queue.
2211 : */
2212 1 : #define K_QUEUE_DEFINE(name) \
2213 : STRUCT_SECTION_ITERABLE(k_queue, name) = \
2214 : Z_QUEUE_INITIALIZER(name)
2215 :
2216 : /** @} */
2217 :
2218 : #ifdef CONFIG_USERSPACE
2219 : /**
2220 : * @brief futex structure
2221 : *
2222 : * A k_futex is a lightweight mutual exclusion primitive designed
2223 : * to minimize kernel involvement. Uncontended operation relies
2224 : * only on atomic access to shared memory. k_futex objects are tracked as
2225 : * kernel objects and can live in user memory so that any access
2226 : * bypasses the kernel object permission management mechanism.
2227 : */
2228 1 : struct k_futex {
2229 0 : atomic_t val;
2230 : };
2231 :
2232 : /**
2233 : * @brief futex kernel data structure
2234 : *
2235 : * z_futex_data is the helper data structure that k_futex uses to complete
2236 : * contended futex operations on the kernel side; the z_futex_data structure
2237 : * of every futex object is invisible in user mode.
2238 : */
2239 : struct z_futex_data {
2240 : _wait_q_t wait_q;
2241 : struct k_spinlock lock;
2242 : };
2243 :
2244 : #define Z_FUTEX_DATA_INITIALIZER(obj) \
2245 : { \
2246 : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2247 : }
2248 :
2249 : /**
2250 : * @defgroup futex_apis FUTEX APIs
2251 : * @ingroup kernel_apis
2252 : * @{
2253 : */
2254 :
2255 : /**
2256 : * @brief Pend the current thread on a futex
2257 : *
2258 : * Tests that the supplied futex contains the expected value, and if so,
2259 : * goes to sleep until some other thread calls k_futex_wake() on it.
2260 : *
2261 : * @param futex Address of the futex.
2262 : * @param expected Expected value of the futex, if it is different the caller
2263 : * will not wait on it.
2264 : * @param timeout Waiting period on the futex, or one of the special values
2265 : * K_NO_WAIT or K_FOREVER.
2266 : * @retval -EACCES Caller does not have read access to futex address.
2267 : * @retval -EAGAIN If the futex value did not match the expected parameter.
2268 : * @retval -EINVAL Futex parameter address not recognized by the kernel.
2269 : * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2270 : * @retval 0 if the caller went to sleep and was woken up. The caller
2271 : * should check the futex's value on wakeup to determine if it needs
2272 : * to block again.
2273 : */
2274 1 : __syscall int k_futex_wait(struct k_futex *futex, int expected,
2275 : k_timeout_t timeout);
2276 :
2277 : /**
2278 : * @brief Wake one/all threads pending on a futex
2279 : *
2280 : * Wake up the highest priority thread pending on the supplied futex, or
2281 : * wake up all threads pending on the supplied futex, depending on the
2282 : * value of @a wake_all.
2283 : *
2284 : * @param futex Futex to wake up pending threads.
2285 : * @param wake_all If true, wake up all pending threads; If false,
2286 : * wakeup the highest priority thread.
2287 : * @retval -EACCES Caller does not have access to the futex address.
2288 : * @retval -EINVAL Futex parameter address not recognized by the kernel.
2289 : * @retval Number of threads that were woken up.
2290 : */
2291 1 : __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
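     :
     : /*
     :  * Usage sketch: a one-shot signal built on a futex. Waiters sleep
     :  * while the value is still 0; the signaler publishes 1 and wakes all.
     :  *
     :  * @code
     :  * static struct k_futex flag = { .val = ATOMIC_INIT(0) };
     :  *
     :  * // Waiter:
     :  * while (atomic_get(&flag.val) == 0) {
     :  *         (void)k_futex_wait(&flag, 0, K_FOREVER);
     :  * }
     :  *
     :  * // Signaler:
     :  * atomic_set(&flag.val, 1);
     :  * (void)k_futex_wake(&flag, true);
     :  * @endcode
     :  */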
2292 :
2293 : /** @} */
2294 : #endif
2295 :
2296 : /**
2297 : * @defgroup event_apis Event APIs
2298 : * @ingroup kernel_apis
2299 : * @{
2300 : */
2301 :
2302 : /**
2303 : * Event Structure
2304 : * @ingroup event_apis
2305 : */
2306 :
2307 1 : struct k_event {
2308 0 : _wait_q_t wait_q;
2309 0 : uint32_t events;
2310 0 : struct k_spinlock lock;
2311 :
2312 : SYS_PORT_TRACING_TRACKING_FIELD(k_event)
2313 :
2314 : #ifdef CONFIG_OBJ_CORE_EVENT
2315 : struct k_obj_core obj_core;
2316 : #endif
2317 :
2318 : };
2319 :
2320 : #define Z_EVENT_INITIALIZER(obj) \
2321 : { \
2322 : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2323 : .events = 0 \
2324 : }
2325 :
2326 : /**
2327 : * @brief Initialize an event object
2328 : *
2329 : * This routine initializes an event object, prior to its first use.
2330 : *
2331 : * @param event Address of the event object.
2332 : */
2333 1 : __syscall void k_event_init(struct k_event *event);
2334 :
2335 : /**
2336 : * @brief Post one or more events to an event object
2337 : *
2338 : * This routine posts one or more events to an event object. All tasks waiting
2339 : * on the event object @a event whose waiting conditions are met by this
2340 : * posting immediately unpend.
2341 : *
2342 : * Posting differs from setting in that posted events are merged together with
2343 : * the current set of events tracked by the event object.
2344 : *
2345 : * @param event Address of the event object
2346 : * @param events Set of events to post to @a event
2347 : *
2348 : * @retval Previous value of the events in @a event
2349 : */
2350 1 : __syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
2351 :
2352 : /**
2353 : * @brief Set the events in an event object
2354 : *
2355 : * This routine sets the events stored in event object to the specified value.
2356 : * All tasks waiting on the event object @a event whose waiting conditions
2357 : * are met by this operation immediately unpend.
2358 : *
2359 : * Setting differs from posting in that set events replace the current set of
2360 : * events tracked by the event object.
2361 : *
2362 : * @param event Address of the event object
2363 : * @param events Set of events to set in @a event
2364 : *
2365 : * @retval Previous value of the events in @a event
2366 : */
2367 1 : __syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
2368 :
2369 : /**
2370 : * @brief Set or clear the events in an event object
2371 : *
2372 : * This routine sets the events stored in event object to the specified value.
2373 : * All tasks waiting on the event object @a event whose waiting conditions
2374 : * are met by this operation immediately unpend. Unlike @ref k_event_set, this routine
2375 : * allows specific event bits to be set and cleared as determined by the mask.
2376 : *
2377 : * @param event Address of the event object
2378 : * @param events Set of events to set/clear in @a event
2379 : * @param events_mask Mask to be applied to @a events
2380 : *
2381 : * @retval Previous value of the events in @a events_mask
2382 : */
2383 1 : __syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
2384 : uint32_t events_mask);
2385 :
2386 : /**
2387 : * @brief Clear the events in an event object
2388 : *
2389 : * This routine clears (resets) the specified events stored in an event object.
2390 : *
2391 : * @param event Address of the event object
2392 : * @param events Set of events to clear in @a event
2393 : *
2394 : * @retval Previous value of the events in @a event
2395 : */
2396 1 : __syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
2397 :
2398 : /**
2399 : * @brief Wait for any of the specified events
2400 : *
2401 : * This routine waits on event object @a event until any of the specified
2402 : * events have been delivered to the event object, or the maximum wait time
2403 : * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2404 : * events that are expressed as bits in a single 32-bit word.
2405 : *
2406 : * @note The caller must be careful when resetting if there are multiple threads
2407 : * waiting for the event object @a event.
2408 : *
2409 : * @param event Address of the event object
2410 : * @param events Set of desired events on which to wait
2411 : * @param reset If true, clear the set of events tracked by the event object
2412 : * before waiting. If false, do not clear the events.
2413 : * @param timeout Waiting period for the desired set of events or one of the
2414 : * special values K_NO_WAIT and K_FOREVER.
2415 : *
2416 : * @retval set of matching events upon success
2417 : * @retval 0 if matching events were not received within the specified time
2418 : */
2419 1 : __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2420 : bool reset, k_timeout_t timeout);
2421 :
2422 : /**
2423 : * @brief Wait for all of the specified events
2424 : *
2425 : * This routine waits on event object @a event until all of the specified
2426 : * events have been delivered to the event object, or the maximum wait time
2427 : * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2428 : * events that are expressed as bits in a single 32-bit word.
2429 : *
2430 : * @note The caller must be careful when resetting if there are multiple threads
2431 : * waiting for the event object @a event.
2432 : *
2433 : * @param event Address of the event object
2434 : * @param events Set of desired events on which to wait
2435 : * @param reset If true, clear the set of events tracked by the event object
2436 : * before waiting. If false, do not clear the events.
2437 : * @param timeout Waiting period for the desired set of events or one of the
2438 : * special values K_NO_WAIT and K_FOREVER.
2439 : *
2440 : * @retval set of matching events upon success
2441 : * @retval 0 if matching events were not received within the specified time
2442 : */
2443 1 : __syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2444 : bool reset, k_timeout_t timeout);
2445 :
2446 : /**
2447 : * @brief Test the events currently tracked in the event object
2448 : *
2449 : * @param event Address of the event object
2450 : * @param events_mask Set of desired events to test
2451 : *
2452 : * @retval Current value of events in @a events_mask
2453 : */
2454 1 : static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
2455 : {
2456 : return k_event_wait(event, events_mask, false, K_NO_WAIT);
2457 : }
2458 :
2459 : /**
2460 : * @brief Statically define and initialize an event object
2461 : *
2462 : * The event can be accessed outside the module where it is defined using:
2463 : *
2464 : * @code extern struct k_event <name>; @endcode
2465 : *
2466 : * @param name Name of the event object.
2467 : */
2468 1 : #define K_EVENT_DEFINE(name) \
2469 : STRUCT_SECTION_ITERABLE(k_event, name) = \
2470 : Z_EVENT_INITIALIZER(name);
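     :
     : /*
     :  * Usage sketch (hypothetical event bits): an ISR or thread posts
     :  * events; a consumer waits for any of them with a bounded timeout.
     :  *
     :  * @code
     :  * #define EV_RX_DONE BIT(0)
     :  * #define EV_TX_DONE BIT(1)
     :  *
     :  * K_EVENT_DEFINE(my_event);
     :  *
     :  * // Producer:
     :  * k_event_post(&my_event, EV_RX_DONE);
     :  *
     :  * // Consumer: a return of 0 means no matching event within 100 ms.
     :  * uint32_t ev = k_event_wait(&my_event, EV_RX_DONE | EV_TX_DONE,
     :  *                            false, K_MSEC(100));
     :  * @endcode
     :  */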
2471 :
2472 : /** @} */
2473 :
2474 0 : struct k_fifo {
2475 : struct k_queue _queue;
2476 : #ifdef CONFIG_OBJ_CORE_FIFO
2477 : struct k_obj_core obj_core;
2478 : #endif
2479 : };
2480 :
2481 : /**
2482 : * @cond INTERNAL_HIDDEN
2483 : */
2484 : #define Z_FIFO_INITIALIZER(obj) \
2485 : { \
2486 : ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2487 : }
2488 :
2489 : /**
2490 : * INTERNAL_HIDDEN @endcond
2491 : */
2492 :
2493 : /**
2494 : * @defgroup fifo_apis FIFO APIs
2495 : * @ingroup kernel_apis
2496 : * @{
2497 : */
2498 :
2499 : /**
2500 : * @brief Initialize a FIFO queue.
2501 : *
2502 : * This routine initializes a FIFO queue, prior to its first use.
2503 : *
2504 : * @param fifo Address of the FIFO queue.
2505 : */
2506 1 : #define k_fifo_init(fifo) \
2507 : ({ \
2508 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2509 : k_queue_init(&(fifo)->_queue); \
2510 : K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo); \
2511 : K_OBJ_CORE_LINK(K_OBJ_CORE(fifo)); \
2512 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2513 : })
2514 :
2515 : /**
2516 : * @brief Cancel waiting on a FIFO queue.
2517 : *
2518 : * This routine causes the first thread pending on @a fifo, if any, to
2519 : * return from its k_fifo_get() call with NULL (as if its timeout
2520 : * expired).
2521 : *
2522 : * @funcprops \isr_ok
2523 : *
2524 : * @param fifo Address of the FIFO queue.
2525 : */
2526 1 : #define k_fifo_cancel_wait(fifo) \
2527 : ({ \
2528 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2529 : k_queue_cancel_wait(&(fifo)->_queue); \
2530 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2531 : })
2532 :
2533 : /**
2534 : * @brief Add an element to a FIFO queue.
2535 : *
2536 : * This routine adds a data item to @a fifo. A FIFO data item must be
2537 : * aligned on a word boundary, and the first word of the item is reserved
2538 : * for the kernel's use.
2539 : *
2540 : * @funcprops \isr_ok
2541 : *
2542 : * @param fifo Address of the FIFO queue.
2543 : * @param data Address of the data item.
2544 : */
2545 1 : #define k_fifo_put(fifo, data) \
2546 : ({ \
2547 : void *_data = data; \
2548 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, _data); \
2549 : k_queue_append(&(fifo)->_queue, _data); \
2550 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, _data); \
2551 : })
2552 :
2553 : /**
2554 : * @brief Add an element to a FIFO queue.
2555 : *
2556 : * This routine adds a data item to @a fifo. There is an implicit memory
2557 : * allocation to create an additional temporary bookkeeping data structure from
2558 : * the calling thread's resource pool, which is automatically freed when the
2559 : * item is removed. The data itself is not copied.
2560 : *
2561 : * @funcprops \isr_ok
2562 : *
2563 : * @param fifo Address of the FIFO queue.
2564 : * @param data Address of the data item.
2565 : *
2566 : * @retval 0 on success
2567 : * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2568 : */
2569 1 : #define k_fifo_alloc_put(fifo, data) \
2570 : ({ \
2571 : void *_data = data; \
2572 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, _data); \
2573 : int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
2574 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, _data, fap_ret); \
2575 : fap_ret; \
2576 : })
2577 :
2578 : /**
2579 : * @brief Atomically add a list of elements to a FIFO.
2580 : *
2581 : * This routine adds a list of data items to @a fifo in one operation.
2582 : * The data items must be in a singly-linked list, with the first word of
2583 : * each data item pointing to the next data item; the list must be
2584 : * NULL-terminated.
2585 : *
2586 : * @funcprops \isr_ok
2587 : *
2588 : * @param fifo Address of the FIFO queue.
2589 : * @param head Pointer to first node in singly-linked list.
2590 : * @param tail Pointer to last node in singly-linked list.
2591 : */
2592 1 : #define k_fifo_put_list(fifo, head, tail) \
2593 : ({ \
2594 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2595 : k_queue_append_list(&(fifo)->_queue, head, tail); \
2596 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2597 : })
2598 :
2599 : /**
2600 : * @brief Atomically add a list of elements to a FIFO queue.
2601 : *
2602 : * This routine adds a list of data items to @a fifo in one operation.
2603 : * The data items must be in a singly-linked list implemented using a
2604 : * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
2605 : * and must be re-initialized via sys_slist_init().
2606 : *
2607 : * @funcprops \isr_ok
2608 : *
2609 : * @param fifo Address of the FIFO queue.
2610 : * @param list Pointer to sys_slist_t object.
2611 : */
2612 1 : #define k_fifo_put_slist(fifo, list) \
2613 : ({ \
2614 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2615 : k_queue_merge_slist(&(fifo)->_queue, list); \
2616 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2617 : })
2618 :
2619 : /**
2620 : * @brief Get an element from a FIFO queue.
2621 : *
2622 : * This routine removes a data item from @a fifo in a "first in, first out"
2623 : * manner. The first word of the data item is reserved for the kernel's use.
2624 : *
2625 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2626 : *
2627 : * @funcprops \isr_ok
2628 : *
2629 : * @param fifo Address of the FIFO queue.
2630 : * @param timeout Waiting period to obtain a data item,
2631 : * or one of the special values K_NO_WAIT and K_FOREVER.
2632 : *
2633 : * @return Address of the data item if successful; NULL if returned
2634 : * without waiting, or waiting period timed out.
2635 : */
2636 1 : #define k_fifo_get(fifo, timeout) \
2637 : ({ \
2638 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2639 : void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
2640 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
2641 : fg_ret; \
2642 : })
2643 :
2644 : /**
2645 : * @brief Query a FIFO queue to see if it has data available.
2646 : *
2647 : * Note that the data might already be gone by the time this function returns
2648 : * if other threads are also trying to read from the FIFO.
2649 : *
2650 : * @funcprops \isr_ok
2651 : *
2652 : * @param fifo Address of the FIFO queue.
2653 : *
2654 : * @return Non-zero if the FIFO queue is empty.
2655 : * @return 0 if data is available.
2656 : */
2657 1 : #define k_fifo_is_empty(fifo) \
2658 : k_queue_is_empty(&(fifo)->_queue)
2659 :
2660 : /**
2661 : * @brief Peek element at the head of a FIFO queue.
2662 : *
2663 : * Return an element from the head of a FIFO queue without removing it.
2664 : * A use case for this is when the elements of the FIFO queue are
2665 : * themselves containers. Then, on each iteration of processing, the head
2666 : * container is peeked and some data is processed out of it; only once
2667 : * the container is empty is it completely removed from the FIFO queue.
2668 : *
2669 : * @param fifo Address of the FIFO queue.
2670 : *
2671 : * @return Head element, or NULL if the FIFO queue is empty.
2672 : */
2673 1 : #define k_fifo_peek_head(fifo) \
2674 : ({ \
2675 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2676 : void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
2677 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
2678 : fph_ret; \
2679 : })
2680 :
2681 : /**
2682 : * @brief Peek element at the tail of FIFO queue.
2683 : *
2684 : * Return an element from the tail of a FIFO queue (without removing it).
2685 : * A use case for this is when the elements of the FIFO queue are themselves
2686 : * containers; then it may be useful to add more data to the last container.
2687 : *
2688 : * @param fifo Address of the FIFO queue.
2689 : *
2690 : * @return Tail element, or NULL if a FIFO queue is empty.
2691 : */
2692 1 : #define k_fifo_peek_tail(fifo) \
2693 : ({ \
2694 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2695 : void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
2696 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
2697 : fpt_ret; \
2698 : })
2699 :
2700 : /**
2701 : * @brief Statically define and initialize a FIFO queue.
2702 : *
2703 : * The FIFO queue can be accessed outside the module where it is defined using:
2704 : *
2705 : * @code extern struct k_fifo <name>; @endcode
2706 : *
2707 : * @param name Name of the FIFO queue.
2708 : */
2709 1 : #define K_FIFO_DEFINE(name) \
2710 : STRUCT_SECTION_ITERABLE(k_fifo, name) = \
2711 : Z_FIFO_INITIALIZER(name)
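     :
     : /*
     :  * Usage sketch (hypothetical names): the canonical FIFO pattern, with
     :  * the first word of each item reserved for the kernel.
     :  *
     :  * @code
     :  * struct data_item {
     :  *         void *fifo_reserved; // first word: for kernel use
     :  *         uint32_t value;
     :  * };
     :  *
     :  * K_FIFO_DEFINE(my_fifo);
     :  *
     :  * static struct data_item item = { .value = 42 };
     :  *
     :  * k_fifo_put(&my_fifo, &item);
     :  * struct data_item *rx = k_fifo_get(&my_fifo, K_FOREVER);
     :  * @endcode
     :  */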
2712 :
2713 : /** @} */
2714 :
2715 0 : struct k_lifo {
2716 : struct k_queue _queue;
2717 : #ifdef CONFIG_OBJ_CORE_LIFO
2718 : struct k_obj_core obj_core;
2719 : #endif
2720 : };
2721 :
2722 : /**
2723 : * @cond INTERNAL_HIDDEN
2724 : */
2725 :
2726 : #define Z_LIFO_INITIALIZER(obj) \
2727 : { \
2728 : ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2729 : }
2730 :
2731 : /**
2732 : * INTERNAL_HIDDEN @endcond
2733 : */
2734 :
2735 : /**
2736 : * @defgroup lifo_apis LIFO APIs
2737 : * @ingroup kernel_apis
2738 : * @{
2739 : */
2740 :
2741 : /**
2742 : * @brief Initialize a LIFO queue.
2743 : *
2744 : * This routine initializes a LIFO queue object, prior to its first use.
2745 : *
2746 : * @param lifo Address of the LIFO queue.
2747 : */
2748 1 : #define k_lifo_init(lifo) \
2749 : ({ \
2750 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2751 : k_queue_init(&(lifo)->_queue); \
2752 : K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo); \
2753 : K_OBJ_CORE_LINK(K_OBJ_CORE(lifo)); \
2754 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
2755 : })
2756 :
2757 : /**
2758 : * @brief Add an element to a LIFO queue.
2759 : *
2760 : * This routine adds a data item to @a lifo. A LIFO queue data item must be
2761 : * aligned on a word boundary, and the first word of the item is
2762 : * reserved for the kernel's use.
2763 : *
2764 : * @funcprops \isr_ok
2765 : *
2766 : * @param lifo Address of the LIFO queue.
2767 : * @param data Address of the data item.
2768 : */
2769 1 : #define k_lifo_put(lifo, data) \
2770 : ({ \
2771 : void *_data = data; \
2772 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, _data); \
2773 : k_queue_prepend(&(lifo)->_queue, _data); \
2774 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, _data); \
2775 : })
2776 :
2777 : /**
2778 : * @brief Add an element to a LIFO queue.
2779 : *
2780 : * This routine adds a data item to @a lifo. There is an implicit memory
2781 : * allocation to create an additional temporary bookkeeping data structure from
2782 : * the calling thread's resource pool, which is automatically freed when the
2783 : * item is removed. The data itself is not copied.
2784 : *
2785 : * @funcprops \isr_ok
2786 : *
2787 : * @param lifo Address of the LIFO queue.
2788 : * @param data Address of the data item.
2789 : *
2790 : * @retval 0 on success
2791 : * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2792 : */
2793 1 : #define k_lifo_alloc_put(lifo, data) \
2794 : ({ \
2795 : void *_data = data; \
2796 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, _data); \
2797 : int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
2798 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, _data, lap_ret); \
2799 : lap_ret; \
2800 : })
2801 :
2802 : /**
2803 : * @brief Get an element from a LIFO queue.
2804 : *
2805 : * This routine removes a data item from @a lifo in a "last in, first out"
2806 : * manner. The first word of the data item is reserved for the kernel's use.
2807 : *
2808 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2809 : *
2810 : * @funcprops \isr_ok
2811 : *
2812 : * @param lifo Address of the LIFO queue.
2813 : * @param timeout Waiting period to obtain a data item,
2814 : * or one of the special values K_NO_WAIT and K_FOREVER.
2815 : *
2816 : * @return Address of the data item if successful; NULL if returned
2817 : * without waiting, or waiting period timed out.
2818 : */
2819 1 : #define k_lifo_get(lifo, timeout) \
2820 : ({ \
2821 : SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
2822 : void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
2823 : SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
2824 : lg_ret; \
2825 : })
2826 :
2827 : /**
2828 : * @brief Statically define and initialize a LIFO queue.
2829 : *
2830 : * The LIFO queue can be accessed outside the module where it is defined using:
2831 : *
2832 : * @code extern struct k_lifo <name>; @endcode
2833 : *
2834 : * @param name Name of the LIFO queue.
2835 : */
2836 1 : #define K_LIFO_DEFINE(name) \
2837 : STRUCT_SECTION_ITERABLE(k_lifo, name) = \
2838 : Z_LIFO_INITIALIZER(name)
2839 :
2840 : /** @} */
2841 :
2842 : /**
2843 : * @cond INTERNAL_HIDDEN
2844 : */
2845 : #define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
2846 :
2847 : typedef uintptr_t stack_data_t;
2848 :
2849 : struct k_stack {
2850 : _wait_q_t wait_q;
2851 : struct k_spinlock lock;
2852 : stack_data_t *base, *next, *top;
2853 :
2854 : uint8_t flags;
2855 :
2856 : SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
2857 :
2858 : #ifdef CONFIG_OBJ_CORE_STACK
2859 : struct k_obj_core obj_core;
2860 : #endif
2861 : };
2862 :
2863 : #define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
2864 : { \
2865 : .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
2866 : .base = (stack_buffer), \
2867 : .next = (stack_buffer), \
2868 : .top = (stack_buffer) + (stack_num_entries), \
2869 : }
2870 :
2871 : /**
2872 : * INTERNAL_HIDDEN @endcond
2873 : */
2874 :
2875 : /**
2876 : * @defgroup stack_apis Stack APIs
2877 : * @ingroup kernel_apis
2878 : * @{
2879 : */
2880 :
2881 : /**
2882 : * @brief Initialize a stack.
2883 : *
2884 : * This routine initializes a stack object, prior to its first use.
2885 : *
2886 : * @param stack Address of the stack.
2887 : * @param buffer Address of array used to hold stacked values.
2888 : * @param num_entries Maximum number of values that can be stacked.
2889 : */
2890 1 : void k_stack_init(struct k_stack *stack,
2891 : stack_data_t *buffer, uint32_t num_entries);
2892 :
2893 :
2894 : /**
2895 : * @brief Initialize a stack.
2896 : *
2897 : * This routine initializes a stack object, prior to its first use. Internal
2898 : * buffers will be allocated from the calling thread's resource pool.
2899 : * This memory will be released if k_stack_cleanup() is called, or if
2900 : * userspace is enabled and the stack object loses all references to it.
2901 : *
2902 : * @param stack Address of the stack.
2903 : * @param num_entries Maximum number of values that can be stacked.
2904 : *
2905 : * @return -ENOMEM if memory couldn't be allocated
2906 : */
2908 1 : __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2909 : uint32_t num_entries);
2910 :
2911 : /**
2912 : * @brief Release a stack's allocated buffer
2913 : *
2914 : * If a stack object was given a dynamically allocated buffer via
2915 : * k_stack_alloc_init(), this will free it. This function does nothing
2916 : * if the buffer wasn't dynamically allocated.
2917 : *
2918 : * @param stack Address of the stack.
2919 : * @retval 0 on success
2920 : * @retval -EAGAIN when object is still in use
2921 : */
2922 1 : int k_stack_cleanup(struct k_stack *stack);
2923 :
2924 : /**
2925 : * @brief Push an element onto a stack.
2926 : *
2927 : * This routine adds a stack_data_t value @a data to @a stack.
2928 : *
2929 : * @funcprops \isr_ok
2930 : *
2931 : * @param stack Address of the stack.
2932 : * @param data Value to push onto the stack.
2933 : *
2934 : * @retval 0 on success
2935 : * @retval -ENOMEM if stack is full
2936 : */
2937 1 : __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
2938 :
2939 : /**
2940 : * @brief Pop an element from a stack.
2941 : *
2942 : * This routine removes a stack_data_t value from @a stack in a "last in,
2943 : * first out" manner and stores the value in @a data.
2944 : *
2945 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2946 : *
2947 : * @funcprops \isr_ok
2948 : *
2949 : * @param stack Address of the stack.
2950 : * @param data Address of area to hold the value popped from the stack.
2951 : * @param timeout Waiting period to obtain a value,
2952 : * or one of the special values K_NO_WAIT and
2953 : * K_FOREVER.
2954 : *
2955 : * @retval 0 Element popped from stack.
2956 : * @retval -EBUSY Returned without waiting.
2957 : * @retval -EAGAIN Waiting period timed out.
2958 : */
2959 1 : __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
2960 : k_timeout_t timeout);
2961 :
2962 : /**
2963 : * @brief Statically define and initialize a stack
2964 : *
2965 : * The stack can be accessed outside the module where it is defined using:
2966 : *
2967 : * @code extern struct k_stack <name>; @endcode
2968 : *
2969 : * @param name Name of the stack.
2970 : * @param stack_num_entries Maximum number of values that can be stacked.
2971 : */
2972 1 : #define K_STACK_DEFINE(name, stack_num_entries) \
2973 : stack_data_t __noinit \
2974 : _k_stack_buf_##name[stack_num_entries]; \
2975 : STRUCT_SECTION_ITERABLE(k_stack, name) = \
2976 : Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
2977 : stack_num_entries)
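     :
     : /*
     :  * Usage sketch (hypothetical names): pushing and popping word-sized
     :  * values.
     :  *
     :  * @code
     :  * K_STACK_DEFINE(my_stack, 8);
     :  *
     :  * stack_data_t value;
     :  *
     :  * k_stack_push(&my_stack, (stack_data_t)0x1234);
     :  *
     :  * if (k_stack_pop(&my_stack, &value, K_NO_WAIT) == 0) {
     :  *         // value == 0x1234
     :  * }
     :  * @endcode
     :  */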
2978 :
2979 : /** @} */
2980 :
2981 : /**
2982 : * @cond INTERNAL_HIDDEN
2983 : */
2984 :
2985 : struct k_work;
2986 : struct k_work_q;
2987 : struct k_work_queue_config;
2988 : extern struct k_work_q k_sys_work_q;
2989 :
2990 : /**
2991 : * INTERNAL_HIDDEN @endcond
2992 : */
2993 :
2994 : /**
2995 : * @defgroup mutex_apis Mutex APIs
2996 : * @ingroup kernel_apis
2997 : * @{
2998 : */
2999 :
3000 : /**
3001 : * Mutex Structure
3002 : * @ingroup mutex_apis
3003 : */
3004 1 : struct k_mutex {
3005 : /** Mutex wait queue */
3006 1 : _wait_q_t wait_q;
3007 : /** Mutex owner */
3008 1 : struct k_thread *owner;
3009 :
3010 : /** Current lock count */
3011 1 : uint32_t lock_count;
3012 :
3013 : /** Original thread priority */
3014 1 : int owner_orig_prio;
3015 :
3016 : SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
3017 :
3018 : #ifdef CONFIG_OBJ_CORE_MUTEX
3019 : struct k_obj_core obj_core;
3020 : #endif
3021 : };
3022 :
3023 : /**
3024 : * @cond INTERNAL_HIDDEN
3025 : */
3026 : #define Z_MUTEX_INITIALIZER(obj) \
3027 : { \
3028 : .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3029 : .owner = NULL, \
3030 : .lock_count = 0, \
3031 : .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
3032 : }
3033 :
3034 : /**
3035 : * INTERNAL_HIDDEN @endcond
3036 : */
3037 :
3038 : /**
3039 : * @brief Statically define and initialize a mutex.
3040 : *
3041 : * The mutex can be accessed outside the module where it is defined using:
3042 : *
3043 : * @code extern struct k_mutex <name>; @endcode
3044 : *
3045 : * @param name Name of the mutex.
3046 : */
3047 1 : #define K_MUTEX_DEFINE(name) \
3048 : STRUCT_SECTION_ITERABLE(k_mutex, name) = \
3049 : Z_MUTEX_INITIALIZER(name)
3050 :
3051 : /**
3052 : * @brief Initialize a mutex.
3053 : *
3054 : * This routine initializes a mutex object, prior to its first use.
3055 : *
3056 : * Upon completion, the mutex is available and does not have an owner.
3057 : *
3058 : * @param mutex Address of the mutex.
3059 : *
3060 : * @retval 0 Mutex object created
3061 : *
3062 : */
3063 1 : __syscall int k_mutex_init(struct k_mutex *mutex);
3064 :
3065 :
3066 : /**
3067 : * @brief Lock a mutex.
3068 : *
3069 : * This routine locks @a mutex. If the mutex is locked by another thread,
3070 : * the calling thread waits until the mutex becomes available or until
3071 : * a timeout occurs.
3072 : *
3073 : * A thread is permitted to lock a mutex it has already locked. The operation
3074 : * completes immediately and the lock count is increased by 1.
3075 : *
3076 : * Mutexes may not be locked in ISRs.
3077 : *
3078 : * @param mutex Address of the mutex.
3079 : * @param timeout Waiting period to lock the mutex,
3080 : * or one of the special values K_NO_WAIT and
3081 : * K_FOREVER.
3082 : *
3083 : * @retval 0 Mutex locked.
3084 : * @retval -EBUSY Returned without waiting.
3085 : * @retval -EAGAIN Waiting period timed out.
3086 : */
3087 1 : __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
3088 :
3089 : /**
3090 : * @brief Unlock a mutex.
3091 : *
3092 : * This routine unlocks @a mutex. The mutex must already be locked by the
3093 : * calling thread.
3094 : *
3095 : * The mutex cannot be claimed by another thread until it has been unlocked by
3096 : * the calling thread as many times as it was previously locked by that
3097 : * thread.
3098 : *
3099 : * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
3100 : * in thread context due to ownership and priority inheritance semantics.
3101 : *
3102 : * @param mutex Address of the mutex.
3103 : *
3104 : * @retval 0 Mutex unlocked.
3105 : * @retval -EPERM The current thread does not own the mutex
3106 : * @retval -EINVAL The mutex is not locked
3107 : *
3108 : */
3109 1 : __syscall int k_mutex_unlock(struct k_mutex *mutex);
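     :
     : /*
     :  * Usage sketch (hypothetical names): guarding a shared resource with
     :  * a bounded wait.
     :  *
     :  * @code
     :  * K_MUTEX_DEFINE(my_mutex);
     :  *
     :  * if (k_mutex_lock(&my_mutex, K_MSEC(10)) == 0) {
     :  *         // ... access the shared resource ...
     :  *         k_mutex_unlock(&my_mutex);
     :  * } else {
     :  *         // -EAGAIN: mutex not acquired within 10 ms
     :  * }
     :  * @endcode
     :  */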
3110 :
3111 : /**
3112 : * @}
3113 : */
3114 :
3115 :
3116 0 : struct k_condvar {
3117 0 : _wait_q_t wait_q;
3118 :
3119 : #ifdef CONFIG_OBJ_CORE_CONDVAR
3120 : struct k_obj_core obj_core;
3121 : #endif
3122 : };
3123 :
3124 : #define Z_CONDVAR_INITIALIZER(obj) \
3125 : { \
3126 : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
3127 : }
3128 :
3129 : /**
3130 : * @defgroup condvar_apis Condition Variables APIs
3131 : * @ingroup kernel_apis
3132 : * @{
3133 : */
3134 :
3135 : /**
3136 : * @brief Initialize a condition variable
3137 : *
3138 : * @param condvar pointer to a @p k_condvar structure
3139 : * @retval 0 Condition variable created successfully
3140 : */
3141 1 : __syscall int k_condvar_init(struct k_condvar *condvar);
3142 :
3143 : /**
3144 : * @brief Signals one thread that is pending on the condition variable
3145 : *
3146 : * @param condvar pointer to a @p k_condvar structure
3147 : * @retval 0 On success
3148 : */
3149 1 : __syscall int k_condvar_signal(struct k_condvar *condvar);
3150 :
3151 : /**
3152 : * @brief Unblock all threads that are pending on the condition
3153 : * variable
3154 : *
3155 : * @param condvar pointer to a @p k_condvar structure
3156 : * @return An integer with the number of woken threads on success
3157 : */
3158 1 : __syscall int k_condvar_broadcast(struct k_condvar *condvar);
3159 :
3160 : /**
3161 : * @brief Waits on the condition variable, releasing the mutex lock
3162 : *
3163 : * Atomically releases the currently owned mutex, blocks the current thread
3164 : * waiting on the condition variable specified by @a condvar,
3165 : * and finally acquires the mutex again.
3166 : *
3167 : * The waiting thread unblocks only after another thread calls
3168 : * k_condvar_signal() or k_condvar_broadcast() with the same condition variable.
3169 : *
3170 : * @param condvar pointer to a @p k_condvar structure
3171 : * @param mutex Address of the mutex.
3172 : * @param timeout Waiting period for the condition variable
3173 : * or one of the special values K_NO_WAIT and K_FOREVER.
3174 : * @retval 0 On success
3175 : * @retval -EAGAIN Waiting period timed out.
3176 : */
3177 1 : __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
3178 : k_timeout_t timeout);
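 :
 : /**
 : * Example (illustrative sketch): the predicate-wait pattern. The mutex is
 : * held around both the predicate check and k_condvar_wait(); the predicate
 : * is re-checked in a loop after each wakeup. All names are assumptions.
 : *
 : * @code
 : * K_MUTEX_DEFINE(my_lock);
 : * K_CONDVAR_DEFINE(my_condvar);
 : * static bool data_ready;
 : *
 : * void consumer(void)
 : * {
 : *     k_mutex_lock(&my_lock, K_FOREVER);
 : *     while (!data_ready) {
 : *         k_condvar_wait(&my_condvar, &my_lock, K_FOREVER);
 : *     }
 : *     data_ready = false;            // consume while holding the lock
 : *     k_mutex_unlock(&my_lock);
 : * }
 : *
 : * void producer(void)
 : * {
 : *     k_mutex_lock(&my_lock, K_FOREVER);
 : *     data_ready = true;
 : *     k_condvar_signal(&my_condvar);
 : *     k_mutex_unlock(&my_lock);
 : * }
 : * @endcode
 : */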
3179 :
3180 : /**
3181 : * @brief Statically define and initialize a condition variable.
3182 : *
3183 : * The condition variable can be accessed outside the module where it is
3184 : * defined using:
3185 : *
3186 : * @code extern struct k_condvar <name>; @endcode
3187 : *
3188 : * @param name Name of the condition variable.
3189 : */
3190 1 : #define K_CONDVAR_DEFINE(name) \
3191 : STRUCT_SECTION_ITERABLE(k_condvar, name) = \
3192 : Z_CONDVAR_INITIALIZER(name)
3193 : /**
3194 : * @}
3195 : */
3196 :
3197 : /**
3198 : * @cond INTERNAL_HIDDEN
3199 : */
3200 :
3201 : struct k_sem {
3202 : _wait_q_t wait_q;
3203 : unsigned int count;
3204 : unsigned int limit;
3205 :
3206 : Z_DECL_POLL_EVENT
3207 :
3208 : SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
3209 :
3210 : #ifdef CONFIG_OBJ_CORE_SEM
3211 : struct k_obj_core obj_core;
3212 : #endif
3213 : };
3214 :
3215 : #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3216 : { \
3217 : .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3218 : .count = (initial_count), \
3219 : .limit = (count_limit), \
3220 : Z_POLL_EVENT_OBJ_INIT(obj) \
3221 : }
3222 :
3223 : /**
3224 : * INTERNAL_HIDDEN @endcond
3225 : */
3226 :
3227 : /**
3228 : * @defgroup semaphore_apis Semaphore APIs
3229 : * @ingroup kernel_apis
3230 : * @{
3231 : */
3232 :
3233 : /**
3234 : * @brief Maximum limit value allowed for a semaphore.
3235 : *
3236 : * This is intended for use when a semaphore does not have
3237 : * an explicit maximum limit, and instead is just used for
3238 : * counting purposes.
3239 : *
3240 : */
3241 1 : #define K_SEM_MAX_LIMIT UINT_MAX
3242 :
3243 : /**
3244 : * @brief Initialize a semaphore.
3245 : *
3246 : * This routine initializes a semaphore object, prior to its first use.
3247 : *
3248 : * @param sem Address of the semaphore.
3249 : * @param initial_count Initial semaphore count.
3250 : * @param limit Maximum permitted semaphore count.
3251 : *
3252 : * @see K_SEM_MAX_LIMIT
3253 : *
3254 : * @retval 0 Semaphore created successfully
3255 : * @retval -EINVAL Invalid values
3256 : *
3257 : */
3258 1 : __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3259 : unsigned int limit);
3260 :
3261 : /**
3262 : * @brief Take a semaphore.
3263 : *
3264 : * This routine takes @a sem.
3265 : *
3266 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
3267 : *
3268 : * @funcprops \isr_ok
3269 : *
3270 : * @param sem Address of the semaphore.
3271 : * @param timeout Waiting period to take the semaphore,
3272 : * or one of the special values K_NO_WAIT and K_FOREVER.
3273 : *
3274 : * @retval 0 Semaphore taken.
3275 : * @retval -EBUSY Returned without waiting.
3276 : * @retval -EAGAIN Waiting period timed out,
3277 : * or the semaphore was reset during the waiting period.
3278 : */
3279 1 : __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3280 :
3281 : /**
3282 : * @brief Give a semaphore.
3283 : *
3284 : * This routine gives @a sem, unless the semaphore is already at its maximum
3285 : * permitted count.
3286 : *
3287 : * @funcprops \isr_ok
3288 : *
3289 : * @param sem Address of the semaphore.
3290 : */
3291 1 : __syscall void k_sem_give(struct k_sem *sem);
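 :
 : /**
 : * Example (illustrative sketch): an ISR signalling a worker thread.
 : * k_sem_give() is ISR-safe; k_sem_take() blocks in thread context. The
 : * names and the 0/1 count/limit pair are assumptions.
 : *
 : * @code
 : * K_SEM_DEFINE(rx_sem, 0, 1);
 : *
 : * void rx_isr(const void *arg)
 : * {
 : *     ARG_UNUSED(arg);
 : *     k_sem_give(&rx_sem);           // wake the worker; never blocks
 : * }
 : *
 : * void rx_thread(void)
 : * {
 : *     for (;;) {
 : *         k_sem_take(&rx_sem, K_FOREVER);
 : *         // process the data the ISR announced
 : *     }
 : * }
 : * @endcode
 : */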
3292 :
3293 : /**
3294 : * @brief Resets a semaphore's count to zero.
3295 : *
3296 : * This routine sets the count of @a sem to zero.
3297 : * Any outstanding semaphore takes will be aborted
3298 : * with -EAGAIN.
3299 : *
3300 : * @param sem Address of the semaphore.
3301 : */
3302 1 : __syscall void k_sem_reset(struct k_sem *sem);
3303 :
3304 : /**
3305 : * @brief Get a semaphore's count.
3306 : *
3307 : * This routine returns the current count of @a sem.
3308 : *
3309 : * @param sem Address of the semaphore.
3310 : *
3311 : * @return Current semaphore count.
3312 : */
3313 1 : __syscall unsigned int k_sem_count_get(struct k_sem *sem);
3314 :
3315 : /**
3316 : * @internal
3317 : */
3318 : static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3319 : {
3320 : return sem->count;
3321 : }
3322 :
3323 : /**
3324 : * @brief Statically define and initialize a semaphore.
3325 : *
3326 : * The semaphore can be accessed outside the module where it is defined using:
3327 : *
3328 : * @code extern struct k_sem <name>; @endcode
3329 : *
3330 : * @param name Name of the semaphore.
3331 : * @param initial_count Initial semaphore count.
3332 : * @param count_limit Maximum permitted semaphore count.
3333 : */
3334 1 : #define K_SEM_DEFINE(name, initial_count, count_limit) \
3335 : STRUCT_SECTION_ITERABLE(k_sem, name) = \
3336 : Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3337 : BUILD_ASSERT(((count_limit) != 0) && \
3338 : 	     ((initial_count) <= (count_limit)) && \
3339 : ((count_limit) <= K_SEM_MAX_LIMIT));
3340 :
3341 : /** @} */
3342 :
3343 : /**
3344 : * @cond INTERNAL_HIDDEN
3345 : */
3346 :
3347 : struct k_work_delayable;
3348 : struct k_work_sync;
3349 :
3350 : /**
3351 : * INTERNAL_HIDDEN @endcond
3352 : */
3353 :
3354 : /**
3355 : * @defgroup workqueue_apis Work Queue APIs
3356 : * @ingroup kernel_apis
3357 : * @{
3358 : */
3359 :
3360 : /** @brief The signature for a work item handler function.
3361 : *
3362 : * The function will be invoked by the thread animating a work queue.
3363 : *
3364 : * @param work the work item that provided the handler.
3365 : */
3366 1 : typedef void (*k_work_handler_t)(struct k_work *work);
3367 :
3368 : /** @brief Initialize a (non-delayable) work structure.
3369 : *
3370 : * This must be invoked before submitting a work structure for the first time.
3371 : * It need not be invoked again on the same work structure. It can be
3372 : * re-invoked to change the associated handler, but this must be done when the
3373 : * work item is idle.
3374 : *
3375 : * @funcprops \isr_ok
3376 : *
3377 : * @param work the work structure to be initialized.
3378 : *
3379 : * @param handler the handler to be invoked by the work item.
3380 : */
3381 1 : void k_work_init(struct k_work *work,
3382 : k_work_handler_t handler);
3383 :
3384 : /** @brief Busy state flags from the work item.
3385 : *
3386 : * A zero return value indicates the work item appears to be idle.
3387 : *
3388 : * @note This is a live snapshot of state, which may change before the result
3389 : * is checked. Use locks where appropriate.
3390 : *
3391 : * @funcprops \isr_ok
3392 : *
3393 : * @param work pointer to the work item.
3394 : *
3395 : * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
3396 : * K_WORK_RUNNING, K_WORK_CANCELING, and K_WORK_FLUSHING.
3397 : */
3398 1 : int k_work_busy_get(const struct k_work *work);
3399 :
3400 : /** @brief Test whether a work item is currently pending.
3401 : *
3402 : * Wrapper to determine whether a work item is in a non-idle state.
3403 : *
3404 : * @note This is a live snapshot of state, which may change before the result
3405 : * is checked. Use locks where appropriate.
3406 : *
3407 : * @funcprops \isr_ok
3408 : *
3409 : * @param work pointer to the work item.
3410 : *
3411 : * @return true if and only if k_work_busy_get() returns a non-zero value.
3412 : */
3413 : static inline bool k_work_is_pending(const struct k_work *work);
3414 :
3415 : /** @brief Submit a work item to a queue.
3416 : *
3417 : * @param queue pointer to the work queue on which the item should run. If
3418 : * NULL the queue from the most recent submission will be used.
3419 : *
3420 : * @funcprops \isr_ok
3421 : *
3422 : * @param work pointer to the work item.
3423 : *
3424 : * @retval 0 if work was already submitted to a queue
3425 : * @retval 1 if work was not submitted and has been queued to @p queue
3426 : * @retval 2 if work was running and has been queued to the queue that was
3427 : * running it
3428 : * @retval -EBUSY
3429 : * * if work submission was rejected because the work item is cancelling; or
3430 : * * @p queue is draining; or
3431 : * * @p queue is plugged.
3432 : * @retval -EINVAL if @p queue is null and the work item has never been run.
3433 : * @retval -ENODEV if @p queue has not been started.
3434 : */
3435 1 : int k_work_submit_to_queue(struct k_work_q *queue,
3436 : struct k_work *work);
3437 :
3438 : /** @brief Submit a work item to the system queue.
3439 : *
3440 : * @funcprops \isr_ok
3441 : *
3442 : * @param work pointer to the work item.
3443 : *
3444 : * @return as with k_work_submit_to_queue().
3445 : */
3446 1 : int k_work_submit(struct k_work *work);
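 :
 : /**
 : * Example (illustrative sketch): deferring processing from an ISR to the
 : * system work queue. The handler runs later in the system work queue
 : * thread. Names are assumptions.
 : *
 : * @code
 : * static void my_work_handler(struct k_work *work)
 : * {
 : *     // runs in the system work queue thread, not in the ISR
 : * }
 : *
 : * static struct k_work my_work;
 : *
 : * void setup(void)
 : * {
 : *     k_work_init(&my_work, my_work_handler);
 : * }
 : *
 : * void my_isr(const void *arg)
 : * {
 : *     ARG_UNUSED(arg);
 : *     (void)k_work_submit(&my_work); // ISR-safe
 : * }
 : * @endcode
 : */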
3447 :
3448 : /** @brief Wait for last-submitted instance to complete.
3449 : *
3450 : * Resubmissions may occur while waiting, including chained submissions (from
3451 : * within the handler).
3452 : *
3453 : * @note Be careful of caller and work queue thread relative priority. If
3454 : * this function sleeps it will not return until the work queue thread
3455 : * completes the tasks that allow this thread to resume.
3456 : *
3457 : * @note Behavior is undefined if this function is invoked on @p work from a
3458 : * work queue running @p work.
3459 : *
3460 : * @param work pointer to the work item.
3461 : *
3462 : * @param sync pointer to an opaque item containing state related to the
3463 : * pending cancellation. The object must persist until the call returns, and
3464 : * be accessible from both the caller thread and the work queue thread. The
3465 : * object must not be used for any other flush or cancel operation until this
3466 : * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3467 : * must be allocated in coherent memory.
3468 : *
3469 : * @retval true if call had to wait for completion
3470 : * @retval false if work was already idle
3471 : */
3472 1 : bool k_work_flush(struct k_work *work,
3473 : struct k_work_sync *sync);
3474 :
3475 : /** @brief Cancel a work item.
3476 : *
3477 : * This attempts to prevent a pending (non-delayable) work item from being
3478 : * processed by removing it from the work queue. If the item is being
3479 : * processed, the work item will continue to be processed, but resubmissions
3480 : * are rejected until cancellation completes.
3481 : *
3482 : * If this returns zero, cancellation is complete; otherwise something
3483 : * (probably a work queue thread) is still referencing the item.
3484 : *
3485 : * See also k_work_cancel_sync().
3486 : *
3487 : * @funcprops \isr_ok
3488 : *
3489 : * @param work pointer to the work item.
3490 : *
3491 : * @return the k_work_busy_get() status indicating the state of the item after all
3492 : * cancellation steps performed by this call are completed.
3493 : */
3494 1 : int k_work_cancel(struct k_work *work);
3495 :
3496 : /** @brief Cancel a work item and wait for it to complete.
3497 : *
3498 : * Same as k_work_cancel() but does not return until cancellation is complete.
3499 : * This can be invoked by a thread after k_work_cancel() to synchronize with a
3500 : * previous cancellation.
3501 : *
3502 : * On return the work structure will be idle unless something submits it after
3503 : * the cancellation was complete.
3504 : *
3505 : * @note Be careful of caller and work queue thread relative priority. If
3506 : * this function sleeps it will not return until the work queue thread
3507 : * completes the tasks that allow this thread to resume.
3508 : *
3509 : * @note Behavior is undefined if this function is invoked on @p work from a
3510 : * work queue running @p work.
3511 : *
3512 : * @param work pointer to the work item.
3513 : *
3514 : * @param sync pointer to an opaque item containing state related to the
3515 : * pending cancellation. The object must persist until the call returns, and
3516 : * be accessible from both the caller thread and the work queue thread. The
3517 : * object must not be used for any other flush or cancel operation until this
3518 : * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3519 : * must be allocated in coherent memory.
3520 : *
3521 : * @retval true if work was pending (call had to wait for cancellation of a
3522 : * running handler to complete, or scheduled or submitted operations were
3523 : * cancelled);
3524 : * @retval false otherwise
3525 : */
3526 1 : bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
3527 :
3528 : /** @brief Initialize a work queue structure.
3529 : *
3530 : * This must be invoked before starting a work queue structure for the first time.
3531 : * It need not be invoked again on the same work queue structure.
3532 : *
3533 : * @funcprops \isr_ok
3534 : *
3535 : * @param queue the queue structure to be initialized.
3536 : */
3537 1 : void k_work_queue_init(struct k_work_q *queue);
3538 :
3539 : /** @brief Initialize a work queue.
3540 : *
3541 : * This configures the work queue thread and starts it running. The function
3542 : * should not be re-invoked on a queue.
3543 : *
3544 : * @param queue pointer to the queue structure. It must be initialized
3545 : * in zeroed/bss memory or with @ref k_work_queue_init before
3546 : * use.
3547 : *
3548 : * @param stack pointer to the work thread stack area.
3549 : *
3550 : * @param stack_size size of the work thread stack area, in bytes.
3551 : *
3552 : * @param prio initial thread priority
3553 : *
3554 : * @param cfg optional additional configuration parameters. Pass @c
3555 : * NULL if not required, to use the defaults documented in
3556 : * k_work_queue_config.
3557 : */
3558 1 : void k_work_queue_start(struct k_work_q *queue,
3559 : k_thread_stack_t *stack, size_t stack_size,
3560 : int prio, const struct k_work_queue_config *cfg);
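 :
 : /**
 : * Example (illustrative sketch): creating a dedicated work queue. The
 : * stack size, priority, and names are assumptions; size them for the
 : * work items the queue will run.
 : *
 : * @code
 : * K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
 : * static struct k_work_q my_wq; // zeroed bss, so no k_work_queue_init() needed
 : *
 : * void start_my_wq(void)
 : * {
 : *     struct k_work_queue_config cfg = { .name = "my_wq" };
 : *
 : *     k_work_queue_start(&my_wq, my_wq_stack,
 : *                        K_THREAD_STACK_SIZEOF(my_wq_stack),
 : *                        K_PRIO_PREEMPT(5), &cfg);
 : * }
 : * @endcode
 : */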
3561 :
3562 : /** @brief Access the thread that animates a work queue.
3563 : *
3564 : * This is necessary to grant a work queue thread access to things the work
3565 : * items it will process are expected to use.
3566 : *
3567 : * @param queue pointer to the queue structure.
3568 : *
3569 : * @return the thread associated with the work queue.
3570 : */
3571 : static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3572 :
3573 : /** @brief Wait until the work queue has drained, optionally plugging it.
3574 : *
3575 : * This blocks submission to the work queue, except from the queue's own
3576 : * thread, and blocks the caller until no more work items are available in
3577 : * the queue.
3578 : *
3579 : * If @p plug is true then submission will continue to be blocked after the
3580 : * drain operation completes until k_work_queue_unplug() is invoked.
3581 : *
3582 : * Note that work items that are delayed are not yet associated with their
3583 : * work queue. They must be cancelled externally if a goal is to ensure the
3584 : * work queue remains empty. The @p plug feature can be used to prevent
3585 : * delayed items from being submitted after the drain completes.
3586 : *
3587 : * @param queue pointer to the queue structure.
3588 : *
3589 : * @param plug if true the work queue will continue to block new submissions
3590 : * after all items have drained.
3591 : *
3592 : * @retval 1 if call had to wait for the drain to complete
3593 : * @retval 0 if call did not have to wait
3594 : * @retval negative if wait was interrupted or failed
3595 : */
3596 1 : int k_work_queue_drain(struct k_work_q *queue, bool plug);
3597 :
3598 : /** @brief Release a work queue to accept new submissions.
3599 : *
3600 : * This releases the block on new submissions placed when k_work_queue_drain()
3601 : * is invoked with the @p plug option enabled. If this is invoked before the
3602 : * drain completes, new items may be submitted as soon as the drain completes.
3603 : *
3604 : * @funcprops \isr_ok
3605 : *
3606 : * @param queue pointer to the queue structure.
3607 : *
3608 : * @retval 0 if successfully unplugged
3609 : * @retval -EALREADY if the work queue was not plugged.
3610 : */
3611 1 : int k_work_queue_unplug(struct k_work_q *queue);
3612 :
3613 : /** @brief Stop a work queue.
3614 : *
3615 : * Stops the work queue thread and ensures that no further work will be processed.
3616 : * This call is blocking; if it succeeds, the work queue thread has terminated
3617 : * cleanly and no work will be processed past this point.
3618 : *
3619 : * @param queue Pointer to the queue structure.
3620 : * @param timeout Maximum time to wait for the work queue to stop.
3621 : *
3622 : * @retval 0 if the work queue was stopped
3623 : * @retval -EALREADY if the work queue was not started (or already stopped)
3624 : * @retval -EBUSY if the work queue is actively processing work items
3625 : * @retval -ETIMEDOUT if the work queue did not stop within the stipulated timeout
3626 : */
3627 1 : int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout);
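 :
 : /**
 : * Example (illustrative sketch): shutting a queue down by draining it with
 : * the plug option so no new items can arrive, then stopping its thread.
 : * The one-second timeout is an assumption.
 : *
 : * @code
 : * void shutdown_wq(struct k_work_q *queue)
 : * {
 : *     (void)k_work_queue_drain(queue, true);        // drain and plug
 : *     (void)k_work_queue_stop(queue, K_SECONDS(1)); // then stop the thread
 : * }
 : * @endcode
 : */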
3628 :
3629 : /** @brief Initialize a delayable work structure.
3630 : *
3631 : * This must be invoked before scheduling a delayable work structure for the
3632 : * first time. It need not be invoked again on the same work structure. It
3633 : * can be re-invoked to change the associated handler, but this must be done
3634 : * when the work item is idle.
3635 : *
3636 : * @funcprops \isr_ok
3637 : *
3638 : * @param dwork the delayable work structure to be initialized.
3639 : *
3640 : * @param handler the handler to be invoked by the work item.
3641 : */
3642 1 : void k_work_init_delayable(struct k_work_delayable *dwork,
3643 : k_work_handler_t handler);
3644 :
3645 : /**
3646 : * @brief Get the parent delayable work structure from a work pointer.
3647 : *
3648 : * This function is necessary when a @c k_work_handler_t function is passed to
3649 : * k_work_schedule_for_queue() and the handler needs to access data from the
3650 : * containing `k_work_delayable` structure.
3651 : *
3652 : * @param work Address passed to the work handler
3653 : *
3654 : * @return Address of the containing @c k_work_delayable structure.
3655 : */
3656 : static inline struct k_work_delayable *
3657 : k_work_delayable_from_work(struct k_work *work);
3658 :
3659 : /** @brief Busy state flags from the delayable work item.
3660 : *
3661 : * @funcprops \isr_ok
3662 : *
3663 : * @note This is a live snapshot of state, which may change before the result
3664 : * can be inspected. Use locks where appropriate.
3665 : *
3666 : * @param dwork pointer to the delayable work item.
3667 : *
3668 : * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING,
3669 : * K_WORK_CANCELING, and K_WORK_FLUSHING. A zero return value indicates the
3670 : * work item appears to be idle.
3671 : */
3672 1 : int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3673 :
3674 : /** @brief Test whether a delayed work item is currently pending.
3675 : *
3676 : * Wrapper to determine whether a delayed work item is in a non-idle state.
3677 : *
3678 : * @note This is a live snapshot of state, which may change before the result
3679 : * can be inspected. Use locks where appropriate.
3680 : *
3681 : * @funcprops \isr_ok
3682 : *
3683 : * @param dwork pointer to the delayable work item.
3684 : *
3685 : * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3686 : * value.
3687 : */
3688 : static inline bool k_work_delayable_is_pending(
3689 : const struct k_work_delayable *dwork);
3690 :
3691 : /** @brief Get the absolute tick count at which a scheduled delayable work
3692 : * will be submitted.
3693 : *
3694 : * @note This is a live snapshot of state, which may change before the result
3695 : * can be inspected. Use locks where appropriate.
3696 : *
3697 : * @funcprops \isr_ok
3698 : *
3699 : * @param dwork pointer to the delayable work item.
3700 : *
3701 : * @return the tick count when the timer that will schedule the work item will
3702 : * expire, or the current tick count if the work is not scheduled.
3703 : */
3704 : static inline k_ticks_t k_work_delayable_expires_get(
3705 : const struct k_work_delayable *dwork);
3706 :
3707 : /** @brief Get the number of ticks until a scheduled delayable work will be
3708 : * submitted.
3709 : *
3710 : * @note This is a live snapshot of state, which may change before the result
3711 : * can be inspected. Use locks where appropriate.
3712 : *
3713 : * @funcprops \isr_ok
3714 : *
3715 : * @param dwork pointer to the delayable work item.
3716 : *
3717 : * @return the number of ticks until the timer that will schedule the work
3718 : * item will expire, or zero if the item is not scheduled.
3719 : */
3720 : static inline k_ticks_t k_work_delayable_remaining_get(
3721 : const struct k_work_delayable *dwork);
3722 :
3723 : /** @brief Submit an idle work item to a queue after a delay.
3724 : *
3725 : * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3726 : * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3727 : *
3728 : * @funcprops \isr_ok
3729 : *
3730 : * @param queue the queue on which the work item should be submitted after the
3731 : * delay.
3732 : *
3733 : * @param dwork pointer to the delayable work item.
3734 : *
3735 : * @param delay the time to wait before submitting the work item. If @c
3736 : * K_NO_WAIT and the work is not pending this is equivalent to
3737 : * k_work_submit_to_queue().
3738 : *
3739 : * @retval 0 if work was already scheduled or submitted.
3740 : * @retval 1 if work has been scheduled.
3741 : * @retval 2 if @p delay is @c K_NO_WAIT and work
3742 : * was running and has been queued to the queue that was running it.
3743 : * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3744 : * k_work_submit_to_queue() fails with this code.
3745 : * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3746 : * k_work_submit_to_queue() fails with this code.
3747 : * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3748 : * k_work_submit_to_queue() fails with this code.
3749 : */
3750 1 : int k_work_schedule_for_queue(struct k_work_q *queue,
3751 : struct k_work_delayable *dwork,
3752 : k_timeout_t delay);
3753 :
3754 : /** @brief Submit an idle work item to the system work queue after a
3755 : * delay.
3756 : *
3757 : * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
3758 : * characteristics of that function.
3759 : *
3760 : * @param dwork pointer to the delayable work item.
3761 : *
3762 : * @param delay the time to wait before submitting the work item. If @c
3763 : * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3764 : *
3765 : * @return as with k_work_schedule_for_queue().
3766 : */
3767 1 : int k_work_schedule(struct k_work_delayable *dwork,
3768 : k_timeout_t delay);
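 :
 : /**
 : * Example (illustrative sketch): scheduling deferred work on the system
 : * queue and recovering the delayable item inside the handler via
 : * k_work_delayable_from_work(). Names and the 500 ms delay are assumptions.
 : *
 : * @code
 : * static void timeout_handler(struct k_work *work)
 : * {
 : *     struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 : *     // use CONTAINER_OF(dwork, ...) to reach the enclosing object
 : * }
 : *
 : * static K_WORK_DELAYABLE_DEFINE(timeout_work, timeout_handler);
 : *
 : * void arm_timeout(void)
 : * {
 : *     (void)k_work_schedule(&timeout_work, K_MSEC(500));
 : * }
 : * @endcode
 : */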
3769 :
3770 : /** @brief Reschedule a work item to a queue after a delay.
3771 : *
3772 : * Unlike k_work_schedule_for_queue() this function can change the deadline of
3773 : * a scheduled work item, and will schedule a work item that is in any state
3774 : * (e.g. is idle, submitted, or running). This function does not affect
3775 : * ("unsubmit") a work item that has been submitted to a queue.
3776 : *
3777 : * @funcprops \isr_ok
3778 : *
3779 : * @param queue the queue on which the work item should be submitted after the
3780 : * delay.
3781 : *
3782 : * @param dwork pointer to the delayable work item.
3783 : *
3784 : * @param delay the time to wait before submitting the work item. If @c
3785 : * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3786 : * any previous scheduled submission.
3787 : *
3788 : * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3789 : * k_work_submit_to_queue().
3790 : *
3791 : * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3792 : * @retval 1 if
3793 : * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3794 : * to @p queue; or
3795 : * * delay not @c K_NO_WAIT and work has been scheduled
3796 : * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3797 : * to the queue that was running it
3798 : * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3799 : * k_work_submit_to_queue() fails with this code.
3800 : * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3801 : * k_work_submit_to_queue() fails with this code.
3802 : * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3803 : * k_work_submit_to_queue() fails with this code.
3804 : */
3805 1 : int k_work_reschedule_for_queue(struct k_work_q *queue,
3806 : struct k_work_delayable *dwork,
3807 : k_timeout_t delay);
3808 :
3809 : /** @brief Reschedule a work item to the system work queue after a
3810 : * delay.
3811 : *
3812 : * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3813 : * API characteristics of that function.
3814 : *
3815 : * @param dwork pointer to the delayable work item.
3816 : *
3817 : * @param delay the time to wait before submitting the work item.
3818 : *
3819 : * @return as with k_work_reschedule_for_queue().
3820 : */
3821 1 : int k_work_reschedule(struct k_work_delayable *dwork,
3822 : k_timeout_t delay);
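 :
 : /**
 : * Example (illustrative sketch): debouncing a noisy input. Every event
 : * pushes the deadline out, so the handler runs only once events stop for
 : * 50 ms. Names and the interval are assumptions.
 : *
 : * @code
 : * static void debounce_handler(struct k_work *work);
 : * static K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);
 : *
 : * void on_input_event(void)
 : * {
 : *     (void)k_work_reschedule(&debounce_work, K_MSEC(50));
 : * }
 : * @endcode
 : */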
3823 :
3824 : /** @brief Flush delayable work.
3825 : *
3826 : * If the work is scheduled, it is immediately submitted. Then the caller
3827 : * blocks until the work completes, as with k_work_flush().
3828 : *
3829 : * @note Be careful of caller and work queue thread relative priority. If
3830 : * this function sleeps it will not return until the work queue thread
3831 : * completes the tasks that allow this thread to resume.
3832 : *
3833 : * @note Behavior is undefined if this function is invoked on @p dwork from a
3834 : * work queue running @p dwork.
3835 : *
3836 : * @param dwork pointer to the delayable work item.
3837 : *
3838 : * @param sync pointer to an opaque item containing state related to the
3839 : * pending cancellation. The object must persist until the call returns, and
3840 : * be accessible from both the caller thread and the work queue thread. The
3841 : * object must not be used for any other flush or cancel operation until this
3842 : * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3843 : * must be allocated in coherent memory.
3844 : *
3845 : * @retval true if call had to wait for completion
3846 : * @retval false if work was already idle
3847 : */
3848 1 : bool k_work_flush_delayable(struct k_work_delayable *dwork,
3849 : struct k_work_sync *sync);
3850 :
3851 : /** @brief Cancel delayable work.
3852 : *
3853 : * Similar to k_work_cancel() but for delayable work. If the work is
3854 : * scheduled or submitted it is canceled. This function does not wait for the
3855 : * cancellation to complete.
3856 : *
3857 : * @note The work may still be running when this returns. Use
3858 : * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3859 : * not running.
3860 : *
3861 : * @note Canceling delayable work does not prevent rescheduling it. It does
3862 : * prevent submitting it until the cancellation completes.
3863 : *
3864 : * @funcprops \isr_ok
3865 : *
3866 : * @param dwork pointer to the delayable work item.
3867 : *
3868 : * @return the k_work_delayable_busy_get() status indicating the state of the
3869 : * item after all cancellation steps performed by this call are completed.
3870 : */
3871 1 : int k_work_cancel_delayable(struct k_work_delayable *dwork);
3872 :
3873 : /** @brief Cancel delayable work and wait.
3874 : *
3875 : * Like k_work_cancel_delayable() but waits until the work becomes idle.
3876 : *
3877 : * @note Canceling delayable work does not prevent rescheduling it. It does
3878 : * prevent submitting it until the cancellation completes.
3879 : *
3880 : * @note Be careful of caller and work queue thread relative priority. If
3881 : * this function sleeps it will not return until the work queue thread
3882 : * completes the tasks that allow this thread to resume.
3883 : *
3884 : * @note Behavior is undefined if this function is invoked on @p dwork from a
3885 : * work queue running @p dwork.
3886 : *
3887 : * @param dwork pointer to the delayable work item.
3888 : *
3889 : * @param sync pointer to an opaque item containing state related to the
3890 : * pending cancellation. The object must persist until the call returns, and
3891 : * be accessible from both the caller thread and the work queue thread. The
3892 : * object must not be used for any other flush or cancel operation until this
3893 : * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3894 : * must be allocated in coherent memory.
3895 : *
3896 : * @retval true if work was not idle (call had to wait for cancellation of a
3897 : * running handler to complete, or scheduled or submitted operations were
3898 : * cancelled);
3899 : * @retval false otherwise
3900 : */
3901 1 : bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3902 : struct k_work_sync *sync);
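 :
 : /**
 : * Example (illustrative sketch): tearing down a delayable item before
 : * freeing resources its handler uses. The stack-allocated k_work_sync is
 : * acceptable only when CONFIG_KERNEL_COHERENCE is disabled (see the
 : * @p sync description above).
 : *
 : * @code
 : * void teardown(struct k_work_delayable *dwork)
 : * {
 : *     struct k_work_sync sync;
 : *
 : *     (void)k_work_cancel_delayable_sync(dwork, &sync);
 : *     // the handler is guaranteed not to be running past this point
 : * }
 : * @endcode
 : */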
3903 :
3904 0 : enum {
3905 : /**
3906 : * @cond INTERNAL_HIDDEN
3907 : */
3908 :
3909 : /* The atomic API is used for all work and queue flags fields to
3910 : * enforce sequential consistency in SMP environments.
3911 : */
3912 :
3913 : /* Bits that represent the work item states. At least nine of the
3914 : * combinations are distinct valid stable states.
3915 : */
3916 : K_WORK_RUNNING_BIT = 0,
3917 : K_WORK_CANCELING_BIT = 1,
3918 : K_WORK_QUEUED_BIT = 2,
3919 : K_WORK_DELAYED_BIT = 3,
3920 : K_WORK_FLUSHING_BIT = 4,
3921 :
3922 : K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3923 : | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),
3924 :
3925 : /* Static work flags */
3926 : K_WORK_DELAYABLE_BIT = 8,
3927 : K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3928 :
3929 : /* Dynamic work queue flags */
3930 : K_WORK_QUEUE_STARTED_BIT = 0,
3931 : K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3932 : K_WORK_QUEUE_BUSY_BIT = 1,
3933 : K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3934 : K_WORK_QUEUE_DRAIN_BIT = 2,
3935 : K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3936 : K_WORK_QUEUE_PLUGGED_BIT = 3,
3937 : K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3938 : K_WORK_QUEUE_STOP_BIT = 4,
3939 : K_WORK_QUEUE_STOP = BIT(K_WORK_QUEUE_STOP_BIT),
3940 :
3941 : /* Static work queue flags */
3942 : K_WORK_QUEUE_NO_YIELD_BIT = 8,
3943 : K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3944 :
3945 : /**
3946 : * INTERNAL_HIDDEN @endcond
3947 : */
3948 : /* Transient work flags */
3949 :
3950 : /** @brief Flag indicating a work item that is running under a work
3951 : * queue thread.
3952 : *
3953 : * Accessed via k_work_busy_get(). May co-occur with other flags.
3954 : */
3955 : K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3956 :
3957 : /** @brief Flag indicating a work item that is being canceled.
3958 : *
3959 : * Accessed via k_work_busy_get(). May co-occur with other flags.
3960 : */
3961 : K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3962 :
3963 : /** @brief Flag indicating a work item that has been submitted to a
3964 : * queue but has not started running.
3965 : *
3966 : * Accessed via k_work_busy_get(). May co-occur with other flags.
3967 : */
3968 : K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3969 :
3970 : /** @brief Flag indicating a delayed work item that is scheduled for
3971 : * submission to a queue.
3972 : *
3973 : * Accessed via k_work_busy_get(). May co-occur with other flags.
3974 : */
3975 : K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3976 :
3977 : /** @brief Flag indicating a synced work item that is being flushed.
3978 : *
3979 : * Accessed via k_work_busy_get(). May co-occur with other flags.
3980 : */
3981 : K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
3982 : };
3983 :
3984 : /** @brief A structure used to submit work. */
3985 1 : struct k_work {
3986 : /* All fields are protected by the work module spinlock. No fields
3987 : * are to be accessed except through kernel API.
3988 : */
3989 :
3990 : /* Node to link into k_work_q pending list. */
3991 0 : sys_snode_t node;
3992 :
3993 : /* The function to be invoked by the work queue thread. */
3994 0 : k_work_handler_t handler;
3995 :
3996 : /* The queue on which the work item was last submitted. */
3997 0 : struct k_work_q *queue;
3998 :
3999 : /* State of the work item.
4000 : *
4001 : * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
4002 : *
4003 : * It can be RUNNING and CANCELING simultaneously.
4004 : */
4005 0 : uint32_t flags;
4006 : };
4007 :
4008 : #define Z_WORK_INITIALIZER(work_handler) { \
4009 : .handler = (work_handler), \
4010 : }
4011 :
4012 : /** @brief A structure used to submit work after a delay. */
4013 1 : struct k_work_delayable {
4014 : /* The work item. */
4015 0 : struct k_work work;
4016 :
4017 : /* Timeout used to submit work after a delay. */
4018 0 : struct _timeout timeout;
4019 :
4020 : /* The queue to which the work should be submitted. */
4021 0 : struct k_work_q *queue;
4022 : };
4023 :
4024 : #define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
4025 : .work = { \
4026 : .handler = (work_handler), \
4027 : .flags = K_WORK_DELAYABLE, \
4028 : }, \
4029 : }
4030 :
4031 : /**
4032 : * @brief Initialize a statically-defined delayable work item.
4033 : *
4034 : * This macro can be used to initialize a statically-defined delayable
4035 : * work item, prior to its first use. For example,
4036 : *
4037 : * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
4038 : *
4039 : * Note that if the runtime dependencies permit initialization with
4040 : * k_work_init_delayable(), using that instead eliminates the initialized
4041 : * object in ROM that this macro produces and that is copied in at
4042 : * system startup.
4043 : *
4044 : * @param work Symbol name for delayable work item object
4045 : * @param work_handler Function to invoke each time work item is processed.
4046 : */
4047 1 : #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
4048 : struct k_work_delayable work \
4049 : = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
4050 :
4051 : /**
4052 : * @cond INTERNAL_HIDDEN
4053 : */
4054 :
4055 : /* Record used to wait for work to flush.
4056 : *
4057 : * The work item is inserted into the queue that will process (or is
4058 : * processing) the item, and will be processed as soon as the item
4059 : * completes. When the flusher is processed the semaphore will be
4060 : * signaled, releasing the thread waiting for the flush.
4061 : */
4062 : struct z_work_flusher {
4063 : struct k_work work;
4064 : struct k_sem sem;
4065 : };
4066 :
4067 : /* Record used to wait for work to complete a cancellation.
4068 : *
4069 : * The work item is inserted into a global queue of pending cancels.
4070 : * When a cancelling work item goes idle any matching waiters are
4071 : * removed from pending_cancels and are woken.
4072 : */
4073 : struct z_work_canceller {
4074 : sys_snode_t node;
4075 : struct k_work *work;
4076 : struct k_sem sem;
4077 : };
4078 :
4079 : /**
4080 : * INTERNAL_HIDDEN @endcond
4081 : */
4082 :
4083 : /** @brief A structure holding internal state for a pending synchronous
4084 : * operation on a work item or queue.
4085 : *
4086 : * Instances of this type are provided by the caller for invocation of
4087 : * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
4088 : * referenced object must persist until the call returns, and be accessible
4089 : * from both the caller thread and the work queue thread.
4090 : *
4091 : * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
4092 : * coherent memory; see arch_mem_coherent(). The stack on these architectures
4093 : * is generally not coherent, so the object must not be stack-allocated.
4094 : * Violations are detected by runtime assertion.
4095 : */
4096 1 : struct k_work_sync {
4097 : union {
4098 0 : struct z_work_flusher flusher;
4099 0 : struct z_work_canceller canceller;
4100 0 : };
4101 : };
4102 :
4103 : /** @brief A structure holding optional configuration items for a work
4104 : * queue.
4105 : *
4106 : * This structure, and values it references, are not retained by
4107 : * k_work_queue_start().
4108 : */
4109 1 : struct k_work_queue_config {
4110 : /** The name to be given to the work queue thread.
4111 : *
4112 : * If left null the thread will not have a name.
4113 : */
4114 1 : const char *name;
4115 :
4116 : /** Control whether the work queue thread should yield between
4117 : * items.
4118 : *
4119 : * Yielding between items helps guarantee the work queue
4120 : * thread does not starve other threads, including cooperative
4121 : * ones released by a work item. This is the default behavior.
4122 : *
4123 : * Set this to @c true to prevent the work queue thread from
4124 : * yielding between items. This may be appropriate when a
4125 : * sequence of items should complete without yielding
4126 : * control.
4127 : */
4128 1 : bool no_yield;
4129 :
4130 : /** Control whether the work queue thread should be marked as
4131 : * essential thread.
4132 : */
4133 1 : bool essential;
4134 : };
4135 :
4136 : /** @brief A structure used to hold work until it can be processed. */
4137 1 : struct k_work_q {
4138 : /* The thread that animates the work. */
4139 0 : struct k_thread thread;
4140 :
4141 : /* All the following fields must be accessed only while the
4142 : * work module spinlock is held.
4143 : */
4144 :
4145 : /* List of k_work items to be worked. */
4146 0 : sys_slist_t pending;
4147 :
4148 : /* Wait queue for idle work thread. */
4149 0 : _wait_q_t notifyq;
4150 :
4151 : /* Wait queue for threads waiting for the queue to drain. */
4152 0 : _wait_q_t drainq;
4153 :
4154 : /* Flags describing queue state. */
4155 0 : uint32_t flags;
4156 : };
4157 :
4158 : /* Provide the implementation for inline functions declared above */
4159 :
4160 1 : static inline bool k_work_is_pending(const struct k_work *work)
4161 : {
4162 : return k_work_busy_get(work) != 0;
4163 : }
4164 :
4165 : static inline struct k_work_delayable *
4166 1 : k_work_delayable_from_work(struct k_work *work)
4167 : {
4168 : return CONTAINER_OF(work, struct k_work_delayable, work);
4169 : }
4170 :
4171 1 : static inline bool k_work_delayable_is_pending(
4172 : const struct k_work_delayable *dwork)
4173 : {
4174 : return k_work_delayable_busy_get(dwork) != 0;
4175 : }
4176 :
4177 1 : static inline k_ticks_t k_work_delayable_expires_get(
4178 : const struct k_work_delayable *dwork)
4179 : {
4180 : return z_timeout_expires(&dwork->timeout);
4181 : }
4182 :
4183 1 : static inline k_ticks_t k_work_delayable_remaining_get(
4184 : const struct k_work_delayable *dwork)
4185 : {
4186 : return z_timeout_remaining(&dwork->timeout);
4187 : }
4188 :
4189 1 : static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
4190 : {
4191 : return &queue->thread;
4192 : }
4193 :
4194 : /** @} */
4195 :
4196 : struct k_work_user;
4197 :
4198 : /**
4199 : * @addtogroup workqueue_apis
4200 : * @{
4201 : */
4202 :
4203 : /**
4204 : * @typedef k_work_user_handler_t
4205 : * @brief Work item handler function type for user work queues.
4206 : *
4207 : * A work item's handler function is executed by a user workqueue's thread
4208 : * when the work item is processed by the workqueue.
4209 : *
4210 : * @param work Address of the work item.
4211 : */
4212 1 : typedef void (*k_work_user_handler_t)(struct k_work_user *work);
4213 :
4214 : /**
4215 : * @cond INTERNAL_HIDDEN
4216 : */
4217 :
4218 : struct k_work_user_q {
4219 : struct k_queue queue;
4220 : struct k_thread thread;
4221 : };
4222 :
4223 : enum {
4224 : K_WORK_USER_STATE_PENDING, /* Work item pending state */
4225 : };
4226 :
4227 : struct k_work_user {
4228 : void *_reserved; /* Used by k_queue implementation. */
4229 : k_work_user_handler_t handler;
4230 : atomic_t flags;
4231 : };
4232 :
4233 : /**
4234 : * INTERNAL_HIDDEN @endcond
4235 : */
4236 :
4237 : #if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4238 : #define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4239 : #else
4240 : #define Z_WORK_USER_INITIALIZER(work_handler) \
4241 : { \
4242 : ._reserved = NULL, \
4243 : .handler = (work_handler), \
4244 : .flags = 0 \
4245 : }
4246 : #endif
4247 :
4248 : /**
4249 : * @brief Initialize a statically-defined user work item.
4250 : *
4251 : * This macro can be used to initialize a statically-defined user work
4252 : * item, prior to its first use. For example,
4253 : *
4254 : * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
4255 : *
4256 : * @param work Symbol name for work item object
4257 : * @param work_handler Function to invoke each time work item is processed.
4258 : */
4259 1 : #define K_WORK_USER_DEFINE(work, work_handler) \
4260 : struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4261 :
4262 : /**
4263 : * @brief Initialize a userspace work item.
4264 : *
4265 : * This routine initializes a user workqueue work item, prior to its
4266 : * first use.
4267 : *
4268 : * @param work Address of work item.
4269 : * @param handler Function to invoke each time work item is processed.
4270 : */
4271 1 : static inline void k_work_user_init(struct k_work_user *work,
4272 : k_work_user_handler_t handler)
4273 : {
4274 : *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4275 : }
4276 :
4277 : /**
4278 : * @brief Check if a userspace work item is pending.
4279 : *
4280 : * This routine indicates if user work item @a work is pending in a workqueue's
4281 : * queue.
4282 : *
4283 : * @note Checking if the work is pending gives no guarantee that the
4284 : * work will still be pending when this information is used. It is up to
4285 : * the caller to make sure that this information is used in a safe manner.
4286 : *
4287 : * @funcprops \isr_ok
4288 : *
4289 : * @param work Address of work item.
4290 : *
4291 : * @return true if work item is pending, or false if it is not pending.
4292 : */
4293 1 : static inline bool k_work_user_is_pending(struct k_work_user *work)
4294 : {
4295 : return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4296 : }
4297 :
4298 : /**
4299 : * @brief Submit a work item to a user mode workqueue
4300 : *
4301 : * Submits a work item to a workqueue that runs in user mode. A temporary
4302 : * memory allocation is made from the caller's resource pool which is freed
4303 : * once the worker thread consumes the k_work item. The workqueue
4304 : * thread must have memory access to the k_work item being submitted. The caller
4305 : * must have permission granted on the work_q parameter's queue object.
4306 : *
4307 : * @funcprops \isr_ok
4308 : *
4309 : * @param work_q Address of workqueue.
4310 : * @param work Address of work item.
4311 : *
4312 : * @retval -EBUSY if the work item was already in some workqueue
4313 : * @retval -ENOMEM if no memory for thread resource pool allocation
4314 : * @retval 0 Success
4315 : */
4316 1 : static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4317 : struct k_work_user *work)
4318 : {
4319 : int ret = -EBUSY;
4320 :
4321 : if (!atomic_test_and_set_bit(&work->flags,
4322 : K_WORK_USER_STATE_PENDING)) {
4323 : ret = k_queue_alloc_append(&work_q->queue, work);
4324 :
4325 : /* Couldn't insert into the queue. Clear the pending bit
4326 : * so the work item can be submitted again
4327 : */
4328 : if (ret != 0) {
4329 : atomic_clear_bit(&work->flags,
4330 : K_WORK_USER_STATE_PENDING);
4331 : }
4332 : }
4333 :
4334 : return ret;
4335 : }
4336 :
4337 : /**
4338 : * @brief Start a workqueue in user mode
4339 : *
4340 : * This works identically to k_work_queue_start() except it is callable from
4341 : * user mode, and the worker thread created will run in user mode. The caller
4342 : * must have permissions granted on both the work_q parameter's thread and
4343 : * queue objects, and the same restrictions on priority apply as
4344 : * k_thread_create().
4345 : *
4346 : * @param work_q Address of workqueue.
4347 : * @param stack Pointer to work queue thread's stack space, as defined by
4348 : * K_THREAD_STACK_DEFINE()
4349 : * @param stack_size Size of the work queue thread's stack (in bytes), which
4350 : * should either be the same constant passed to
4351 : * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
4352 : * @param prio Priority of the work queue's thread.
4353 : * @param name optional thread name. If not null a copy is made into the
4354 : * thread's name buffer.
4355 : */
4356 1 : void k_work_user_queue_start(struct k_work_user_q *work_q,
4357 : k_thread_stack_t *stack,
4358 : size_t stack_size, int prio,
4359 : const char *name);
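 :
 : /**
 : * Example (illustrative sketch): starting a user-mode work queue and
 : * submitting an item from user context. Stack size, priority, and names
 : * are assumptions; the permissions described above must be granted.
 : *
 : * @code
 : * static void user_work_handler(struct k_work_user *work);
 : *
 : * K_THREAD_STACK_DEFINE(user_wq_stack, 1024);
 : * static struct k_work_user_q user_wq;
 : * static K_WORK_USER_DEFINE(user_work, user_work_handler);
 : *
 : * void start_and_submit(void)
 : * {
 : *     k_work_user_queue_start(&user_wq, user_wq_stack,
 : *                             K_THREAD_STACK_SIZEOF(user_wq_stack),
 : *                             5, "user_wq");
 : *     (void)k_work_user_submit_to_queue(&user_wq, &user_work);
 : * }
 : * @endcode
 : */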
4360 :
4361 : /**
4362 : * @brief Access the user mode thread that animates a work queue.
4363 : *
4364 : * This is necessary to grant a user mode work queue thread access to things
4365 : * the work items it will process are expected to use.
4366 : *
4367 : * @param work_q pointer to the user mode queue structure.
4368 : *
4369 : * @return the user mode thread associated with the work queue.
4370 : */
4371 1 : static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
4372 : {
4373 : return &work_q->thread;
4374 : }
4375 :
4376 : /** @} */
4377 :
4378 : /**
4379 : * @cond INTERNAL_HIDDEN
4380 : */
4381 :
4382 : struct k_work_poll {
4383 : struct k_work work;
4384 : struct k_work_q *workq;
4385 : struct z_poller poller;
4386 : struct k_poll_event *events;
4387 : int num_events;
4388 : k_work_handler_t real_handler;
4389 : struct _timeout timeout;
4390 : int poll_result;
4391 : };
4392 :
4393 : /**
4394 : * INTERNAL_HIDDEN @endcond
4395 : */
4396 :
4397 : /**
4398 : * @addtogroup workqueue_apis
4399 : * @{
4400 : */
4401 :
4402 : /**
4403 : * @brief Initialize a statically-defined work item.
4404 : *
4405 : * This macro can be used to initialize a statically-defined workqueue work
4406 : * item, prior to its first use. For example,
4407 : *
4408 : * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4409 : *
4410 : * @param work Symbol name for work item object
4411 : * @param work_handler Function to invoke each time work item is processed.
4412 : */
4413 1 : #define K_WORK_DEFINE(work, work_handler) \
4414 : struct k_work work = Z_WORK_INITIALIZER(work_handler)
4415 :
4416 : /**
4417 : * @brief Initialize a triggered work item.
4418 : *
4419 : * This routine initializes a workqueue triggered work item, prior to
4420 : * its first use.
4421 : *
4422 : * @param work Address of triggered work item.
4423 : * @param handler Function to invoke each time work item is processed.
4424 : */
4425 1 : void k_work_poll_init(struct k_work_poll *work,
4426 : k_work_handler_t handler);
4427 :
4428 : /**
4429 : * @brief Submit a triggered work item.
4430 : *
4431 : * This routine schedules work item @a work to be processed by workqueue
4432 : * @a work_q when one of the given @a events is signaled. The routine
4433 : * initiates internal poller for the work item and then returns to the caller.
4434 : * Only when one of the watched events happen the work item is actually
4435 : * submitted to the workqueue and becomes pending.
4436 : *
4437 : * Submitting a previously submitted triggered work item that is still
4438 : * waiting for the event cancels the existing submission and reschedules it
4439 : * the using the new event list. Note that this behavior is inherently subject
4440 : * to race conditions with the pre-existing triggered work item and work queue,
4441 : * so care must be taken to synchronize such resubmissions externally.
4442 : *
4443 : * @funcprops \isr_ok
4444 : *
4445 : * @warning
4446 : * Provided array of events as well as a triggered work item must be placed
4447 : * in persistent memory (valid until work handler execution or work
4448 : * cancellation) and cannot be modified after submission.
4449 : *
4450 : * @param work_q Address of workqueue.
4451 : * @param work Address of delayed work item.
4452 : * @param events An array of events which trigger the work.
4453 : * @param num_events The number of events in the array.
4454 : * @param timeout Timeout after which the work will be scheduled
4455 : * for execution even if not triggered.
4456 : *
4457 : *
4458 : * @retval 0 Work item started watching for events.
4459 : * @retval -EINVAL Work item is being processed or has completed its work.
4460 : * @retval -EADDRINUSE Work item is pending on a different workqueue.
4461 : */
4462 1 : int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4463 : struct k_work_poll *work,
4464 : struct k_poll_event *events,
4465 : int num_events,
4466 : k_timeout_t timeout);
4467 :
4468 : /**
4469 : * @brief Submit a triggered work item to the system workqueue.
4470 : *
4471 : * This routine schedules work item @a work to be processed by system
4472 : * workqueue when one of the given @a events is signaled. The routine
4473 : * initiates internal poller for the work item and then returns to the caller.
4474 : * Only when one of the watched events happen the work item is actually
4475 : * submitted to the workqueue and becomes pending.
4476 : *
4477 : * Submitting a previously submitted triggered work item that is still
4478 : * waiting for the event cancels the existing submission and reschedules it
4479 : * the using the new event list. Note that this behavior is inherently subject
4480 : * to race conditions with the pre-existing triggered work item and work queue,
4481 : * so care must be taken to synchronize such resubmissions externally.
4482 : *
4483 : * @funcprops \isr_ok
4484 : *
4485 : * @warning
4486 : * Provided array of events as well as a triggered work item must not be
4487 : * modified until the item has been processed by the workqueue.
4488 : *
4489 : * @param work Address of delayed work item.
4490 : * @param events An array of events which trigger the work.
4491 : * @param num_events The number of events in the array.
4492 : * @param timeout Timeout after which the work will be scheduled
4493 : * for execution even if not triggered.
4494 : *
4495 : * @retval 0 Work item started watching for events.
4496 : * @retval -EINVAL Work item is being processed or has completed its work.
4497 : * @retval -EADDRINUSE Work item is pending on a different workqueue.
4498 : */
4499 1 : int k_work_poll_submit(struct k_work_poll *work,
4500 : struct k_poll_event *events,
4501 : int num_events,
4502 : k_timeout_t timeout);
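 :
 : /**
 : * Example (illustrative sketch): running a work item when a semaphore
 : * becomes available. The k_work_poll object and the event array must stay
 : * valid until the handler runs or the submission is canceled. Names are
 : * assumptions.
 : *
 : * @code
 : * static void trig_handler(struct k_work *work);
 : *
 : * static struct k_work_poll trig_work;
 : * static struct k_poll_event trig_events[1];
 : * K_SEM_DEFINE(trig_sem, 0, 1);
 : *
 : * void watch_sem(void)
 : * {
 : *     k_work_poll_init(&trig_work, trig_handler);
 : *     k_poll_event_init(&trig_events[0], K_POLL_TYPE_SEM_AVAILABLE,
 : *                       K_POLL_MODE_NOTIFY_ONLY, &trig_sem);
 : *     (void)k_work_poll_submit(&trig_work, trig_events, 1, K_FOREVER);
 : * }
 : * @endcode
 : */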
4503 :
4504 : /**
4505 : * @brief Cancel a triggered work item.
4506 : *
4507 : * This routine cancels the submission of triggered work item @a work.
4508 : * A triggered work item can only be canceled while no event has triggered
4509 : * its submission to a workqueue.
4510 : *
4511 : * @funcprops \isr_ok
4512 : *
4513 : * @param work Address of delayed work item.
4514 : *
4515 : * @retval 0 Work item canceled.
4516 : * @retval -EINVAL Work item is being processed or has completed its work.
4517 : */
4518 1 : int k_work_poll_cancel(struct k_work_poll *work);
4519 :
4520 : /** @} */
4521 :
4522 : /**
4523 : * @defgroup msgq_apis Message Queue APIs
4524 : * @ingroup kernel_apis
4525 : * @{
4526 : */
4527 :
4528 : /**
4529 : * @brief Message Queue Structure
4530 : */
4531 1 : struct k_msgq {
4532 : /** Message queue wait queue */
4533 1 : _wait_q_t wait_q;
4534 : /** Lock */
4535 1 : struct k_spinlock lock;
4536 : /** Message size */
4537 1 : size_t msg_size;
4538 : /** Maximal number of messages */
4539 1 : uint32_t max_msgs;
4540 : /** Start of message buffer */
4541 1 : char *buffer_start;
4542 : /** End of message buffer */
4543 1 : char *buffer_end;
4544 : /** Read pointer */
4545 1 : char *read_ptr;
4546 : /** Write pointer */
4547 1 : char *write_ptr;
4548 : /** Number of used messages */
4549 1 : uint32_t used_msgs;
4550 :
4551 : Z_DECL_POLL_EVENT
4552 :
4553 : /** Message queue flags */
4554 1 : uint8_t flags;
4555 :
4556 : SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)
4557 :
4558 : #ifdef CONFIG_OBJ_CORE_MSGQ
4559 : struct k_obj_core obj_core;
4560 : #endif
4561 : };
4562 : /**
4563 : * @cond INTERNAL_HIDDEN
4564 : */
4565 :
4566 :
4567 : #define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
4568 : { \
4569 : .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4570 : .msg_size = q_msg_size, \
4571 : .max_msgs = q_max_msgs, \
4572 : .buffer_start = q_buffer, \
4573 : .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
4574 : .read_ptr = q_buffer, \
4575 : .write_ptr = q_buffer, \
4576 : .used_msgs = 0, \
4577 : Z_POLL_EVENT_OBJ_INIT(obj) \
4578 : }
4579 :
4580 : /**
4581 : * INTERNAL_HIDDEN @endcond
4582 : */
4583 :
4584 :
4585 0 : #define K_MSGQ_FLAG_ALLOC BIT(0)
4586 :
4587 : /**
4588 : * @brief Message Queue Attributes
4589 : */
4590 1 : struct k_msgq_attrs {
4591 : /** Message Size */
4592 1 : size_t msg_size;
4593 : /** Maximal number of messages */
4594 1 : uint32_t max_msgs;
4595 : /** Used messages */
4596 1 : uint32_t used_msgs;
4597 : };
4598 :
4599 :
4600 : /**
4601 : * @brief Statically define and initialize a message queue.
4602 : *
4603 : * The message queue's ring buffer contains space for @a q_max_msgs messages,
4604 : * each of which is @a q_msg_size bytes long. Alignment of the message queue's
4605 : * ring buffer is not necessary; setting @a q_align to 1 is sufficient.
4606 : *
4607 : * The message queue can be accessed outside the module where it is defined
4608 : * using:
4609 : *
4610 : * @code extern struct k_msgq <name>; @endcode
4611 : *
4612 : * @param q_name Name of the message queue.
4613 : * @param q_msg_size Message size (in bytes).
4614 : * @param q_max_msgs Maximum number of messages that can be queued.
4615 : * @param q_align Alignment of the message queue's ring buffer (power of 2).
4616 : *
4617 : */
4618 1 : #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4619 : static char __noinit __aligned(q_align) \
4620 : _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
4621 : STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
4622 : Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
4623 : (q_msg_size), (q_max_msgs))
4624 :
4625 : /**
4626 : * @brief Initialize a message queue.
4627 : *
4628 : * This routine initializes a message queue object, prior to its first use.
4629 : *
4630 : * The message queue's ring buffer must contain space for @a max_msgs messages,
4631 : * each of which is @a msg_size bytes long. Alignment of the message queue's
4632 : * ring buffer is not necessary.
4633 : *
4634 : * @param msgq Address of the message queue.
4635 : * @param buffer Pointer to ring buffer that holds queued messages.
4636 : * @param msg_size Message size (in bytes).
4637 : * @param max_msgs Maximum number of messages that can be queued.
4638 : */
4639 1 : void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
4640 : uint32_t max_msgs);
4641 :
4642 : /**
4643 : * @brief Initialize a message queue.
4644 : *
4645 : * This routine initializes a message queue object, prior to its first use,
4646 : * allocating its internal ring buffer from the calling thread's resource
4647 : * pool.
4648 : *
4649 : * Memory allocated for the ring buffer can be released by calling
4650 : * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4651 : * all of its references.
4652 : *
4653 : * @param msgq Address of the message queue.
4654 : * @param msg_size Message size (in bytes).
4655 : * @param max_msgs Maximum number of messages that can be queued.
4656 : *
4657 : * @return 0 on success, -ENOMEM if there was insufficient memory in the
4658 : * thread's resource pool, or -EINVAL if the size parameters cause
4659 : * an integer overflow.
4660 : */
4661 1 : __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
4662 : uint32_t max_msgs);
4663 :
4664 : /**
4665 : * @brief Release allocated buffer for a queue
4666 : *
4667 : * Releases memory allocated for the ring buffer.
4668 : *
4669 : * @param msgq message queue to cleanup
4670 : *
4671 : * @retval 0 on success
4672 : * @retval -EBUSY Queue not empty
4673 : */
4674 1 : int k_msgq_cleanup(struct k_msgq *msgq);
4675 :
4676 : /**
4677 : * @brief Send a message to a message queue.
4678 : *
4679 : * This routine sends a message to message queue @a msgq.
4680 : *
4681 : * @note The message content is copied from @a data into @a msgq and the @a data
4682 : * pointer is not retained, so the message content will not be modified
4683 : * by this function.
4684 : *
4685 : * @funcprops \isr_ok
4686 : *
4687 : * @param msgq Address of the message queue.
4688 : * @param data Pointer to the message.
4689 : * @param timeout Waiting period to add the message, or one of the special
4690 : * values K_NO_WAIT and K_FOREVER.
4691 : *
4692 : * @retval 0 Message sent.
4693 : * @retval -ENOMSG Returned without waiting or queue purged.
4694 : * @retval -EAGAIN Waiting period timed out.
4695 : */
4696 1 : __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
4697 :
4698 : /**
4699 : * @brief Receive a message from a message queue.
4700 : *
4701 : * This routine receives a message from message queue @a msgq in a "first in,
4702 : * first out" manner.
4703 : *
4704 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4705 : *
4706 : * @funcprops \isr_ok
4707 : *
4708 : * @param msgq Address of the message queue.
4709 : * @param data Address of area to hold the received message.
4710 : * @param timeout Waiting period to receive the message,
4711 : * or one of the special values K_NO_WAIT and
4712 : * K_FOREVER.
4713 : *
4714 : * @retval 0 Message received.
4715 : * @retval -ENOMSG Returned without waiting or queue purged.
4716 : * @retval -EAGAIN Waiting period timed out.
4717 : */
4718 1 : __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
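     :
     : /*
     :  * Illustrative consumer loop (hypothetical my_msgq and process()):
     :  * block until a message arrives, then hand it off.
     :  *
     :  * @code
     :  * void consumer(void)
     :  * {
     :  *     uint32_t value;
     :  *
     :  *     while (k_msgq_get(&my_msgq, &value, K_FOREVER) == 0) {
     :  *         process(value);
     :  *     }
     :  * }
     :  * @endcode
     :  */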
4719 :
4720 : /**
4721 : * @brief Peek/read a message from a message queue.
4722 : *
4723 : * This routine reads a message from message queue @a msgq in a "first in,
4724 : * first out" manner and leaves the message in the queue.
4725 : *
4726 : * @funcprops \isr_ok
4727 : *
4728 : * @param msgq Address of the message queue.
4729 : * @param data Address of area to hold the message read from the queue.
4730 : *
4731 : * @retval 0 Message read.
4732 : * @retval -ENOMSG Returned when the queue has no message.
4733 : */
4734 1 : __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
4735 :
4736 : /**
4737 : * @brief Peek/read a message from a message queue at the specified index.
4738 : *
4739 : * This routine reads a message from message queue @a msgq at the specified
4740 : * index and leaves the message in the queue.
4741 : * k_msgq_peek_at(msgq, data, 0) is equivalent to k_msgq_peek(msgq, data).
4742 : *
4743 : * @funcprops \isr_ok
4744 : *
4745 : * @param msgq Address of the message queue.
4746 : * @param data Address of area to hold the message read from the queue.
4747 : * @param idx Message queue index at which to peek.
4748 : *
4749 : * @retval 0 Message read.
4750 : * @retval -ENOMSG Returned when the queue has no message at the given index.
4751 : */
4752 1 : __syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
4753 :
4754 : /**
4755 : * @brief Purge a message queue.
4756 : *
4757 : * This routine discards all unreceived messages in a message queue's ring
4758 : * buffer. Any threads that are blocked waiting to send a message to the
4759 : * message queue are unblocked and see an -ENOMSG error code.
4760 : *
4761 : * @param msgq Address of the message queue.
4762 : */
4763 1 : __syscall void k_msgq_purge(struct k_msgq *msgq);
4764 :
4765 : /**
4766 : * @brief Get the amount of free space in a message queue.
4767 : *
4768 : * This routine returns the number of unused entries in a message queue's
4769 : * ring buffer.
4770 : *
4771 : * @param msgq Address of the message queue.
4772 : *
4773 : * @return Number of unused ring buffer entries.
4774 : */
4775 1 : __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
4776 :
4777 : /**
4778 : * @brief Get basic attributes of a message queue.
4779 : *
4780 : * This routine fetches the basic attributes of a message queue into @a attrs.
4781 : *
4782 : * @param msgq Address of the message queue.
4783 : * @param attrs Pointer to the message queue attribute structure.
4784 : */
4785 1 : __syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4786 : struct k_msgq_attrs *attrs);
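     :
     : /*
     :  * Illustrative sketch: check queue occupancy via its attributes. The
     :  * max_msgs and used_msgs field names are assumed from
     :  * struct k_msgq_attrs; my_msgq is hypothetical.
     :  *
     :  * @code
     :  * struct k_msgq_attrs attrs;
     :  *
     :  * k_msgq_get_attrs(&my_msgq, &attrs);
     :  * bool full = (attrs.used_msgs == attrs.max_msgs);
     :  * @endcode
     :  */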
4787 :
4788 :
4789 : static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
4790 : {
4791 : return msgq->max_msgs - msgq->used_msgs;
4792 : }
4793 :
4794 : /**
4795 : * @brief Get the number of messages in a message queue.
4796 : *
4797 : * This routine returns the number of messages in a message queue's ring buffer.
4798 : *
4799 : * @param msgq Address of the message queue.
4800 : *
4801 : * @return Number of messages.
4802 : */
4803 1 : __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
4804 :
4805 : static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
4806 : {
4807 : return msgq->used_msgs;
4808 : }
4809 :
4810 : /** @} */
4811 :
4812 : /**
4813 : * @defgroup mailbox_apis Mailbox APIs
4814 : * @ingroup kernel_apis
4815 : * @{
4816 : */
4817 :
4818 : /**
4819 : * @brief Mailbox Message Structure
4820 : *
4821 : */
4822 1 : struct k_mbox_msg {
4823 : /** size of message (in bytes) */
4824 1 : size_t size;
4825 : /** application-defined information value */
4826 1 : uint32_t info;
4827 : /** sender's message data buffer */
4828 1 : void *tx_data;
4829 : /** source thread id */
4830 1 : k_tid_t rx_source_thread;
4831 : /** target thread id */
4832 1 : k_tid_t tx_target_thread;
4833 : /** internal use only - thread waiting on send (may be a dummy) */
4834 : k_tid_t _syncing_thread;
4835 : #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4836 : /** internal use only - semaphore used during asynchronous send */
4837 : struct k_sem *_async_sem;
4838 : #endif
4839 : };
4840 : /**
4841 : * @brief Mailbox Structure
4842 : *
4843 : */
4844 1 : struct k_mbox {
4845 : /** Transmit messages queue */
4846 1 : _wait_q_t tx_msg_queue;
4847 : /** Receive message queue */
4848 1 : _wait_q_t rx_msg_queue;
4849 0 : struct k_spinlock lock;
4850 :
4851 : SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
4852 :
4853 : #ifdef CONFIG_OBJ_CORE_MAILBOX
4854 : struct k_obj_core obj_core;
4855 : #endif
4856 : };
4857 : /**
4858 : * @cond INTERNAL_HIDDEN
4859 : */
4860 :
4861 : #define Z_MBOX_INITIALIZER(obj) \
4862 : { \
4863 : .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4864 : .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
4865 : }
4866 :
4867 : /**
4868 : * INTERNAL_HIDDEN @endcond
4869 : */
4870 :
4871 : /**
4872 : * @brief Statically define and initialize a mailbox.
4873 : *
4874 : * The mailbox is to be accessed outside the module where it is defined using:
4875 : *
4876 : * @code extern struct k_mbox <name>; @endcode
4877 : *
4878 : * @param name Name of the mailbox.
4879 : */
4880 1 : #define K_MBOX_DEFINE(name) \
4881 : STRUCT_SECTION_ITERABLE(k_mbox, name) = \
4882 : Z_MBOX_INITIALIZER(name) \
4883 :
4884 : /**
4885 : * @brief Initialize a mailbox.
4886 : *
4887 : * This routine initializes a mailbox object, prior to its first use.
4888 : *
4889 : * @param mbox Address of the mailbox.
4890 : */
4891 1 : void k_mbox_init(struct k_mbox *mbox);
4892 :
4893 : /**
4894 : * @brief Send a mailbox message in a synchronous manner.
4895 : *
4896 : * This routine sends a message to @a mbox and waits for a receiver to both
4897 : * receive and process it. The message data may be in a buffer or non-existent
4898 : * (i.e. an empty message).
4899 : *
4900 : * @param mbox Address of the mailbox.
4901 : * @param tx_msg Address of the transmit message descriptor.
4902 : * @param timeout Waiting period for the message to be received,
4903 : * or one of the special values K_NO_WAIT
4904 : * and K_FOREVER. Once the message has been received,
4905 : * this routine waits as long as necessary for the message
4906 : * to be completely processed.
4907 : *
4908 : * @retval 0 Message sent.
4909 : * @retval -ENOMSG Returned without waiting.
4910 : * @retval -EAGAIN Waiting period timed out.
4911 : */
4912 1 : int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4913 : k_timeout_t timeout);
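     :
     : /*
     :  * Illustrative sender sketch (hypothetical my_mailbox), filling the
     :  * descriptor fields documented above and addressing any receiver:
     :  *
     :  * @code
     :  * char tx_buf[16] = "hello";
     :  * struct k_mbox_msg send_msg;
     :  *
     :  * send_msg.info = 1;
     :  * send_msg.size = sizeof(tx_buf);
     :  * send_msg.tx_data = tx_buf;
     :  * send_msg.tx_target_thread = K_ANY;
     :  *
     :  * (void)k_mbox_put(&my_mailbox, &send_msg, K_FOREVER);
     :  * @endcode
     :  */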
4914 :
4915 : /**
4916 : * @brief Send a mailbox message in an asynchronous manner.
4917 : *
4918 : * This routine sends a message to @a mbox without waiting for a receiver
4919 : * to process it. The message data may be in a buffer or non-existent
4920 : * (i.e. an empty message). Optionally, the semaphore @a sem will be given
4921 : * when the message has been both received and completely processed by
4922 : * the receiver.
4923 : *
4924 : * @param mbox Address of the mailbox.
4925 : * @param tx_msg Address of the transmit message descriptor.
4926 : * @param sem Address of a semaphore, or NULL if none is needed.
4927 : */
4928 1 : void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
4929 : struct k_sem *sem);
4930 :
4931 : /**
4932 : * @brief Receive a mailbox message.
4933 : *
4934 : * This routine receives a message from @a mbox, then optionally retrieves
4935 : * its data and disposes of the message.
4936 : *
4937 : * @param mbox Address of the mailbox.
4938 : * @param rx_msg Address of the receive message descriptor.
4939 : * @param buffer Address of the buffer to receive data, or NULL to defer data
4940 : * retrieval and message disposal until later.
4941 : * @param timeout Waiting period for a message to be received,
4942 : * or one of the special values K_NO_WAIT and K_FOREVER.
4943 : *
4944 : * @retval 0 Message received.
4945 : * @retval -ENOMSG Returned without waiting.
4946 : * @retval -EAGAIN Waiting period timed out.
4947 : */
4948 1 : int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
4949 : void *buffer, k_timeout_t timeout);
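     :
     : /*
     :  * Illustrative receiver sketch (hypothetical my_mailbox): accept a
     :  * message from any sender directly into a local buffer. On return,
     :  * recv_msg.size holds the actual message size.
     :  *
     :  * @code
     :  * char rx_buf[16];
     :  * struct k_mbox_msg recv_msg;
     :  *
     :  * recv_msg.size = sizeof(rx_buf);
     :  * recv_msg.rx_source_thread = K_ANY;
     :  *
     :  * if (k_mbox_get(&my_mailbox, &recv_msg, rx_buf, K_FOREVER) == 0) {
     :  *     size_t received = recv_msg.size;
     :  * }
     :  * @endcode
     :  */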
4950 :
4951 : /**
4952 : * @brief Retrieve mailbox message data into a buffer.
4953 : *
4954 : * This routine completes the processing of a received message by retrieving
4955 : * its data into a buffer, then disposing of the message.
4956 : *
4957 : * Alternatively, this routine can be used to dispose of a received message
4958 : * without retrieving its data.
4959 : *
4960 : * @param rx_msg Address of the receive message descriptor.
4961 : * @param buffer Address of the buffer to receive data, or NULL to discard
4962 : * the data.
4963 : */
4964 1 : void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
4965 :
4966 : /** @} */
4967 :
4968 : /**
4969 : * @defgroup pipe_apis Pipe APIs
4970 : * @ingroup kernel_apis
4971 : * @{
4972 : */
4973 :
4974 : /** Pipe Structure */
4975 1 : struct k_pipe {
4976 1 : unsigned char *buffer; /**< Pipe buffer: may be NULL */
4977 1 : size_t size; /**< Buffer size */
4978 1 : size_t bytes_used; /**< Number of bytes used in buffer */
4979 1 : size_t read_index; /**< Where in buffer to read from */
4980 1 : size_t write_index; /**< Where in buffer to write */
4981 1 : struct k_spinlock lock; /**< Synchronization lock */
4982 :
4983 : struct {
4984 1 : _wait_q_t readers; /**< Reader wait queue */
4985 1 : _wait_q_t writers; /**< Writer wait queue */
4986 0 : } wait_q; /**< Wait queue */
4987 :
4988 : Z_DECL_POLL_EVENT
4989 :
4990 1 : uint8_t flags; /**< Flags */
4991 :
4992 : SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
4993 :
4994 : #ifdef CONFIG_OBJ_CORE_PIPE
4995 : struct k_obj_core obj_core;
4996 : #endif
4997 : };
4998 :
4999 : /**
5000 : * @cond INTERNAL_HIDDEN
5001 : */
5002 : #define K_PIPE_FLAG_ALLOC BIT(0) /**< Buffer was allocated */
5003 :
5004 : #define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
5005 : { \
5006 : .buffer = pipe_buffer, \
5007 : .size = pipe_buffer_size, \
5008 : .bytes_used = 0, \
5009 : .read_index = 0, \
5010 : .write_index = 0, \
5011 : .lock = {}, \
5012 : .wait_q = { \
5013 : .readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
5014 : .writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
5015 : }, \
5016 : Z_POLL_EVENT_OBJ_INIT(obj) \
5017 : .flags = 0, \
5018 : }
5019 :
5020 : /**
5021 : * INTERNAL_HIDDEN @endcond
5022 : */
5023 :
5024 : /**
5025 : * @brief Statically define and initialize a pipe.
5026 : *
5027 : * The pipe can be accessed outside the module where it is defined using:
5028 : *
5029 : * @code extern struct k_pipe <name>; @endcode
5030 : *
5031 : * @param name Name of the pipe.
5032 : * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
5033 : * or zero if no ring buffer is used.
5034 : * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
5035 : *
5036 : */
5037 1 : #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
5038 : static unsigned char __noinit __aligned(pipe_align) \
5039 : _k_pipe_buf_##name[pipe_buffer_size]; \
5040 : STRUCT_SECTION_ITERABLE(k_pipe, name) = \
5041 : Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
5042 :
5043 : /**
5044 : * @brief Initialize a pipe.
5045 : *
5046 : * This routine initializes a pipe object, prior to its first use.
5047 : *
5048 : * @param pipe Address of the pipe.
5049 : * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
5050 : * is used.
5051 : * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
5052 : * buffer is used.
5053 : */
5054 1 : void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
5055 :
5056 : /**
5057 : * @brief Release a pipe's allocated buffer
5058 : *
5059 : * If a pipe object was given a dynamically allocated buffer via
5060 : * k_pipe_alloc_init(), this will free it. This function does nothing
5061 : * if the buffer wasn't dynamically allocated.
5062 : *
5063 : * @param pipe Address of the pipe.
5064 : * @retval 0 on success
5065 : * @retval -EAGAIN Nothing to clean up.
5066 : */
5067 1 : int k_pipe_cleanup(struct k_pipe *pipe);
5068 :
5069 : /**
5070 : * @brief Initialize a pipe and allocate a buffer for it
5071 : *
5072 : * Storage for the buffer region will be allocated from the calling thread's
5073 : * resource pool. This memory will be released if k_pipe_cleanup() is called,
5074 : * or userspace is enabled and the pipe object loses all references to it.
5075 : *
5076 : * This function should only be called on uninitialized pipe objects.
5077 : *
5078 : * @param pipe Address of the pipe.
5079 : * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
5080 : * buffer is used.
5081 : * @retval 0 on success
5082 : * @retval -ENOMEM if memory couldn't be allocated
5083 : */
5084 1 : __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
5085 :
5086 : /**
5087 : * @brief Write data to a pipe.
5088 : *
5089 : * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
5090 : *
5091 : * @param pipe Address of the pipe.
5092 : * @param data Address of data to write.
5093 : * @param bytes_to_write Size of data (in bytes).
5094 : * @param bytes_written Address of area to hold the number of bytes written.
5095 : * @param min_xfer Minimum number of bytes to write.
5096 : * @param timeout Waiting period to wait for the data to be written,
5097 : * or one of the special values K_NO_WAIT and K_FOREVER.
5098 : *
5099 : * @retval 0 At least @a min_xfer bytes of data were written.
5100 : * @retval -EIO Returned without waiting; zero data bytes were written.
5101 : * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
5102 : * minus one data bytes were written.
5103 : */
5104 1 : __syscall int k_pipe_put(struct k_pipe *pipe, const void *data,
5105 : size_t bytes_to_write, size_t *bytes_written,
5106 : size_t min_xfer, k_timeout_t timeout);
5107 :
5108 : /**
5109 : * @brief Read data from a pipe.
5110 : *
5111 : * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
5112 : *
5113 : * @param pipe Address of the pipe.
5114 : * @param data Address to place the data read from pipe.
5115 : * @param bytes_to_read Maximum number of data bytes to read.
5116 : * @param bytes_read Address of area to hold the number of bytes read.
5117 : * @param min_xfer Minimum number of data bytes to read.
5118 : * @param timeout Waiting period to wait for the data to be read,
5119 : * or one of the special values K_NO_WAIT and K_FOREVER.
5120 : *
5121 : * @retval 0 At least @a min_xfer bytes of data were read.
5122 : * @retval -EINVAL invalid parameters supplied
5123 : * @retval -EIO Returned without waiting; zero data bytes were read.
5124 : * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
5125 : * minus one data bytes were read.
5126 : */
5127 1 : __syscall int k_pipe_get(struct k_pipe *pipe, void *data,
5128 : size_t bytes_to_read, size_t *bytes_read,
5129 : size_t min_xfer, k_timeout_t timeout);
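     :
     : /*
     :  * Illustrative sketch (hypothetical names): write a block into a
     :  * statically defined pipe and read it back, requiring the full
     :  * transfer in each direction.
     :  *
     :  * @code
     :  * K_PIPE_DEFINE(my_pipe, 64, 4);
     :  *
     :  * void xfer(void)
     :  * {
     :  *     char out[8] = "payload";
     :  *     char in[8];
     :  *     size_t written, read;
     :  *
     :  *     (void)k_pipe_put(&my_pipe, out, sizeof(out), &written,
     :  *                      sizeof(out), K_NO_WAIT);
     :  *     (void)k_pipe_get(&my_pipe, in, sizeof(in), &read,
     :  *                      sizeof(in), K_NO_WAIT);
     :  * }
     :  * @endcode
     :  */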
5130 :
5131 : /**
5132 : * @brief Query the number of bytes that may be read from @a pipe.
5133 : *
5134 : * @param pipe Address of the pipe.
5135 : *
5136 : * @return A number n such that 0 <= n <= @ref k_pipe.size; the
5137 : * result is zero for unbuffered pipes.
5138 : */
5139 1 : __syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
5140 :
5141 : /**
5142 : * @brief Query the number of bytes that may be written to @a pipe.
5143 : *
5144 : * @param pipe Address of the pipe.
5145 : *
5146 : * @return A number n such that 0 <= n <= @ref k_pipe.size; the
5147 : * result is zero for unbuffered pipes.
5148 : */
5149 1 : __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
5150 :
5151 : /**
5152 : * @brief Flush the pipe of write data
5153 : *
5154 : * This routine flushes the pipe. Flushing the pipe is equivalent to reading
5155 : * all of the data in the pipe's buffer, plus all of the data waiting to be
5156 : * written to the pipe, into a large temporary buffer that is then discarded.
5157 : * Any writers that were previously pended become unpended.
5158 : *
5159 : * @param pipe Address of the pipe.
5160 : */
5161 1 : __syscall void k_pipe_flush(struct k_pipe *pipe);
5162 :
5163 : /**
5164 : * @brief Flush the pipe's internal buffer
5165 : *
5166 : * This routine flushes the pipe's internal buffer. This is equivalent to
5167 : * reading up to N bytes from the pipe (where N is the size of the pipe's
5168 : * buffer) into a temporary buffer and then discarding that buffer. If there
5169 : * were writers previously pending, then some may unpend as they try to fill
5170 : * up the pipe's emptied buffer.
5171 : *
5172 : * @param pipe Address of the pipe.
5173 : */
5174 1 : __syscall void k_pipe_buffer_flush(struct k_pipe *pipe);
5175 :
5176 : /** @} */
5177 :
5178 : /**
5179 : * @cond INTERNAL_HIDDEN
5180 : */
5181 :
5182 : struct k_mem_slab_info {
5183 : uint32_t num_blocks;
5184 : size_t block_size;
5185 : uint32_t num_used;
5186 : #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5187 : uint32_t max_used;
5188 : #endif
5189 : };
5190 :
5191 : struct k_mem_slab {
5192 : _wait_q_t wait_q;
5193 : struct k_spinlock lock;
5194 : char *buffer;
5195 : char *free_list;
5196 : struct k_mem_slab_info info;
5197 :
5198 : SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
5199 :
5200 : #ifdef CONFIG_OBJ_CORE_MEM_SLAB
5201 : struct k_obj_core obj_core;
5202 : #endif
5203 : };
5204 :
5205 : #define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
5206 : _slab_num_blocks) \
5207 : { \
5208 : .wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q), \
5209 : .lock = {}, \
5210 : .buffer = _slab_buffer, \
5211 : .free_list = NULL, \
5212 : .info = {_slab_num_blocks, _slab_block_size, 0} \
5213 : }
5214 :
5215 :
5216 : /**
5217 : * INTERNAL_HIDDEN @endcond
5218 : */
5219 :
5220 : /**
5221 : * @defgroup mem_slab_apis Memory Slab APIs
5222 : * @ingroup kernel_apis
5223 : * @{
5224 : */
5225 :
5226 : /**
5227 : * @brief Statically define and initialize a memory slab in a public (non-static) scope.
5228 : *
5229 : * The memory slab's buffer contains @a slab_num_blocks memory blocks
5230 : * that are @a slab_block_size bytes long. The buffer is aligned to a
5231 : * @a slab_align -byte boundary. To ensure that each memory block is similarly
5232 : * aligned to this boundary, @a slab_block_size must also be a multiple of
5233 : * @a slab_align.
5234 : *
5235 : * The memory slab can be accessed outside the module where it is defined
5236 : * using:
5237 : *
5238 : * @code extern struct k_mem_slab <name>; @endcode
5239 : *
5240 : * @note This macro cannot be used together with a static keyword.
5241 : * If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
5242 : * instead.
5243 : *
5244 : * @param name Name of the memory slab.
5245 : * @param slab_block_size Size of each memory block (in bytes).
5246 : * @param slab_num_blocks Number of memory blocks.
5247 : * @param slab_align Alignment of the memory slab's buffer (power of 2).
5248 : */
5249 1 : #define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
5250 : char __noinit_named(k_mem_slab_buf_##name) \
5251 : __aligned(WB_UP(slab_align)) \
5252 : _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5253 : STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5254 : Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5255 : WB_UP(slab_block_size), slab_num_blocks)
5256 :
5257 : /**
5258 : * @brief Statically define and initialize a memory slab in a private (static) scope.
5259 : *
5260 : * The memory slab's buffer contains @a slab_num_blocks memory blocks
5261 : * that are @a slab_block_size bytes long. The buffer is aligned to a
5262 : * @a slab_align -byte boundary. To ensure that each memory block is similarly
5263 : * aligned to this boundary, @a slab_block_size must also be a multiple of
5264 : * @a slab_align.
5265 : *
5266 : * @param name Name of the memory slab.
5267 : * @param slab_block_size Size of each memory block (in bytes).
5268 : * @param slab_num_blocks Number of memory blocks.
5269 : * @param slab_align Alignment of the memory slab's buffer (power of 2).
5270 : */
5271 1 : #define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5272 : static char __noinit_named(k_mem_slab_buf_##name) \
5273 : __aligned(WB_UP(slab_align)) \
5274 : _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5275 : static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5276 : Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5277 : WB_UP(slab_block_size), slab_num_blocks)
5278 :
5279 : /**
5280 : * @brief Initialize a memory slab.
5281 : *
5282 : * Initializes a memory slab, prior to its first use.
5283 : *
5284 : * The memory slab's buffer contains @a slab_num_blocks memory blocks
5285 : * that are @a slab_block_size bytes long. The buffer must be aligned to an
5286 : * N-byte boundary matching a word boundary, where N is a power of 2
5287 : * (i.e. 4 on 32-bit systems, 8, 16, ...).
5288 : * To ensure that each memory block is similarly aligned to this boundary,
5289 : * @a slab_block_size must also be a multiple of N.
5290 : *
5291 : * @param slab Address of the memory slab.
5292 : * @param buffer Pointer to buffer used for the memory blocks.
5293 : * @param block_size Size of each memory block (in bytes).
5294 : * @param num_blocks Number of memory blocks.
5295 : *
5296 : * @retval 0 on success
5297 : * @retval -EINVAL invalid data supplied
5298 : *
5299 : */
5300 1 : int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5301 : size_t block_size, uint32_t num_blocks);
5302 :
5303 : /**
5304 : * @brief Allocate memory from a memory slab.
5305 : *
5306 : * This routine allocates a memory block from a memory slab.
5307 : *
5308 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5309 : * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5310 : *
5311 : * @funcprops \isr_ok
5312 : *
5313 : * @param slab Address of the memory slab.
5314 : * @param mem Pointer to block address area.
5315 : * @param timeout Waiting period to wait for operation to complete.
5316 : * Use K_NO_WAIT to return without waiting,
5317 : * or K_FOREVER to wait as long as necessary.
5318 : *
5319 : * @retval 0 Memory allocated. The block address area pointed at by @a mem
5320 : * is set to the starting address of the memory block.
5321 : * @retval -ENOMEM Returned without waiting.
5322 : * @retval -EAGAIN Waiting period timed out.
5323 : * @retval -EINVAL Invalid data supplied.
5324 : */
5325 1 : int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5326 : k_timeout_t timeout);
5327 :
5328 : /**
5329 : * @brief Free memory allocated from a memory slab.
5330 : *
5331 : * This routine releases a previously allocated memory block back to its
5332 : * associated memory slab.
5333 : *
5334 : * @param slab Address of the memory slab.
5335 : * @param mem Pointer to the memory block (as returned by k_mem_slab_alloc()).
5336 : */
5337 1 : void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
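     :
     : /*
     :  * Illustrative sketch (hypothetical names): carve a fixed-size block
     :  * from a statically defined slab, use it, and return it.
     :  *
     :  * @code
     :  * K_MEM_SLAB_DEFINE(my_slab, 64, 8, 4);
     :  *
     :  * void use_block(void)
     :  * {
     :  *     void *block;
     :  *
     :  *     if (k_mem_slab_alloc(&my_slab, &block, K_NO_WAIT) == 0) {
     :  *         memset(block, 0, 64);
     :  *         k_mem_slab_free(&my_slab, block);
     :  *     }
     :  * }
     :  * @endcode
     :  */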
5338 :
5339 : /**
5340 : * @brief Get the number of used blocks in a memory slab.
5341 : *
5342 : * This routine gets the number of memory blocks that are currently
5343 : * allocated in @a slab.
5344 : *
5345 : * @param slab Address of the memory slab.
5346 : *
5347 : * @return Number of allocated memory blocks.
5348 : */
5349 1 : static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
5350 : {
5351 : return slab->info.num_used;
5352 : }
5353 :
5354 : /**
5355 : * @brief Get the maximum number of used blocks so far in a memory slab.
5356 : *
5357 : * This routine gets the maximum number of memory blocks that were
5358 : * allocated in @a slab.
5359 : *
5360 : * @param slab Address of the memory slab.
5361 : *
5362 : * @return Maximum number of allocated memory blocks.
5363 : */
5364 1 : static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
5365 : {
5366 : #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5367 : return slab->info.max_used;
5368 : #else
5369 : ARG_UNUSED(slab);
5370 : return 0;
5371 : #endif
5372 : }
5373 :
5374 : /**
5375 : * @brief Get the number of unused blocks in a memory slab.
5376 : *
5377 : * This routine gets the number of memory blocks that are currently
5378 : * unallocated in @a slab.
5379 : *
5380 : * @param slab Address of the memory slab.
5381 : *
5382 : * @return Number of unallocated memory blocks.
5383 : */
5384 1 : static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
5385 : {
5386 : return slab->info.num_blocks - slab->info.num_used;
5387 : }
5388 :
5389 : /**
5390 : * @brief Get the memory stats for a memory slab
5391 : *
5392 : * This routine gets the runtime memory usage stats for the slab @a slab.
5393 : *
5394 : * @param slab Address of the memory slab
5395 : * @param stats Pointer to memory into which to copy memory usage statistics
5396 : *
5397 : * @retval 0 Success
5398 : * @retval -EINVAL Any parameter points to NULL
5399 : */
5400 :
5401 1 : int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
5402 :
5403 : /**
5404 : * @brief Reset the maximum memory usage for a slab
5405 : *
5406 : * This routine resets the maximum memory usage for the slab @a slab to its
5407 : * current usage.
5408 : *
5409 : * @param slab Address of the memory slab
5410 : *
5411 : * @retval 0 Success
5412 : * @retval -EINVAL Memory slab is NULL
5413 : */
5414 1 : int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
5415 :
5416 : /** @} */
5417 :
5418 : /**
5419 : * @addtogroup heap_apis
5420 : * @{
5421 : */
5422 :
5423 : /* kernel synchronized heap struct */
5424 :
5425 0 : struct k_heap {
5426 0 : struct sys_heap heap;
5427 0 : _wait_q_t wait_q;
5428 0 : struct k_spinlock lock;
5429 : };
5430 :
5431 : /**
5432 : * @brief Initialize a k_heap
5433 : *
5434 : * This constructs a synchronized k_heap object over a memory region
5435 : * specified by the user. Note that while any alignment and size can
5436 : * be passed as valid parameters, internal alignment restrictions
5437 : * inside the inner sys_heap mean that not all bytes may be usable as
5438 : * allocated memory.
5439 : *
5440 : * @param h Heap struct to initialize
5441 : * @param mem Pointer to memory.
5442 : * @param bytes Size of memory region, in bytes
5443 : */
5444 1 : void k_heap_init(struct k_heap *h, void *mem,
5445 : size_t bytes) __attribute_nonnull(1);
5446 :
5447 : /**
5448 : * @brief Allocate aligned memory from a k_heap
5449 : *
5450 : * Behaves in all ways like k_heap_alloc(), except that the returned
5451 : * memory (if available) will have a starting address in memory which
5452 : * is a multiple of the specified power-of-two alignment value in
5453 : * bytes. The resulting memory can be returned to the heap using
5454 : * k_heap_free().
5455 : *
5456 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5457 : * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5458 : *
5459 : * @funcprops \isr_ok
5460 : *
5461 : * @param h Heap from which to allocate
5462 : * @param align Alignment in bytes, must be a power of two
5463 : * @param bytes Number of bytes requested
5464 : * @param timeout How long to wait, or K_NO_WAIT
5465 : * @return Pointer to memory the caller can now use, or NULL if none is available
5466 : */
5467 1 : void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
5468 : k_timeout_t timeout) __attribute_nonnull(1);
5469 :
5470 : /**
5471 : * @brief Allocate memory from a k_heap
5472 : *
5473 : * Allocates and returns a memory buffer from the memory region owned
5474 : * by the heap. If no memory is available immediately, the call will
5475 : * block for the specified timeout (constructed via the standard
5476 : * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5477 : * freed. If the allocation cannot be performed by the expiration of
5478 : * the timeout, NULL will be returned.
5479 : * Allocated memory is aligned on a multiple of pointer sizes.
5480 : *
5481 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5482 : * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5483 : *
5484 : * @funcprops \isr_ok
5485 : *
5486 : * @param h Heap from which to allocate
5487 : * @param bytes Desired size of block to allocate
5488 : * @param timeout How long to wait, or K_NO_WAIT
5489 : * @return A pointer to valid heap memory, or NULL
5490 : */
5491 1 : void *k_heap_alloc(struct k_heap *h, size_t bytes,
5492 : k_timeout_t timeout) __attribute_nonnull(1);
5493 :
5494 : /**
5495 : * @brief Reallocate memory from a k_heap
5496 : *
5497 : * Reallocates and returns a memory buffer from the memory region owned
5498 : * by the heap. If no memory is available immediately, the call will
5499 : * block for the specified timeout (constructed via the standard
5500 : * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5501 : * freed. If the allocation cannot be performed by the expiration of
5502 : * the timeout, NULL will be returned.
5503 : * Reallocated memory is aligned on a multiple of pointer sizes.
5504 : *
5505 : * @note @a timeout must be set to K_NO_WAIT if called from ISR.
5506 : * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
5507 : *
5508 : * @funcprops \isr_ok
5509 : *
5510 : * @param h Heap from which to allocate
5511 : * @param ptr Original pointer returned from a previous allocation
5512 : * @param bytes Desired size of block to allocate
5513 : * @param timeout How long to wait, or K_NO_WAIT
5514 : *
5515 : * @return Pointer to memory the caller can now use, or NULL
5516 : */
5517 1 : void *k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
5518 : __attribute_nonnull(1);
5519 :
5520 : /**
5521 : * @brief Free memory allocated by k_heap_alloc()
5522 : *
5523 : * Returns the specified memory block, which must have been returned
5524 : * from k_heap_alloc(), to the heap for use by other callers. Passing
5525 : * a NULL block is legal, and has no effect.
5526 : *
5527 : * @param h Heap to which to return the memory
5528 : * @param mem A valid memory block, or NULL
5529 : */
5530 1 : void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
5531 :
5532 : /* Hand-calculated minimum heap sizes needed to return a successful
5533 : * 1-byte allocation. See details in lib/os/heap.[ch]
5534 : */
5535 : #define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 56 : 44)
5536 :
5537 : /**
5538 : * @brief Define a static k_heap in the specified linker section
5539 : *
5540 : * This macro defines and initializes a static memory region and
5541 : * k_heap of the requested size in the specified linker section.
5542 : * After kernel start, &name can be used as if k_heap_init() had
5543 : * been called.
5544 : *
5545 : * Note that this macro enforces a minimum size on the memory region
5546 : * to accommodate metadata requirements. Very small heaps will be
5547 : * padded to fit.
5548 : *
5549 : * @param name Symbol name for the struct k_heap object
5550 : * @param bytes Size of memory region, in bytes
5551 : * @param in_section __attribute__((section(name)))
5552 : */
5553 : #define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
5554 : char in_section \
5555 : __aligned(8) /* CHUNK_UNIT */ \
5556 : kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
5557 : STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5558 : .heap = { \
5559 : .init_mem = kheap_##name, \
5560 : .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5561 : }, \
5562 : }
5563 :
5564 : /**
5565 : * @brief Define a static k_heap
5566 : *
5567 : * This macro defines and initializes a static memory region and
5568 : * k_heap of the requested size. After kernel start, &name can be
5569 : * used as if k_heap_init() had been called.
5570 : *
5571 : * Note that this macro enforces a minimum size on the memory region
5572 : * to accommodate metadata requirements. Very small heaps will be
5573 : * padded to fit.
5574 : *
5575 : * @param name Symbol name for the struct k_heap object
5576 : * @param bytes Size of memory region, in bytes
5577 : */
5578 1 : #define K_HEAP_DEFINE(name, bytes) \
5579 : Z_HEAP_DEFINE_IN_SECT(name, bytes, \
5580 : __noinit_named(kheap_buf_##name))
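     :
     : /*
     :  * Illustrative sketch (hypothetical names): a 1 KiB private heap with
     :  * a bounded wait on allocation.
     :  *
     :  * @code
     :  * K_HEAP_DEFINE(my_heap, 1024);
     :  *
     :  * void use_heap(void)
     :  * {
     :  *     void *p = k_heap_alloc(&my_heap, 64, K_MSEC(100));
     :  *
     :  *     if (p != NULL) {
     :  *         k_heap_free(&my_heap, p);
     :  *     }
     :  * }
     :  * @endcode
     :  */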
5581 :
5582 : /**
5583 : * @brief Define a static k_heap in uncached memory
5584 : *
5585 : * This macro defines and initializes a static memory region and
5586 : * k_heap of the requested size in uncached memory. After kernel
5587 : * start, &name can be used as if k_heap_init() had been called.
5588 : *
5589 : * Note that this macro enforces a minimum size on the memory region
5590 : * to accommodate metadata requirements. Very small heaps will be
5591 : * padded to fit.
5592 : *
5593 : * @param name Symbol name for the struct k_heap object
5594 : * @param bytes Size of memory region, in bytes
5595 : */
5596 1 : #define K_HEAP_DEFINE_NOCACHE(name, bytes) \
5597 : Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
5598 :
5599 : /**
5600 : * @}
5601 : */
5602 :
5603 : /**
5604 : * @defgroup heap_apis Heap APIs
5605 : * @ingroup kernel_apis
5606 : * @{
5607 : */
5608 :
5609 : /**
5610 : * @brief Allocate memory from the heap with a specified alignment.
5611 : *
5612 : * This routine provides semantics similar to aligned_alloc(); memory is
5613 : * allocated from the heap with a specified alignment. However, one minor
5614 : * difference is that k_aligned_alloc() accepts any non-zero @p size,
5615 : * whereas aligned_alloc() only accepts a @p size that is an integral
5616 : * multiple of @p align.
5617 : *
5618 : * Above, aligned_alloc() refers to:
5619 : * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
5620 : * The aligned_alloc function (p: 347-348)
5621 : *
5622 : * @param align Alignment of memory requested (in bytes).
5623 : * @param size Amount of memory requested (in bytes).
5624 : *
5625 : * @return Address of the allocated memory if successful; otherwise NULL.
5626 : */
5627 1 : void *k_aligned_alloc(size_t align, size_t size);
5628 :
5629 : /**
5630 : * @brief Allocate memory from the heap.
5631 : *
5632 : * This routine provides traditional malloc() semantics. Memory is
5633 : * allocated from the heap memory pool.
5634 : * Allocated memory is aligned on a multiple of pointer sizes.
5635 : *
5636 : * @param size Amount of memory requested (in bytes).
5637 : *
5638 : * @return Address of the allocated memory if successful; otherwise NULL.
5639 : */
5640 1 : void *k_malloc(size_t size);
5641 :
5642 : /**
5643 : * @brief Free memory allocated from heap.
5644 : *
5645 : * This routine provides traditional free() semantics. The memory being
5646 : * returned must have been allocated from the heap memory pool.
5647 : *
5648 : * If @a ptr is NULL, no operation is performed.
5649 : *
5650 : * @param ptr Pointer to previously allocated memory.
5651 : */
5652 1 : void k_free(void *ptr);
5653 :
5654 : /**
5655 : * @brief Allocate memory from heap, array style
5656 : *
5657 : * This routine provides traditional calloc() semantics. Memory is
5658 : * allocated from the heap memory pool and zeroed.
5659 : *
5660 : * @param nmemb Number of elements in the requested array
5661 : * @param size Size of each array element (in bytes).
5662 : *
5663 : * @return Address of the allocated memory if successful; otherwise NULL.
5664 : */
5665 1 : void *k_calloc(size_t nmemb, size_t size);
5666 :
5667 : /** @brief Expand the size of an existing allocation
5668 : *
5669 : * Returns a pointer to a new memory region with the same contents,
5670 : * but a different allocated size. If the new allocation can be
5671 : * expanded in place, the pointer returned will be identical.
5672 : * Otherwise the data will be copied to a new block and the old one
5673 : * will be freed as per sys_heap_free(). If the specified size is
5674 : * smaller than the original, the block will be truncated in place and
5675 : * the remaining memory returned to the heap. If the allocation of a
5676 : * new block fails, then NULL will be returned and the old block will
5677 : * not be freed or modified.
5678 : *
5679 : * @param ptr Original pointer returned from a previous allocation
5680 : * @param size Amount of memory requested (in bytes).
5681 : *
5682 : * @return Pointer to memory the caller can now use, or NULL.
5683 : */
5684 1 : void *k_realloc(void *ptr, size_t size);
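     :
     : /*
     :  * Illustrative sketch: grow a system-heap allocation, relying on the
     :  * documented guarantee that k_realloc() leaves the original block
     :  * untouched on failure.
     :  *
     :  * @code
     :  * char *buf = k_malloc(32);
     :  *
     :  * if (buf != NULL) {
     :  *     char *bigger = k_realloc(buf, 64);
     :  *
     :  *     if (bigger != NULL) {
     :  *         buf = bigger;
     :  *     }
     :  *     k_free(buf);
     :  * }
     :  * @endcode
     :  */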
5685 :
5686 : /** @} */
5687 :
5688 : /* polling API - PRIVATE */
5689 :
5690 : #ifdef CONFIG_POLL
5691 : #define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
5692 : #else
5693 : #define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
5694 : #endif
5695 :
5696 : /* private - types bit positions */
5697 : enum _poll_types_bits {
5698 : /* can be used to ignore an event */
5699 : _POLL_TYPE_IGNORE,
5700 :
5701 : /* to be signaled by k_poll_signal_raise() */
5702 : _POLL_TYPE_SIGNAL,
5703 :
5704 : /* semaphore availability */
5705 : _POLL_TYPE_SEM_AVAILABLE,
5706 :
5707 : /* queue/FIFO/LIFO data availability */
5708 : _POLL_TYPE_DATA_AVAILABLE,
5709 :
5710 : /* msgq data availability */
5711 : _POLL_TYPE_MSGQ_DATA_AVAILABLE,
5712 :
5713 : /* pipe data availability */
5714 : _POLL_TYPE_PIPE_DATA_AVAILABLE,
5715 :
5716 : _POLL_NUM_TYPES
5717 : };
5718 :
5719 : #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
5720 :
5721 : /* private - states bit positions */
5722 : enum _poll_states_bits {
5723 : /* default state when creating event */
5724 : _POLL_STATE_NOT_READY,
5725 :
5726 : /* signaled by k_poll_signal_raise() */
5727 : _POLL_STATE_SIGNALED,
5728 :
5729 : /* semaphore is available */
5730 : _POLL_STATE_SEM_AVAILABLE,
5731 :
5732 : /* data is available to read on queue/FIFO/LIFO */
5733 : _POLL_STATE_DATA_AVAILABLE,
5734 :
5735 : /* queue/FIFO/LIFO wait was cancelled */
5736 : _POLL_STATE_CANCELLED,
5737 :
5738 : /* data is available to read on a message queue */
5739 : _POLL_STATE_MSGQ_DATA_AVAILABLE,
5740 :
5741 : /* data is available to read from a pipe */
5742 : _POLL_STATE_PIPE_DATA_AVAILABLE,
5743 :
5744 : _POLL_NUM_STATES
5745 : };
5746 :
5747 : #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
5748 :
5749 : #define _POLL_EVENT_NUM_UNUSED_BITS \
5750 : (32 - (0 \
5751 : + 8 /* tag */ \
5752 : + _POLL_NUM_TYPES \
5753 : + _POLL_NUM_STATES \
5754 : + 1 /* modes */ \
5755 : ))
5756 :
5757 : /* end of polling API - PRIVATE */
5758 :
5759 :
5760 : /**
5761 : * @defgroup poll_apis Async polling APIs
5762 : * @ingroup kernel_apis
5763 : * @{
5764 : */
5765 :
5766 : /* Public polling API */
5767 :
5768 : /* public - values for k_poll_event.type bitfield */
5769 0 : #define K_POLL_TYPE_IGNORE 0
5770 0 : #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
5771 0 : #define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
5772 0 : #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
5773 0 : #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
5774 0 : #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
5775 0 : #define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
5776 :
5777 : /* public - polling modes */
5778 0 : enum k_poll_modes {
5779 : /* polling thread does not take ownership of objects when available */
5780 : K_POLL_MODE_NOTIFY_ONLY = 0,
5781 :
5782 : K_POLL_NUM_MODES
5783 : };
5784 :
5785 : /* public - values for k_poll_event.state bitfield */
5786 0 : #define K_POLL_STATE_NOT_READY 0
5787 0 : #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
5788 0 : #define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
5789 0 : #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
5790 0 : #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
5791 0 : #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
5792 0 : #define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
5793 0 : #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
5794 :
5795 : /* public - poll signal object */
5796 0 : struct k_poll_signal {
5797 : /** PRIVATE - DO NOT TOUCH */
5798 1 : sys_dlist_t poll_events;
5799 :
5800 : /**
5801 : * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
5802 : * the user resets it to 0.
5803 : */
5804 1 : unsigned int signaled;
5805 :
5806 : /** custom result value passed to k_poll_signal_raise() if needed */
5807 1 : int result;
5808 : };
5809 :
5810 0 : #define K_POLL_SIGNAL_INITIALIZER(obj) \
5811 : { \
5812 : .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
5813 : .signaled = 0, \
5814 : .result = 0, \
5815 : }
5816 : /**
5817 : * @brief Poll Event
5818 : *
5819 : */
5820 1 : struct k_poll_event {
5821 : /** PRIVATE - DO NOT TOUCH */
5822 : sys_dnode_t _node;
5823 :
5824 : /** PRIVATE - DO NOT TOUCH */
5825 1 : struct z_poller *poller;
5826 :
5827 : /** optional user-specified tag, opaque, untouched by the API */
5828 1 : uint32_t tag:8;
5829 :
5830 : /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
5831 1 : uint32_t type:_POLL_NUM_TYPES;
5832 :
5833 : /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
5834 1 : uint32_t state:_POLL_NUM_STATES;
5835 :
5836 : /** mode of operation, from enum k_poll_modes */
5837 1 : uint32_t mode:1;
5838 :
5839 : /** unused bits in 32-bit word */
5840 1 : uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
5841 :
5842 : /** per-type data */
5843 : union {
5844 : /* The typed_* fields below are used by K_POLL_EVENT_*INITIALIZER() macros to ensure
5845 : * type safety of polled objects.
5846 : */
5847 0 : void *obj, *typed_K_POLL_TYPE_IGNORE;
5848 0 : struct k_poll_signal *signal, *typed_K_POLL_TYPE_SIGNAL;
5849 0 : struct k_sem *sem, *typed_K_POLL_TYPE_SEM_AVAILABLE;
5850 0 : struct k_fifo *fifo, *typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE;
5851 0 : struct k_queue *queue, *typed_K_POLL_TYPE_DATA_AVAILABLE;
5852 0 : struct k_msgq *msgq, *typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE;
5853 : #ifdef CONFIG_PIPES
5854 : struct k_pipe *pipe, *typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE;
5855 : #endif
5856 1 : };
5857 : };
5858 :
5859 0 : #define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
5860 : { \
5861 : .poller = NULL, \
5862 : .type = _event_type, \
5863 : .state = K_POLL_STATE_NOT_READY, \
5864 : .mode = _event_mode, \
5865 : .unused = 0, \
5866 : { \
5867 : .typed_##_event_type = _event_obj, \
5868 : }, \
5869 : }
5870 :
5871 : #define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
5872 0 : event_tag) \
5873 : { \
5874 : .tag = event_tag, \
5875 : .type = _event_type, \
5876 : .state = K_POLL_STATE_NOT_READY, \
5877 : .mode = _event_mode, \
5878 : .unused = 0, \
5879 : { \
5880 : .typed_##_event_type = _event_obj, \
5881 : }, \
5882 : }
5883 :
5884 : /**
5885 : * @brief Initialize one struct k_poll_event instance
5886 : *
5887 : * After this routine is called on a poll event, the event is ready to be
5888 : * placed in an event array to be passed to k_poll().
5889 : *
5890 : * @param event The event to initialize.
5891 : * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
5892 : * values. Only values that apply to the same object being polled
5893 : * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
5894 : * event.
5895 : * @param mode Reserved for future use; use K_POLL_MODE_NOTIFY_ONLY.
5896 : * @param obj Kernel object or poll signal.
5897 : */
5898 :
5899 1 : void k_poll_event_init(struct k_poll_event *event, uint32_t type,
5900 : int mode, void *obj);
5901 :
5902 : /**
5903 : * @brief Wait for one or many of multiple poll events to occur
5904 : *
5905 : * This routine allows a thread to wait concurrently for one or many of
5906 : * multiple poll events to have occurred. Such events can be a kernel object
5907 : * being available, like a semaphore, or a poll signal event.
5908 : *
5909 : * When an event notifies that a kernel object is available, the kernel object
5910 : * is not "given" to the thread calling k_poll(): it merely signals the fact
5911 : * that the object was available when the k_poll() call was in effect. Also,
5912 : * all threads trying to acquire an object the regular way, i.e. by pending on
5913 : * the object, have precedence over the thread polling on the object. This
5914 : * means that the polling thread will never get the poll event on an object
5915 : * until the object becomes available and its pend queue is empty. For this
5916 : * reason, the k_poll() call is more effective when the objects being polled
5917 : * only have one thread, the polling thread, trying to acquire them.
5918 : *
5919 : * When k_poll() returns 0, the caller should loop over all the events that
5920 : * were passed to k_poll(), check each state field for the expected values,
5921 : * and take the associated actions.
5922 : *
5923 : * Before being reused for another call to k_poll(), the user has to reset the
5924 : * state field to K_POLL_STATE_NOT_READY.
5925 : *
5926 : * When called from user mode, a temporary memory allocation is required from
5927 : * the caller's resource pool.
5928 : *
5929 : * @param events An array of events to be polled for.
5930 : * @param num_events The number of events in the array.
5931 : * @param timeout Waiting period for an event to be ready,
5932 : * or one of the special values K_NO_WAIT and K_FOREVER.
5933 : *
5934 : * @retval 0 One or more events are ready.
5935 : * @retval -EAGAIN Waiting period timed out.
5936 : * @retval -EINTR Polling has been interrupted, e.g. with
5937 : * k_queue_cancel_wait(). All output events are still set and valid,
5938 : * cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
5939 : * words, -EINTR status means that at least one of output events is
5940 : * K_POLL_STATE_CANCELLED.
5941 : * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
5942 : * @retval -EINVAL Bad parameters (user mode only)
5943 : */
5944 :
5945 1 : __syscall int k_poll(struct k_poll_event *events, int num_events,
5946 : k_timeout_t timeout);
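     :
     : /*
     :  * Illustrative sketch (hypothetical my_sem): poll a semaphore, then
     :  * acquire it the regular way and reset the event state for reuse, as
     :  * described above.
     :  *
     :  * @code
     :  * struct k_poll_event events[1] = {
     :  *     K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
     :  *                                     K_POLL_MODE_NOTIFY_ONLY,
     :  *                                     &my_sem, 0),
     :  * };
     :  *
     :  * if (k_poll(events, 1, K_FOREVER) == 0 &&
     :  *     events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
     :  *     (void)k_sem_take(&my_sem, K_NO_WAIT);
     :  * }
     :  * events[0].state = K_POLL_STATE_NOT_READY;
     :  * @endcode
     :  */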
5947 :
5948 : /**
5949 : * @brief Initialize a poll signal object.
5950 : *
5951 : * Ready a poll signal object to be signaled via k_poll_signal_raise().
5952 : *
5953 : * @param sig A poll signal.
5954 : */
5955 :
5956 1 : __syscall void k_poll_signal_init(struct k_poll_signal *sig);
5957 :
5958 : /**
5959 : * @brief Reset a poll signal object's state to unsignaled.
5960 : *
5961 : * @param sig A poll signal object
5962 : */
5963 1 : __syscall void k_poll_signal_reset(struct k_poll_signal *sig);
5964 :
5965 : /**
5966 : * @brief Fetch the signaled state and result value of a poll signal
5967 : *
5968 : * @param sig A poll signal object
5969 : * @param signaled An integer buffer which will be written nonzero if the
5970 : * object was signaled
5971 : * @param result An integer destination buffer which will be written with the
5972 : * result value if the object was signaled, or an undefined
5973 : * value if it was not.
5974 : */
5975 1 : __syscall void k_poll_signal_check(struct k_poll_signal *sig,
5976 : unsigned int *signaled, int *result);
5977 :
5978 : /**
5979 : * @brief Signal a poll signal object.
5980 : *
5981 : * This routine makes ready a poll signal, which is basically a poll event of
5982 : * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
5983 : * made ready to run. A @a result value can be specified.
5984 : *
5985 : * The poll signal contains a 'signaled' field that, when set by
5986 : * k_poll_signal_raise(), stays set until the user sets it back to 0 with
5987 : * k_poll_signal_reset(). It thus has to be reset by the user before being
5988 : * passed again to k_poll() or k_poll() will consider it to be signaled, and
5989 : * will return immediately.
5990 : *
5991 : * @note The result is stored and the 'signaled' field is set even if
5992 : * this function returns an error indicating that an expiring poll was
5993 : * not notified. The next k_poll() will detect the missed raise.
5994 : *
5995 : * @param sig A poll signal.
5996 : * @param result The value to store in the result field of the signal.
5997 : *
5998 : * @retval 0 The signal was delivered successfully.
5999 : * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
6000 : */
6001 :
6002 1 : __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
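     :
     : /*
     :  * Illustrative sketch: raise a signal in one context and consume it
     :  * in another (hypothetical my_signal, assumed already initialized
     :  * with k_poll_signal_init()).
     :  *
     :  * @code
     :  * (void)k_poll_signal_raise(&my_signal, 0x1337);
     :  *
     :  * unsigned int signaled;
     :  * int result;
     :  *
     :  * k_poll_signal_check(&my_signal, &signaled, &result);
     :  * if (signaled != 0) {
     :  *     k_poll_signal_reset(&my_signal);
     :  * }
     :  * @endcode
     :  */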
6003 :
6004 : /** @} */
6005 :
6006 : /**
6007 : * @defgroup cpu_idle_apis CPU Idling APIs
6008 : * @ingroup kernel_apis
6009 : * @{
6010 : */
6011 : /**
6012 : * @brief Make the CPU idle.
6013 : *
6014 : * This function makes the CPU idle until an event wakes it up.
6015 : *
6016 : * In a regular system, the idle thread should be the only thread responsible
6017 : * for making the CPU idle and triggering any type of power management.
6018 : * However, in some more constrained systems, such as a single-threaded system,
6019 : * the sole thread is responsible for this, if needed.
6020 : *
6021 : * @note On some architectures, before returning, the function unmasks interrupts
6022 : * unconditionally.
6023 : */
6024 1 : static inline void k_cpu_idle(void)
6025 : {
6026 : arch_cpu_idle();
6027 : }
6028 :
6029 : /**
6030 : * @brief Make the CPU idle in an atomic fashion.
6031 : *
6032 : * Similar to k_cpu_idle(), but must be called with interrupts locked.
6033 : *
6034 : * Enabling interrupts and entering a low-power mode will be atomic,
6035 : * i.e. there will be no period of time where interrupts are enabled before
6036 : * the processor enters a low-power mode.
6037 : *
6038 : * After waking up from the low-power mode, the interrupt lockout state will
6039 : * be restored as if by irq_unlock(key).
6040 : *
6041 : * @param key Interrupt locking key obtained from irq_lock().
6042 : */
6043 1 : static inline void k_cpu_atomic_idle(unsigned int key)
6044 : {
6045 : arch_cpu_atomic_idle(key);
6046 : }
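     :
     : /*
     :  * Illustrative idle-loop sketch: take the interrupt lock, confirm
     :  * there is nothing left to do, and idle atomically so no wakeup is
     :  * lost. work_pending() is a hypothetical application predicate.
     :  *
     :  * @code
     :  * unsigned int key = irq_lock();
     :  *
     :  * if (!work_pending()) {
     :  *     k_cpu_atomic_idle(key);
     :  * } else {
     :  *     irq_unlock(key);
     :  * }
     :  * @endcode
     :  */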
6047 :
6048 : /**
6049 : * @}
6050 : */
6051 :
6052 : /**
6053 : * @cond INTERNAL_HIDDEN
6054 : * @internal
6055 : */
6056 : #ifdef ARCH_EXCEPT
6057 : /* This architecture has direct support for triggering a CPU exception */
6058 : #define z_except_reason(reason) ARCH_EXCEPT(reason)
6059 : #else
6060 :
6061 : #if !defined(CONFIG_ASSERT_NO_FILE_INFO)
6062 : #define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
6063 : #else
6064 : #define __EXCEPT_LOC()
6065 : #endif
6066 :
6067 : /* NOTE: This is the implementation for arches that do not implement
6068 : * ARCH_EXCEPT() to generate a real CPU exception.
6069 : *
6070 : * We won't have a real exception frame to determine the PC value when
6071 : * the oops occurred, so print file and line number before we jump into
6072 : * the fatal error handler.
6073 : */
6074 : #define z_except_reason(reason) do { \
6075 : __EXCEPT_LOC(); \
6076 : z_fatal_error(reason, NULL); \
6077 : } while (false)
6078 :
6079 : #endif /* ARCH_EXCEPT */
6080 : /**
6081 : * INTERNAL_HIDDEN @endcond
6082 : */
6083 :
6084 : /**
6085 : * @brief Fatally terminate a thread
6086 : *
6087 : * This should be called when a thread has encountered an unrecoverable
6088 : * runtime condition and needs to terminate. What this ultimately
6089 : * means is determined by the _fatal_error_handler() implementation, which
6090 : * will be called will reason code K_ERR_KERNEL_OOPS.
6091 : * will be called with reason code K_ERR_KERNEL_OOPS.
6092 : * If this is called from ISR context, the default system fatal error handler
6093 : * will treat it as an unrecoverable system error, just like k_panic().
6094 : */
6095 1 : #define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
6096 :
6097 : /**
6098 : * @brief Fatally terminate the system
6099 : *
6100 : * This should be called when the Zephyr kernel has encountered an
6101 : * unrecoverable runtime condition and needs to terminate. What this ultimately
6102 : * means is determined by the _fatal_error_handler() implementation, which
6103 : * will be called with reason code K_ERR_KERNEL_PANIC.
6104 : */
6105 1 : #define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
6106 :
6107 : /**
6108 : * @cond INTERNAL_HIDDEN
6109 : */
6110 :
6111 : /*
6112 : * private APIs that are utilized by one or more public APIs
6113 : */
6114 :
6115 : /**
6116 : * @internal
6117 : */
6118 : void z_timer_expiration_handler(struct _timeout *timeout);
6119 : /**
6120 : * INTERNAL_HIDDEN @endcond
6121 : */
6122 :
6123 : #ifdef CONFIG_PRINTK
6124 : /**
6125 : * @brief Emit a character buffer to the console device
6126 : *
6127 : * @param c String of characters to print
6128 : * @param n The length of the string
6129 : *
6130 : */
6131 : __syscall void k_str_out(char *c, size_t n);
6132 : #endif
6133 :
6134 : /**
6135 : * @defgroup float_apis Floating Point APIs
6136 : * @ingroup kernel_apis
6137 : * @{
6138 : */
6139 :
6140 : /**
6141 : * @brief Disable preservation of floating point context information.
6142 : *
6143 : * This routine informs the kernel that the specified thread
6144 : * will no longer be using the floating point registers.
6145 : *
6146 : * @warning
6147 : * Some architectures apply restrictions on how the disabling of floating
6148 : * point preservation may be requested, see arch_float_disable.
6149 : *
6150 : * @warning
6151 : * This routine should only be used to disable floating point support for
6152 : * a thread that currently has such support enabled.
6153 : *
6154 : * @param thread ID of thread.
6155 : *
6156 : * @retval 0 On success.
6157 : * @retval -ENOTSUP If the floating point disabling is not implemented.
6158 : * @retval -EINVAL If the floating point disabling could not be performed.
6159 : */
6160 1 : __syscall int k_float_disable(struct k_thread *thread);
6161 :
6162 : /**
6163 : * @brief Enable preservation of floating point context information.
6164 : *
6165 : * This routine informs the kernel that the specified thread
6166 : * will use the floating point registers.
6167 : *
6168 : * Invoking this routine initializes the thread's floating point context info
6169 : * to that of an FPU that has been reset. The next time the thread is scheduled
6170 : * by z_swap() it will either inherit an FPU that is guaranteed to be in a
6171 : * "sane" state (if the most recent user of the FPU was cooperatively swapped
6172 : * out) or the thread's own floating point context will be loaded (if the most
6173 : * recent user of the FPU was preempted, or if this thread is the first user
6174 : * of the FPU). Thereafter, the kernel will protect the thread's FP context
6175 : * so that it is not altered during a preemptive context switch.
6176 : *
6177 : * The @a options parameter indicates which floating point register sets will
6178 : * be used by the specified thread.
6179 : *
6180 : * For x86 options:
6181 : *
6182 : * - K_FP_REGS indicates x87 FPU and MMX registers only
6183 : * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
6184 : *
6185 : * @warning
6186 : * Some architectures apply restrictions on how the enabling of floating
6187 : * point preservation may be requested, see arch_float_enable.
6188 : *
6189 : * @warning
6190 : * This routine should only be used to enable floating point support for
6191 : * a thread that does not currently have such support enabled.
6192 : *
6193 : * @param thread ID of thread.
6194 : * @param options architecture dependent options
6195 : *
6196 : * @retval 0 On success.
6197 : * @retval -ENOTSUP If the floating point enabling is not implemented.
6198 : * @retval -EINVAL If the floating point enabling could not be performed.
6199 : */
6200 1 : __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
6201 :
6202 : /**
6203 : * @}
6204 : */
6205 :
6206 : /**
6207 : * @brief Get the runtime statistics of a thread
6208 : *
6209 : * @param thread ID of thread.
6210 : * @param stats Pointer to struct to copy statistics into.
6211 : * @return -EINVAL if null pointers, otherwise 0
6212 : */
6213 1 : int k_thread_runtime_stats_get(k_tid_t thread,
6214 : k_thread_runtime_stats_t *stats);
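     :
     : /*
     :  * Illustrative sketch: read the current thread's accumulated
     :  * execution cycles. The execution_cycles field is assumed from
     :  * k_thread_runtime_stats_t, and runtime statistics gathering must be
     :  * enabled in the build for the numbers to be meaningful.
     :  *
     :  * @code
     :  * k_thread_runtime_stats_t stats;
     :  *
     :  * if (k_thread_runtime_stats_get(k_current_get(), &stats) == 0) {
     :  *     uint64_t cycles = stats.execution_cycles;
     :  * }
     :  * @endcode
     :  */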
6215 :
6216 : /**
6217 : * @brief Get the runtime statistics of all threads
6218 : *
6219 : * @param stats Pointer to struct to copy statistics into.
6220 : * @return -EINVAL if null pointers, otherwise 0
6221 : */
6222 1 : int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
6223 :
6224 : /**
6225 : * @brief Get the runtime statistics of all threads on specified cpu
6226 : *
6227 : * @param cpu The cpu number
6228 : * @param stats Pointer to struct to copy statistics into.
6229 : * @return -EINVAL if null pointers, otherwise 0
6230 : */
6231 1 : int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats);
6232 :
6233 : /**
6234 : * @brief Enable gathering of runtime statistics for specified thread
6235 : *
6236 : * This routine enables the gathering of runtime statistics for the specified
6237 : * thread.
6238 : *
6239 : * @param thread ID of thread
6240 : * @return -EINVAL if invalid thread ID, otherwise 0
6241 : */
6242 1 : int k_thread_runtime_stats_enable(k_tid_t thread);
6243 :
6244 : /**
6245 : * @brief Disable gathering of runtime statistics for specified thread
6246 : *
6247 : * This routine disables the gathering of runtime statistics for the specified
6248 : * thread.
6249 : *
6250 : * @param thread ID of thread
6251 : * @return -EINVAL if invalid thread ID, otherwise 0
6252 : */
6253 1 : int k_thread_runtime_stats_disable(k_tid_t thread);
6254 :
6255 : /**
6256 : * @brief Enable gathering of system runtime statistics
6257 : *
6258 : * This routine enables the gathering of system runtime statistics. Note that
6259 : * it does not affect the gathering of similar statistics for individual
6260 : * threads.
6261 : */
6262 1 : void k_sys_runtime_stats_enable(void);
6263 :
6264 : /**
6265 : * @brief Disable gathering of system runtime statistics
6266 : *
6267 : * This routine disables the gathering of system runtime statistics. Note that
6268 : * it does not affect the gathering of similar statistics for individual
6269 : * threads.
6270 : */
6271 1 : void k_sys_runtime_stats_disable(void);
6272 :
6273 : #ifdef __cplusplus
6274 : }
6275 : #endif
6276 :
6277 : #include <zephyr/tracing/tracing.h>
6278 : #include <zephyr/syscalls/kernel.h>
6279 :
6280 : #endif /* !_ASMLANGUAGE */
6281 :
6282 : #endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
|