Zephyr API Documentation 4.4.0-rc1
A Scalable Open Source RTOS
kernel.h
1/*
2 * Copyright (c) 2016, Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
12
13#ifndef ZEPHYR_INCLUDE_KERNEL_H_
14#define ZEPHYR_INCLUDE_KERNEL_H_
15
16#if !defined(_ASMLANGUAGE)
18#include <errno.h>
19#include <limits.h>
20#include <stdbool.h>
21#include <zephyr/toolchain.h>
26
27#ifdef __cplusplus
28extern "C" {
29#endif
30
31/*
32 * Zephyr currently assumes the size of a couple of standard types to simplify
33 * print string formats. Let's make sure this doesn't change without notice.
34 */
35BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
36BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
37BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));
38
47
48#define K_ANY NULL
49
50#if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
51#error Zero available thread priorities defined!
52#endif
53
54#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
55#define K_PRIO_PREEMPT(x) (x)
56
57#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
58#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
59#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
60#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
61#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
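/*
 * Worked example (illustrative, not part of the original header; assumes the
 * common defaults of CONFIG_NUM_COOP_PRIORITIES=16 and
 * CONFIG_NUM_PREEMPT_PRIORITIES=15): K_PRIO_COOP(0) evaluates to -16 and
 * K_PRIO_PREEMPT(0) to 0, giving cooperative priorities [-16, -1] and
 * preemptible priorities [0, 15], where 15 is K_IDLE_PRIO and 14 is the
 * lowest application priority. Numerically lower always means more urgent.
 */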
62
63#ifdef CONFIG_POLL
64#define Z_POLL_EVENT_OBJ_INIT(obj) \
65 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
66#define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
67#else
68#define Z_POLL_EVENT_OBJ_INIT(obj)
69#define Z_DECL_POLL_EVENT
70#endif
71
72struct k_thread;
73struct k_mutex;
74struct k_sem;
75struct k_msgq;
76struct k_mbox;
77struct k_pipe;
78struct k_queue;
79struct k_fifo;
80struct k_lifo;
81struct k_stack;
82struct k_mem_slab;
83struct k_timer;
84struct k_poll_event;
85struct k_poll_signal;
86struct k_mem_domain;
87struct k_mem_partition;
88struct k_futex;
89struct k_event;
90
96
97/* private, used by k_poll and k_work_poll */
98struct k_work_poll;
99typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
100
105
119static inline void
121{
122#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
123 thread->base.usage.longest = 0ULL;
124#endif
125}
126
127typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
128 void *user_data);
129
145void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
146
165#ifdef CONFIG_SMP
166void k_thread_foreach_filter_by_cpu(unsigned int cpu,
167 k_thread_user_cb_t user_cb, void *user_data);
168#else
169static inline
170void k_thread_foreach_filter_by_cpu(unsigned int cpu,
171 k_thread_user_cb_t user_cb, void *user_data)
172{
173 __ASSERT(cpu == 0, "cpu filter out of bounds");
174 ARG_UNUSED(cpu);
175 k_thread_foreach(user_cb, user_data);
176}
177#endif
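/*
 * Usage sketch (illustrative, not part of the original header; the callback
 * and counter names are hypothetical): count every thread known to the
 * kernel. The callback must not block or create new threads.
 */
static void count_threads_cb(const struct k_thread *thread, void *user_data)
{
	unsigned int *count = user_data;

	ARG_UNUSED(thread);
	(*count)++;
}

static inline unsigned int count_threads_example(void)
{
	unsigned int count = 0;

	k_thread_foreach(count_threads_cb, &count);
	return count;
}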
178
206void k_thread_foreach_unlocked(
207 k_thread_user_cb_t user_cb, void *user_data);
208
240#ifdef CONFIG_SMP
241void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
242 k_thread_user_cb_t user_cb, void *user_data);
243#else
244static inline
245void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
246 k_thread_user_cb_t user_cb, void *user_data)
247{
248 __ASSERT(cpu == 0, "cpu filter out of bounds");
249 ARG_UNUSED(cpu);
250 k_thread_foreach_unlocked(user_cb, user_data);
251}
252#endif
253
255
261
262#endif /* !_ASMLANGUAGE */
263
264
265/*
266 * Thread user options. May be needed by assembly code. The common part uses
267 * the low bits; arch-specific code uses the high bits.
268 */
269
273#define K_ESSENTIAL (BIT(0))
274
275#define K_FP_IDX 1
285#define K_FP_REGS (BIT(K_FP_IDX))
286
293#define K_USER (BIT(2))
294
303#define K_INHERIT_PERMS (BIT(3))
304
314#define K_CALLBACK_STATE (BIT(4))
315
325#define K_DSP_IDX 13
326#define K_DSP_REGS (BIT(K_DSP_IDX))
327
336#define K_AGU_IDX 14
337#define K_AGU_REGS (BIT(K_AGU_IDX))
338
348#define K_SSE_REGS (BIT(15))
349
350/* end - thread options */
351
352#if !defined(_ASMLANGUAGE)
377__syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);
378
391__syscall int k_thread_stack_free(k_thread_stack_t *stack);
392
444__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
445 k_thread_stack_t *stack,
446 size_t stack_size,
447 k_thread_entry_t entry,
448 void *p1, void *p2, void *p3,
449 int prio, uint32_t options, k_timeout_t delay);
450
472FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
473 void *p1, void *p2,
474 void *p3);
475
489#define k_thread_access_grant(thread, ...) \
490 FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)
491
506static inline void k_thread_heap_assign(struct k_thread *thread,
507 struct k_heap *heap)
508{
509 thread->resource_pool = heap;
510}
511
512#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
533__syscall int k_thread_stack_space_get(const struct k_thread *thread,
534 size_t *unused_ptr);
535
551__syscall int k_thread_runtime_stack_unused_threshold_pct_set(struct k_thread *thread,
552 uint32_t pct);
553
569__syscall int k_thread_runtime_stack_unused_threshold_set(struct k_thread *thread,
570 size_t threshold);
571
584__syscall size_t k_thread_runtime_stack_unused_threshold_get(struct k_thread *thread);
585
597typedef void (*k_thread_stack_safety_handler_t)(const struct k_thread *thread,
598 size_t unused_space, void *arg);
599
614int k_thread_runtime_stack_safety_full_check(const struct k_thread *thread,
615 size_t *unused_ptr,
616 k_thread_stack_safety_handler_t handler,
617 void *arg);
618
633int k_thread_runtime_stack_safety_threshold_check(const struct k_thread *thread,
634 size_t *unused_ptr,
635 k_thread_stack_safety_handler_t handler,
636 void *arg);
637#endif
638
639#if (K_HEAP_MEM_POOL_SIZE > 0)
652void k_thread_system_pool_assign(struct k_thread *thread);
653#endif /* (K_HEAP_MEM_POOL_SIZE > 0) */
654
674__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
675
689__syscall int32_t k_sleep(k_timeout_t timeout);
690
702static inline int32_t k_msleep(int32_t ms)
703{
704 return k_sleep(Z_TIMEOUT_MS(ms));
705}
706
724
741__syscall void k_busy_wait(uint32_t usec_to_wait);
742
754bool k_can_yield(void);
755
763__syscall void k_yield(void);
764
774__syscall void k_wakeup(k_tid_t thread);
775
789__attribute_const__
790__syscall k_tid_t k_sched_current_thread_query(void);
791
803static inline bool k_is_pre_kernel(void)
804{
805 extern bool z_sys_post_kernel; /* in init.c */
806
807 /*
808 * If called from userspace, it must be post kernel.
809 * This guard is necessary because z_sys_post_kernel memory
810 * is not accessible to user threads.
811 */
812 if (k_is_user_context()) {
813 return false;
814 }
815
816 /*
817 * Some compilers might optimize by pre-reading
818 * z_sys_post_kernel, which is undesirable here: reading
819 * z_sys_post_kernel from user context would result in an
820 * access fault, so it must only be read once we know we
821 * are not in user context. Add a compiler barrier to
822 * prevent that kind of optimization.
823 */
824 compiler_barrier();
825
826 return !z_sys_post_kernel;
827}
828
835__attribute_const__
836static inline k_tid_t k_current_get(void)
837{
838 __ASSERT(!k_is_pre_kernel(), "k_current_get called pre-kernel");
839
840#ifdef CONFIG_CURRENT_THREAD_USE_TLS
841
842 /* Thread-local cache of current thread ID, set in z_thread_entry() */
843 extern Z_THREAD_LOCAL k_tid_t z_tls_current;
844
845 return z_tls_current;
846#else
847 return z_impl_k_sched_current_thread_query();
848#endif
849}
850
870__syscall void k_thread_abort(k_tid_t thread);
871
872k_ticks_t z_timeout_expires(const struct _timeout *timeout);
873k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
874
875#ifdef CONFIG_SYS_CLOCK_EXISTS
876
884__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread);
885
886static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
887 const struct k_thread *thread)
888{
889 return z_timeout_expires(&thread->base.timeout);
890}
891
899__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread);
900
901static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
902 const struct k_thread *thread)
903{
904 return z_timeout_remaining(&thread->base.timeout);
905}
906
907#endif /* CONFIG_SYS_CLOCK_EXISTS */
908
912struct _static_thread_data {
913 struct k_thread *init_thread;
914 k_thread_stack_t *init_stack;
915 unsigned int init_stack_size;
916 k_thread_entry_t init_entry;
917 void *init_p1;
918 void *init_p2;
919 void *init_p3;
920 int init_prio;
921 uint32_t init_options;
922 const char *init_name;
923#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
924 int32_t init_delay_ms;
925#else
926 k_timeout_t init_delay;
927#endif
928};
929
930#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
931#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
932#define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
933#else
934#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS_INIT(ms)
935#define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
936#endif
937
938#define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
939 entry, p1, p2, p3, \
940 prio, options, delay, tname) \
941 { \
942 .init_thread = (thread), \
943 .init_stack = (stack), \
944 .init_stack_size = (stack_size), \
945 .init_entry = (k_thread_entry_t)entry, \
946 .init_p1 = (void *)p1, \
947 .init_p2 = (void *)p2, \
948 .init_p3 = (void *)p3, \
949 .init_prio = (prio), \
950 .init_options = (options), \
951 .init_name = STRINGIFY(tname), \
952 Z_THREAD_INIT_DELAY_INITIALIZER(delay) \
953 }
954
955/*
956 * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
957 * information on arguments.
958 */
959#define Z_THREAD_COMMON_DEFINE(name, stack_size, \
960 entry, p1, p2, p3, \
961 prio, options, delay) \
962 struct k_thread _k_thread_obj_##name; \
963 const STRUCT_SECTION_ITERABLE(_static_thread_data, \
964 _k_thread_data_##name) = \
965 Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
966 _k_thread_stack_##name, stack_size,\
967 entry, p1, p2, p3, prio, options, \
968 delay, name); \
969 __maybe_unused const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
973
1005#define K_THREAD_DEFINE(name, stack_size, \
1006 entry, p1, p2, p3, \
1007 prio, options, delay) \
1008 K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
1009 Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
1010 prio, options, delay)
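/*
 * Usage sketch (illustrative, not part of the original header; all names are
 * hypothetical): statically define and automatically start a preemptible
 * thread with a 1 KiB stack and no start delay.
 */
static void blink_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	for (;;) {
		k_msleep(500);
	}
}

K_THREAD_DEFINE(blink_tid, 1024, blink_entry, NULL, NULL, NULL,
		K_PRIO_PREEMPT(7), 0, 0);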
1011
1042#define K_KERNEL_THREAD_DEFINE(name, stack_size, \
1043 entry, p1, p2, p3, \
1044 prio, options, delay) \
1045 K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
1046 Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
1047 prio, options, delay)
1048
1058__syscall int k_thread_priority_get(k_tid_t thread);
1059
1085__syscall void k_thread_priority_set(k_tid_t thread, int prio);
1086
1087
1088#ifdef CONFIG_SCHED_DEADLINE
1120__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
1121
1162__syscall void k_thread_absolute_deadline_set(k_tid_t thread, int deadline);
1163#endif
1164
1183__syscall void k_reschedule(void);
1184
1185#ifdef CONFIG_SCHED_CPU_MASK
1198int k_thread_cpu_mask_clear(k_tid_t thread);
1199
1212int k_thread_cpu_mask_enable_all(k_tid_t thread);
1213
1226int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
1227
1240int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
1241
1252int k_thread_cpu_pin(k_tid_t thread, int cpu);
1253#endif
1254
1276__syscall void k_thread_suspend(k_tid_t thread);
1277
1289__syscall void k_thread_resume(k_tid_t thread);
1290
1304static inline void k_thread_start(k_tid_t thread)
1305{
1306 k_wakeup(thread);
1307}
1308
1335void k_sched_time_slice_set(int32_t slice, int prio);
1336
1375void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
1376 k_thread_timeslice_fn_t expired, void *data);
1377
1379
1384
1396bool k_is_in_isr(void);
1397
1414__syscall int k_is_preempt_thread(void);
1415
1419
1424
1450void k_sched_lock(void);
1451
1459void k_sched_unlock(void);
1460
1473__syscall void k_thread_custom_data_set(void *value);
1474
1482__syscall void *k_thread_custom_data_get(void);
1483
1497__syscall int k_thread_name_set(k_tid_t thread, const char *str);
1498
1507const char *k_thread_name_get(k_tid_t thread);
1508
1520__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
1521 size_t size);
1522
1535const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
1536
1540
1545
1554#define K_NO_WAIT Z_TIMEOUT_NO_WAIT
1555
1568#define K_NSEC(t) Z_TIMEOUT_NS(t)
1569
1582#define K_USEC(t) Z_TIMEOUT_US(t)
1583
1594#define K_CYC(t) Z_TIMEOUT_CYC(t)
1595
1606#define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1607
1618#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
1619
1630#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
1631
1642#define K_MINUTES(m) K_SECONDS((m) * 60)
1643
1654#define K_HOURS(h) K_MINUTES((h) * 60)
1655
1664#define K_FOREVER Z_FOREVER
1665
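/*
 * Usage sketch (illustrative, not part of the original header): every
 * blocking kernel call takes a k_timeout_t built from these helpers.
 */
static inline void timeout_example(void)
{
	k_sleep(K_MSEC(100));  /* relative: wake up 100 ms from now */
	k_sleep(K_SECONDS(2)); /* relative: 2 s, i.e. K_MSEC(2000) */
}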
1680#define K_TIMEOUT_SUM(timeout1, timeout2) K_TICKS(z_timeout_sum(timeout1, timeout2))
1681
1682#ifdef CONFIG_TIMEOUT_64BIT
1683
1695#define K_TIMEOUT_ABS_TICKS(t) \
1696 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)CLAMP(t, 0, (INT64_MAX - 1))))
1697
1709#define K_TIMEOUT_ABS_SEC(t) K_TIMEOUT_ABS_TICKS(k_sec_to_ticks_ceil64(t))
1710
1722#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1723
1736#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1737
1750#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1751
1764#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1765#endif
1766
1770
1777struct k_timer {
1781 /*
1782 * _timeout structure must be first here if we want to use
1783 * dynamic timer allocation. timeout.node is used in the doubly-linked
1784 * list of free timers
1785 */
1786 struct _timeout timeout;
1787
1788 /* wait queue for the (single) thread waiting on this timer */
1789 _wait_q_t wait_q;
1790
1791 /* runs in ISR context */
1792 void (*expiry_fn)(struct k_timer *timer);
1793
1794 /* runs in the context of the thread that calls k_timer_stop() */
1795 void (*stop_fn)(struct k_timer *timer);
1796
1797 /* timer period */
1798 k_timeout_t period;
1799
1800 /* timer status */
1801 uint32_t status;
1802
1803 /* user-specific data, also used to support legacy features */
1804 void *user_data;
1805
1807
1808#ifdef CONFIG_OBJ_CORE_TIMER
1809 struct k_obj_core obj_core;
1810#endif
1814};
1815
1816#ifdef CONFIG_TIMER_OBSERVER
1817struct k_timer_observer {
1818 /* Invoked upon completion of k_timer initialization */
1819 void (*on_init)(struct k_timer *timer);
1820
1821 /* Invoked after the timer transitions to the running state */
1822 void (*on_start)(struct k_timer *timer, k_timeout_t duration,
1823 k_timeout_t period);
1824
1825 /* Invoked when the active timer is explicitly stopped */
1826 void (*on_stop)(struct k_timer *timer);
1827
1828 /* Executes in ISR context, keep minimal and non-blocking */
1829 void (*on_expiry)(struct k_timer *timer);
1830};
1831#endif /* CONFIG_TIMER_OBSERVER */
1832
1836#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1837 { \
1838 .timeout = { \
1839 .node = {},\
1840 .fn = z_timer_expiration_handler, \
1841 .dticks = 0, \
1842 }, \
1843 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1844 .expiry_fn = expiry, \
1845 .stop_fn = stop, \
1846 .period = {}, \
1847 .status = 0, \
1848 .user_data = 0, \
1849 }
1853
1859
1870typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1871
1886typedef void (*k_timer_stop_t)(struct k_timer *timer);
1887
1899#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1900 STRUCT_SECTION_ITERABLE(k_timer, name) = \
1901 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1902
1903
1904#ifdef CONFIG_TIMER_OBSERVER
1905
1909#define Z_TIMER_OBSERVER_INITIALIZER(name, init, start, stop, expiry) \
1910 { \
1911 .on_init = init, \
1912 .on_start = start, \
1913 .on_stop = stop, \
1914 .on_expiry = expiry \
1915 }
1919
1933#define K_TIMER_OBSERVER_DEFINE(name, init, start, stop, expiry) \
1934 static const STRUCT_SECTION_ITERABLE(k_timer_observer, name) = \
1935 Z_TIMER_OBSERVER_INITIALIZER(name, init, start, stop, expiry)
1936
1937#endif /* CONFIG_TIMER_OBSERVER */
1938
1948void k_timer_init(struct k_timer *timer,
1949 k_timer_expiry_t expiry_fn,
1950 k_timer_stop_t stop_fn);
1951
1969__syscall void k_timer_start(struct k_timer *timer,
1970 k_timeout_t duration, k_timeout_t period);
1971
1988__syscall void k_timer_stop(struct k_timer *timer);
1989
2002__syscall uint32_t k_timer_status_get(struct k_timer *timer);
2003
2021__syscall uint32_t k_timer_status_sync(struct k_timer *timer);
2022
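/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): a periodic timer that first fires after 1 s and then every
 * 100 ms. The expiry function runs in ISR context and must not block.
 */
static void tick_expiry(struct k_timer *timer)
{
	ARG_UNUSED(timer);
	/* keep this short: it runs from the system clock interrupt */
}

K_TIMER_DEFINE(tick_timer, tick_expiry, NULL);

static inline void timer_example(void)
{
	k_timer_start(&tick_timer, K_SECONDS(1), K_MSEC(100));
}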
2023#ifdef CONFIG_SYS_CLOCK_EXISTS
2024
2035__syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
2036
2037static inline k_ticks_t z_impl_k_timer_expires_ticks(
2038 const struct k_timer *timer)
2039{
2040 return z_timeout_expires(&timer->timeout);
2041}
2042
2053__syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
2054
2055static inline k_ticks_t z_impl_k_timer_remaining_ticks(
2056 const struct k_timer *timer)
2057{
2058 return z_timeout_remaining(&timer->timeout);
2059}
2060
2071static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
2072{
2073 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
2074}
2075
2076#endif /* CONFIG_SYS_CLOCK_EXISTS */
2077
2090__syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
2091
2095static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
2096 void *user_data)
2097{
2098 timer->user_data = user_data;
2099}
2100
2108__syscall void *k_timer_user_data_get(const struct k_timer *timer);
2109
2110static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
2111{
2112 return timer->user_data;
2113}
2114
2116
2122
2132__syscall int64_t k_uptime_ticks(void);
2133
2147static inline int64_t k_uptime_get(void)
2148{
2149 return k_ticks_to_ms_floor64(k_uptime_ticks());
2150}
2151
2171static inline uint32_t k_uptime_get_32(void)
2172{
2173 return (uint32_t)k_uptime_get();
2174}
2175
2184static inline uint32_t k_uptime_seconds(void)
2185{
2186 return k_ticks_to_sec_floor32(k_uptime_ticks());
2187}
2188
2200static inline int64_t k_uptime_delta(int64_t *reftime)
2201{
2202 int64_t uptime, delta;
2203
2204 uptime = k_uptime_get();
2205 delta = uptime - *reftime;
2206 *reftime = uptime;
2207
2208 return delta;
2209}
2210
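/*
 * Usage sketch (illustrative, not part of the original header): measure the
 * wall-clock duration of a body of work in milliseconds.
 */
static inline int64_t measure_ms_example(void (*work_fn)(void))
{
	int64_t ref = k_uptime_get();

	work_fn();
	/* returns the elapsed ms and advances ref to "now" */
	return k_uptime_delta(&ref);
}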
2219static inline uint32_t k_cycle_get_32(void)
2220{
2221 return arch_k_cycle_get_32();
2222}
2223
2234static inline uint64_t k_cycle_get_64(void)
2235{
2236 if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
2237 __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
2238 "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
2239 return 0;
2240 }
2241
2242 return arch_k_cycle_get_64();
2243}
2244
2248
2255struct k_queue {
2259 sys_sflist_t data_q;
2260 struct k_spinlock lock;
2261 _wait_q_t wait_q;
2262
2263 Z_DECL_POLL_EVENT
2264
2269};
2270
2274#define Z_QUEUE_INITIALIZER(obj) \
2275 { \
2276 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
2277 .lock = { }, \
2278 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2279 Z_POLL_EVENT_OBJ_INIT(obj) \
2280 }
2284
2290
2298__syscall void k_queue_init(struct k_queue *queue);
2299
2313__syscall void k_queue_cancel_wait(struct k_queue *queue);
2314
2327void k_queue_append(struct k_queue *queue, void *data);
2328
2345__syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
2346
2359void k_queue_prepend(struct k_queue *queue, void *data);
2360
2377__syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
2378
2392void k_queue_insert(struct k_queue *queue, void *prev, void *data);
2393
2412int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
2413
2429int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
2430
2448__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
2449
2466bool k_queue_remove(struct k_queue *queue, void *data);
2467
2482bool k_queue_unique_append(struct k_queue *queue, void *data);
2483
2497__syscall int k_queue_is_empty(struct k_queue *queue);
2498
2499static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
2500{
2501 return sys_sflist_is_empty(&queue->data_q) ? 1 : 0;
2502}
2503
2513__syscall void *k_queue_peek_head(struct k_queue *queue);
2514
2524__syscall void *k_queue_peek_tail(struct k_queue *queue);
2525
2535#define K_QUEUE_DEFINE(name) \
2536 STRUCT_SECTION_ITERABLE(k_queue, name) = \
2537 Z_QUEUE_INITIALIZER(name)
2538
2540
2541#ifdef CONFIG_USERSPACE
2551struct k_futex {
2552 atomic_t val;
2553};
2554
2564struct z_futex_data {
2568 _wait_q_t wait_q;
2569 struct k_spinlock lock;
2573};
2574
2578#define Z_FUTEX_DATA_INITIALIZER(obj) \
2579 { \
2580 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2581 }
2585
2591
2611__syscall int k_futex_wait(struct k_futex *futex, int expected,
2612 k_timeout_t timeout);
2613
2628__syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2629
2631#endif
2632
2638
2643
2650
2651struct k_event {
2655 _wait_q_t wait_q;
2656 uint32_t events;
2657 struct k_spinlock lock;
2658
2660
2661#ifdef CONFIG_OBJ_CORE_EVENT
2662 struct k_obj_core obj_core;
2663#endif
2667};
2668
2672#define Z_EVENT_INITIALIZER(obj) \
2673 { \
2674 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2675 .events = 0, \
2676 .lock = {}, \
2677 }
2681
2689__syscall void k_event_init(struct k_event *event);
2690
2708__syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
2709
2727__syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
2728
2745__syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
2746 uint32_t events_mask);
2747
2760__syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
2761
2786__syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2787 bool reset, k_timeout_t timeout);
2788
2813__syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2814 bool reset, k_timeout_t timeout);
2815
2835__syscall uint32_t k_event_wait_safe(struct k_event *event, uint32_t events,
2836 bool reset, k_timeout_t timeout);
2837
2857__syscall uint32_t k_event_wait_all_safe(struct k_event *event, uint32_t events,
2858 bool reset, k_timeout_t timeout);
2859
2870static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
2871{
2872 return k_event_wait(event, events_mask, false, K_NO_WAIT);
2873}
2874
2884#define K_EVENT_DEFINE(name) \
2885 STRUCT_SECTION_ITERABLE(k_event, name) = \
2886 Z_EVENT_INITIALIZER(name);
2887
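/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): one context posts individual event bits, another waits for
 * any of them with a bounded timeout.
 */
K_EVENT_DEFINE(net_events);

#define EVT_LINK_UP  BIT(0)
#define EVT_ADDR_SET BIT(1)

static inline uint32_t event_example(void)
{
	k_event_post(&net_events, EVT_LINK_UP);
	/* returns the matching bits, or 0 if none arrived in time */
	return k_event_wait(&net_events, EVT_LINK_UP | EVT_ADDR_SET,
			    false, K_MSEC(100));
}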
2889
2895struct k_fifo {
2899 struct k_queue _queue;
2900#ifdef CONFIG_OBJ_CORE_FIFO
2901 struct k_obj_core obj_core;
2902#endif
2906};
2907
2911#define Z_FIFO_INITIALIZER(obj) \
2912 { \
2913 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2914 }
2918
2924
2932#define k_fifo_init(fifo) \
2933 ({ \
2934 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2935 k_queue_init(&(fifo)->_queue); \
2936 K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo); \
2937 K_OBJ_CORE_LINK(K_OBJ_CORE(fifo)); \
2938 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2939 })
2940
2952#define k_fifo_cancel_wait(fifo) \
2953 ({ \
2954 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2955 k_queue_cancel_wait(&(fifo)->_queue); \
2956 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2957 })
2958
2971#define k_fifo_put(fifo, data) \
2972 ({ \
2973 void *_data = data; \
2974 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, _data); \
2975 k_queue_append(&(fifo)->_queue, _data); \
2976 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, _data); \
2977 })
2978
2995#define k_fifo_alloc_put(fifo, data) \
2996 ({ \
2997 void *_data = data; \
2998 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, _data); \
2999 int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
3000 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, _data, fap_ret); \
3001 fap_ret; \
3002 })
3003
3018#define k_fifo_put_list(fifo, head, tail) \
3019 ({ \
3020 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
3021 k_queue_append_list(&(fifo)->_queue, head, tail); \
3022 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
3023 })
3024
3038#define k_fifo_put_slist(fifo, list) \
3039 ({ \
3040 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
3041 k_queue_merge_slist(&(fifo)->_queue, list); \
3042 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
3043 })
3044
3062#define k_fifo_get(fifo, timeout) \
3063 ({ \
3064 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
3065 void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
3066 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
3067 fg_ret; \
3068 })
3069
3083#define k_fifo_is_empty(fifo) \
3084 k_queue_is_empty(&(fifo)->_queue)
3085
3099#define k_fifo_peek_head(fifo) \
3100 ({ \
3101 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
3102 void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
3103 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
3104 fph_ret; \
3105 })
3106
3118#define k_fifo_peek_tail(fifo) \
3119 ({ \
3120 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
3121 void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
3122 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
3123 fpt_ret; \
3124 })
3125
3135#define K_FIFO_DEFINE(name) \
3136 STRUCT_SECTION_ITERABLE(k_fifo, name) = \
3137 Z_FIFO_INITIALIZER(name)
3138
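/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): items placed on a FIFO must reserve their first word for
 * the kernel's internal use.
 */
struct my_fifo_item {
	void *fifo_reserved; /* first word: reserved for the kernel */
	uint32_t payload;
};

K_FIFO_DEFINE(my_fifo);

static inline uint32_t fifo_example(struct my_fifo_item *item)
{
	k_fifo_put(&my_fifo, item);

	struct my_fifo_item *got = k_fifo_get(&my_fifo, K_FOREVER);

	return got->payload;
}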
3140
3146struct k_lifo {
3150 struct k_queue _queue;
3151#ifdef CONFIG_OBJ_CORE_LIFO
3152 struct k_obj_core obj_core;
3153#endif
3157};
3158
3162#define Z_LIFO_INITIALIZER(obj) \
3163 { \
3164 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
3165 }
3169
3175
3183#define k_lifo_init(lifo) \
3184 ({ \
3185 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
3186 k_queue_init(&(lifo)->_queue); \
3187 K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo); \
3188 K_OBJ_CORE_LINK(K_OBJ_CORE(lifo)); \
3189 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
3190 })
3191
3204#define k_lifo_put(lifo, data) \
3205 ({ \
3206 void *_data = data; \
3207 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, _data); \
3208 k_queue_prepend(&(lifo)->_queue, _data); \
3209 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, _data); \
3210 })
3211
3228#define k_lifo_alloc_put(lifo, data) \
3229 ({ \
3230 void *_data = data; \
3231 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, _data); \
3232 int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
3233 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, _data, lap_ret); \
3234 lap_ret; \
3235 })
3236
3254#define k_lifo_get(lifo, timeout) \
3255 ({ \
3256 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
3257 void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
3258 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
3259 lg_ret; \
3260 })
3261
3271#define K_LIFO_DEFINE(name) \
3272 STRUCT_SECTION_ITERABLE(k_lifo, name) = \
3273 Z_LIFO_INITIALIZER(name)
3274
3276
3280#define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
3281
3282typedef uintptr_t stack_data_t;
3283
3284struct k_stack {
3285 _wait_q_t wait_q;
3286 struct k_spinlock lock;
3287 stack_data_t *base, *next, *top;
3288
3289 uint8_t flags;
3290
3292
3293#ifdef CONFIG_OBJ_CORE_STACK
3294 struct k_obj_core obj_core;
3295#endif
3296};
3297
3298#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
3299 { \
3300 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3301 .base = (stack_buffer), \
3302 .next = (stack_buffer), \
3303 .top = (stack_buffer) + (stack_num_entries), \
3304 }
3308
3314
3324void k_stack_init(struct k_stack *stack,
3325 stack_data_t *buffer, uint32_t num_entries);
3326
3327
3341
3342__syscall int32_t k_stack_alloc_init(struct k_stack *stack,
3343 uint32_t num_entries);
3344
3356int k_stack_cleanup(struct k_stack *stack);
3357
3371__syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
3372
3393__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
3394 k_timeout_t timeout);
3395
3406#define K_STACK_DEFINE(name, stack_num_entries) \
3407 stack_data_t __noinit \
3408 _k_stack_buf_##name[stack_num_entries]; \
3409 STRUCT_SECTION_ITERABLE(k_stack, name) = \
3410 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
3411 stack_num_entries)
3412
3414
3418struct k_work;
3419struct k_work_q;
3420struct k_work_queue_config;
3421extern struct k_work_q k_sys_work_q;
3425
3431
3437struct k_mutex {
3442 _wait_q_t wait_q;
3444 struct k_thread *owner;
3445
3447 uint32_t lock_count;
3448
3450 int owner_orig_prio;
3451
3453
3454#ifdef CONFIG_OBJ_CORE_MUTEX
3455 struct k_obj_core obj_core;
3456#endif
3460};
3461
3465#define Z_MUTEX_INITIALIZER(obj) \
3466 { \
3467 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3468 .owner = NULL, \
3469 .lock_count = 0, \
3470 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
3471 }
3475
3485#define K_MUTEX_DEFINE(name) \
3486 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
3487 Z_MUTEX_INITIALIZER(name)
3488
3501__syscall int k_mutex_init(struct k_mutex *mutex);
3502
3503
3525__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
3526
3547__syscall int k_mutex_unlock(struct k_mutex *mutex);
3548
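/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): the canonical bounded lock/unlock pattern. Mutexes are
 * reentrant for the owning thread and must not be used from ISRs.
 */
K_MUTEX_DEFINE(my_lock);

static inline int mutex_example(void)
{
	if (k_mutex_lock(&my_lock, K_MSEC(50)) != 0) {
		return -EBUSY; /* could not acquire within 50 ms */
	}
	/* ... critical section ... */
	return k_mutex_unlock(&my_lock);
}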
3552
3561struct k_condvar {
3562 _wait_q_t wait_q;
3563
3564#ifdef CONFIG_OBJ_CORE_CONDVAR
3565 struct k_obj_core obj_core;
3566#endif
3570};
3571
3575#define Z_CONDVAR_INITIALIZER(obj) \
3576 { \
3577 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
3578 }
3582
3588
3595__syscall int k_condvar_init(struct k_condvar *condvar);
3596
3603__syscall int k_condvar_signal(struct k_condvar *condvar);
3604
3612__syscall int k_condvar_broadcast(struct k_condvar *condvar);
3613
3631__syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
3632 k_timeout_t timeout);
3633
3644#define K_CONDVAR_DEFINE(name) \
3645 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
3646 Z_CONDVAR_INITIALIZER(name)
3647
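/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): the classic mutex-protected predicate wait. The mutex must
 * be held when calling k_condvar_wait(); it is released while waiting and
 * re-acquired before returning.
 */
K_MUTEX_DEFINE(state_lock);
K_CONDVAR_DEFINE(state_cond);
static bool state_ready;

static inline void condvar_waiter_example(void)
{
	k_mutex_lock(&state_lock, K_FOREVER);
	while (!state_ready) {
		k_condvar_wait(&state_cond, &state_lock, K_FOREVER);
	}
	k_mutex_unlock(&state_lock);
}

static inline void condvar_signaler_example(void)
{
	k_mutex_lock(&state_lock, K_FOREVER);
	state_ready = true;
	k_condvar_signal(&state_cond);
	k_mutex_unlock(&state_lock);
}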
3650
3656
3663struct k_sem {
3667 _wait_q_t wait_q;
3668 unsigned int count;
3669 unsigned int limit;
3670
3671 Z_DECL_POLL_EVENT
3672
3674
3675#ifdef CONFIG_OBJ_CORE_SEM
3676 struct k_obj_core obj_core;
3677#endif
3681};
3682
3686#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3687 { \
3688 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3689 .count = (initial_count), \
3690 .limit = (count_limit), \
3691 Z_POLL_EVENT_OBJ_INIT(obj) \
3692 }
3696
3705#define K_SEM_MAX_LIMIT UINT_MAX
3706
3722__syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3723 unsigned int limit);
3724
3743__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3744
3755__syscall void k_sem_give(struct k_sem *sem);
3756
3766__syscall void k_sem_reset(struct k_sem *sem);
3767
3777__syscall unsigned int k_sem_count_get(struct k_sem *sem);
3778
3782static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3783{
3784 return sem->count;
3785}
3786
3798#define K_SEM_DEFINE(name, initial_count, count_limit) \
3799 STRUCT_SECTION_ITERABLE(k_sem, name) = \
3800 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3801 BUILD_ASSERT(((count_limit) != 0) && \
3802 ((initial_count) <= (count_limit)) && \
3803 ((count_limit) <= K_SEM_MAX_LIMIT));
3804
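/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): a binary semaphore given from an ISR and taken by a thread.
 */
K_SEM_DEFINE(data_ready, 0, 1);

static inline void producer_isr_example(void)
{
	k_sem_give(&data_ready); /* never blocks; ISR-safe */
}

static inline int consumer_example(void)
{
	/* 0 on success, -EAGAIN if nothing arrived within 10 ms */
	return k_sem_take(&data_ready, K_MSEC(10));
}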
3806
3807#if defined(CONFIG_SCHED_IPI_SUPPORTED) || defined(__DOXYGEN__)
3808struct k_ipi_work;
3809
3810
3811typedef void (*k_ipi_func_t)(struct k_ipi_work *work);
3812
3822struct k_ipi_work {
3823 sys_dnode_t node[CONFIG_MP_MAX_NUM_CPUS]; /* Node in IPI work queue */
3824 k_ipi_func_t func; /* Function to execute on target CPU */
3825 struct k_event event; /* Event to signal when processed */
3826 uint32_t bitmask; /* Bitmask of targeted CPUs */
3830};
3831
3832
3840static inline void k_ipi_work_init(struct k_ipi_work *work)
3841{
3842 k_event_init(&work->event);
3843 for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
3844 sys_dnode_init(&work->node[i]);
3845 }
3846 work->bitmask = 0;
3847}
3848
3867int k_ipi_work_add(struct k_ipi_work *work, uint32_t cpu_bitmask,
3868 k_ipi_func_t func);
3869
3892int k_ipi_work_wait(struct k_ipi_work *work, k_timeout_t timeout);
3893
3903
3904#endif /* CONFIG_SCHED_IPI_SUPPORTED */
3905
3909struct k_work_delayable;
3910struct k_work_sync;
3914
3920
3927typedef void (*k_work_handler_t)(struct k_work *work);
3928
3942void k_work_init(struct k_work *work,
3943 k_work_handler_t handler);
3944
3959int k_work_busy_get(const struct k_work *work);
3960
3974static inline bool k_work_is_pending(const struct k_work *work);
3975
3996int k_work_submit_to_queue(struct k_work_q *queue,
3997 struct k_work *work);
3998
4007int k_work_submit(struct k_work *work);
4008
4033bool k_work_flush(struct k_work *work,
4034 struct k_work_sync *sync);
4035
4055int k_work_cancel(struct k_work *work);
4056
4087bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
4088
4098void k_work_queue_init(struct k_work_q *queue);
4099
4119void k_work_queue_start(struct k_work_q *queue,
4120 k_thread_stack_t *stack, size_t stack_size,
4121 int prio, const struct k_work_queue_config *cfg);
4122
4133void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *cfg);
4134
4144static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
4145
4169int k_work_queue_drain(struct k_work_q *queue, bool plug);
4170
4184int k_work_queue_unplug(struct k_work_q *queue);
4185
4201int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout);
4202
4216void k_work_init_delayable(struct k_work_delayable *dwork,
4217 k_work_handler_t handler);
4218
4230static inline struct k_work_delayable *
4231k_work_delayable_from_work(struct k_work *work);
4232
4246int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
4247
4262static inline bool k_work_delayable_is_pending(
4263 const struct k_work_delayable *dwork);
4264
4278static inline k_ticks_t k_work_delayable_expires_get(
4279 const struct k_work_delayable *dwork);
4280
4294static inline k_ticks_t k_work_delayable_remaining_get(
4295 const struct k_work_delayable *dwork);
4296
4324int k_work_schedule_for_queue(struct k_work_q *queue,
4325 struct k_work_delayable *dwork,
4326 k_timeout_t delay);
4327
4341int k_work_schedule(struct k_work_delayable *dwork,
4342 k_timeout_t delay);
4343
4379int k_work_reschedule_for_queue(struct k_work_q *queue,
4380 struct k_work_delayable *dwork,
4381 k_timeout_t delay);
4382
4395int k_work_reschedule(struct k_work_delayable *dwork,
4396 k_timeout_t delay);
4397
4422bool k_work_flush_delayable(struct k_work_delayable *dwork,
4423 struct k_work_sync *sync);
4424
4445int k_work_cancel_delayable(struct k_work_delayable *dwork);
4446
4475bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
4476 struct k_work_sync *sync);
4477
4478enum {
4482 /* The atomic API is used for all work and queue flags fields to
4483 * enforce sequential consistency in SMP environments.
4484 */
4485
4486 /* Bits that represent the work item states. At least nine of the
4487 * combinations are distinct valid stable states.
4488 */
4489 K_WORK_RUNNING_BIT = 0,
4490 K_WORK_CANCELING_BIT = 1,
4491 K_WORK_QUEUED_BIT = 2,
4492 K_WORK_DELAYED_BIT = 3,
4493 K_WORK_FLUSHING_BIT = 4,
4494
4495 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
4496 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),
4497
4498 /* Static work flags */
4499 K_WORK_DELAYABLE_BIT = 8,
4500 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
4501
4502 /* Dynamic work queue flags */
4503 K_WORK_QUEUE_STARTED_BIT = 0,
4504 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
4505 K_WORK_QUEUE_BUSY_BIT = 1,
4506 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
4507 K_WORK_QUEUE_DRAIN_BIT = 2,
4508 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
4509 K_WORK_QUEUE_PLUGGED_BIT = 3,
4510 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
4511 K_WORK_QUEUE_STOP_BIT = 4,
4512 K_WORK_QUEUE_STOP = BIT(K_WORK_QUEUE_STOP_BIT),
4513
4514 /* Static work queue flags */
4515 K_WORK_QUEUE_NO_YIELD_BIT = 8,
4516 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
4520 /* Transient work flags */
4521
4527 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
4528
4533 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
4534
4540 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
4541
4547 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
4548
4553 K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
4554};
4555
4561struct k_work {
4565 /* All fields are protected by the work module spinlock. */
4566
4567 /* Node to link into k_work_q pending list. */
4568 sys_snode_t node;
4569
4570 /* The function to be invoked by the work queue thread. */
4571 k_work_handler_t handler;
4572
4573 /* The queue on which the work item was last submitted. */
4574 struct k_work_q *queue;
4575
4576 /* State of the work item.
4577 *
4578 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
4579 *
4580 * It can be RUNNING and CANCELING simultaneously.
4581 */
4582 uint32_t flags;
4586};
4587
4591#define Z_WORK_INITIALIZER(work_handler) { \
4592 .handler = (work_handler), \
4593}
4597
4606struct k_work_delayable {
4607 /* The work item. */
4608 struct k_work work;
4609
4610 /* Timeout used to submit work after a delay. */
4611 struct _timeout timeout;
4612
4613 /* The queue to which the work should be submitted. */
4614 struct k_work_q *queue;
4618};
4619
4623#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
4624 .work = { \
4625 .handler = (work_handler), \
4626 .flags = K_WORK_DELAYABLE, \
4627 }, \
4628}
4632
4649#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
4650 struct k_work_delayable work \
4651 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
4652
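/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): debounce an input by rescheduling a delayable item. Each
 * call pushes the deadline out, so the handler runs once, 20 ms after the
 * last event.
 */
static void debounce_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);

	ARG_UNUSED(dwork);
	/* ... read the now-stable input ... */
}

K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);

static inline void input_event_example(void)
{
	k_work_reschedule(&debounce_work, K_MSEC(20));
}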
4656/* Record used to wait for work to flush.
4657 *
4658 * The work item is inserted into the queue that will process (or is
4659 * processing) the item, and will be processed as soon as the item
4660 * completes. When the flusher is processed the semaphore will be
4661 * signaled, releasing the thread waiting for the flush.
4662 */
4663struct z_work_flusher {
4664 struct k_work work;
4665 struct k_sem sem;
4666};
4667
4668/* Record used to wait for work to complete a cancellation.
4669 *
4670 * The work item is inserted into a global queue of pending cancels.
4671 * When a cancelling work item goes idle, any matching waiters are
4672 * removed from pending_cancels and are woken.
4673 */
4674struct z_work_canceller {
4675 sys_snode_t node;
4676 struct k_work *work;
4677 struct k_sem sem;
4678};
4682
4701struct k_work_sync {
4702 union {
4703 struct z_work_flusher flusher;
4704 struct z_work_canceller canceller;
4705 };
4709};
4710
4721struct k_work_queue_config {
4722 const char *name;
4723
4736 bool no_yield;
4737
4741 bool essential;
4742
4752};
4753
4759struct k_work_q {
4763 /* The thread that animates the work. */
4764 struct k_thread thread;
4765
4766 /* The thread ID that animates the work. This may be an external thread
4767 * if k_work_queue_run() is used.
4768 */
4769 k_tid_t thread_id;
4770
4771 /* All the following fields must be accessed only while the
4772 * work module spinlock is held.
4773 */
4774
4775 /* List of k_work items to be worked. */
4776 sys_slist_t pending;
4777
4778 /* Wait queue for idle work thread. */
4779 _wait_q_t notifyq;
4780
4781 /* Wait queue for threads waiting for the queue to drain. */
4782 _wait_q_t drainq;
4783
4784 /* Flags describing queue state. */
4785 uint32_t flags;
4786
4787#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
4788 struct _timeout work_timeout_record;
4789 struct k_work *work;
4790 k_timeout_t work_timeout;
4791#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
4795};
4796
4797/* Provide the implementation for inline functions declared above */
4798
4799static inline bool k_work_is_pending(const struct k_work *work)
4800{
4801 return k_work_busy_get(work) != 0;
4802}
4803
4804static inline struct k_work_delayable *
4805k_work_delayable_from_work(struct k_work *work)
4806{
4807 return CONTAINER_OF(work, struct k_work_delayable, work);
4808}
4809
4810static inline bool k_work_delayable_is_pending(
4811 const struct k_work_delayable *dwork)
4812{
4813 return k_work_delayable_busy_get(dwork) != 0;
4814}
4815
4816static inline k_ticks_t k_work_delayable_expires_get(
4817 const struct k_work_delayable *dwork)
4818{
4819 return z_timeout_expires(&dwork->timeout);
4820}
4821
4822static inline k_ticks_t k_work_delayable_remaining_get(
4823 const struct k_work_delayable *dwork)
4824{
4825 return z_timeout_remaining(&dwork->timeout);
4826}
4827
4828static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
4829{
4830 return queue->thread_id;
4831}
4832
4834
4835struct k_work_user;
4836
4841
4851typedef void (*k_work_user_handler_t)(struct k_work_user *work);
4852
4856struct k_work_user_q {
4857 struct k_queue queue;
4858 struct k_thread thread;
4859};
4860
4861enum {
4862 K_WORK_USER_STATE_PENDING, /* Work item pending state */
4863};
4864
4865struct k_work_user {
4866 void *_reserved; /* Used by k_queue implementation. */
4867 k_work_user_handler_t handler;
4868 atomic_t flags;
4869};
4870
4871#if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4872#define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4873#else
4874#define Z_WORK_USER_INITIALIZER(work_handler) \
4875 { \
4876 ._reserved = NULL, \
4877 .handler = (work_handler), \
4878 .flags = 0 \
4879 }
4880#endif
4884
4896#define K_WORK_USER_DEFINE(work, work_handler) \
4897 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4898
4908static inline void k_work_user_init(struct k_work_user *work,
4909 k_work_user_handler_t handler)
4910{
4911 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4912}
4913
4930static inline bool k_work_user_is_pending(struct k_work_user *work)
4931{
4932 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4933}
4934
4953static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4954 struct k_work_user *work)
4955{
4956 int ret = -EBUSY;
4957
4958 if (!atomic_test_and_set_bit(&work->flags,
4959 K_WORK_USER_STATE_PENDING)) {
4960 ret = k_queue_alloc_append(&work_q->queue, work);
4961
4962 /* Couldn't insert into the queue. Clear the pending bit
4963 * so the work item can be submitted again
4964 */
4965 if (ret != 0) {
4966 atomic_clear_bit(&work->flags,
4967 K_WORK_USER_STATE_PENDING);
4968 }
4969 }
4970
4971 return ret;
4972}
4973
4993void k_work_user_queue_start(struct k_work_user_q *work_q,
4994 k_thread_stack_t *stack,
4995 size_t stack_size, int prio,
4996 const char *name);
4997
5008static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
5009{
5010 return &work_q->thread;
5011}
5012
5014
5018struct k_work_poll {
5019 struct k_work work;
5020 struct k_work_q *workq;
5021 struct z_poller poller;
5022 struct k_poll_event *events;
5023 int num_events;
5024 k_work_handler_t real_handler;
5025 struct _timeout timeout;
5026 int poll_result;
5027};
5031
5036
5048#define K_WORK_DEFINE(work, work_handler) \
5049 struct k_work work = Z_WORK_INITIALIZER(work_handler)
5050
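/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): hand work off to the system work queue; the handler later
 * runs in that queue's thread context, where blocking is allowed.
 */
static void deferred_handler(struct k_work *work)
{
	ARG_UNUSED(work);
	/* ... potentially blocking processing ... */
}

K_WORK_DEFINE(deferred_work, deferred_handler);

static inline void from_isr_example(void)
{
	/* ISR-safe; returns >= 0 on success */
	(void)k_work_submit(&deferred_work);
}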
5060void k_work_poll_init(struct k_work_poll *work,
5061 k_work_handler_t handler);
5062
5097int k_work_poll_submit_to_queue(struct k_work_q *work_q,
5098 struct k_work_poll *work,
5099 struct k_poll_event *events,
5100 int num_events,
5101 k_timeout_t timeout);
5102
5134int k_work_poll_submit(struct k_work_poll *work,
5135 struct k_poll_event *events,
5136 int num_events,
5137 k_timeout_t timeout);
5138
5153int k_work_poll_cancel(struct k_work_poll *work);
5154
5156
5162
5168struct k_msgq {
5173 _wait_q_t wait_q;
5175 struct k_spinlock lock;
5177 size_t msg_size;
5179 uint32_t max_msgs;
5181 char *buffer_start;
5183 char *buffer_end;
5185 char *read_ptr;
5187 char *write_ptr;
5189 uint32_t used_msgs;
5190
5191 Z_DECL_POLL_EVENT
5192
5194 uint8_t flags;
5195
5197
5198#ifdef CONFIG_OBJ_CORE_MSGQ
5199 struct k_obj_core obj_core;
5200#endif
5204};
5205
5209#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
5210 { \
5211 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
5212 .lock = {}, \
5213 .msg_size = q_msg_size, \
5214 .max_msgs = q_max_msgs, \
5215 .buffer_start = q_buffer, \
5216 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
5217 .read_ptr = q_buffer, \
5218 .write_ptr = q_buffer, \
5219 .used_msgs = 0, \
5220 Z_POLL_EVENT_OBJ_INIT(obj) \
5221 .flags = 0, \
5222 }
5226
5227
5228#define K_MSGQ_FLAG_ALLOC BIT(0)
5229
5230struct k_msgq_attrs {
5232 size_t msg_size;
5234 uint32_t max_msgs;
5236 uint32_t used_msgs;
5238};
5241
5242
5261#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
5262 static char __noinit __aligned(q_align) \
5263 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
5264 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
5265 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
5266 (q_msg_size), (q_max_msgs))
5267
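/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): a queue of ten 8-byte messages aligned to 4 bytes, with a
 * non-blocking producer and a blocking consumer.
 */
struct my_msg {
	uint32_t id;
	uint32_t value;
};

K_MSGQ_DEFINE(my_msgq, sizeof(struct my_msg), 10, 4);

static inline int msgq_example(void)
{
	struct my_msg out = { .id = 1, .value = 42 };
	struct my_msg in;

	(void)k_msgq_put(&my_msgq, &out, K_NO_WAIT); /* fails fast if full */
	return k_msgq_get(&my_msgq, &in, K_FOREVER); /* 0 on success */
}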
5282void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
5283 uint32_t max_msgs);
5284
5304__syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
5305 uint32_t max_msgs);
5306
5317int k_msgq_cleanup(struct k_msgq *msgq);
5318
5339__syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
5340
5365__syscall int k_msgq_put_front(struct k_msgq *msgq, const void *data);
5366
5387__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
5388
5403__syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
5404
5421__syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
5422
5432__syscall void k_msgq_purge(struct k_msgq *msgq);
5433
5444__syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
5445
5454__syscall void k_msgq_get_attrs(struct k_msgq *msgq,
5455 struct k_msgq_attrs *attrs);
5456
5457
5458static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
5459{
5460 return msgq->max_msgs - msgq->used_msgs;
5461}
5462
5472__syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
5473
5474static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
5475{
5476 return msgq->used_msgs;
5477}
5478
5480
5486
5492struct k_mbox_msg {
5493 size_t size;
5497 void *tx_data;
5506 k_tid_t _syncing_thread;
5507#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
5509 struct k_sem *_async_sem;
5510#endif
5514};
5515
5520struct k_mbox {
5525 _wait_q_t tx_msg_queue;
5527 _wait_q_t rx_msg_queue;
5528 struct k_spinlock lock;
5529
5531
5532#ifdef CONFIG_OBJ_CORE_MAILBOX
5533 struct k_obj_core obj_core;
5534#endif
5538};
5539
5543#define Z_MBOX_INITIALIZER(obj) \
5544 { \
5545 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
5546 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
5547 }
5551
5561#define K_MBOX_DEFINE(name) \
5562 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
5563 Z_MBOX_INITIALIZER(name) \
5564
5565
5572void k_mbox_init(struct k_mbox *mbox);
5573
5593int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
5594 k_timeout_t timeout);
5595
5609void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
5610 struct k_sem *sem);
5611
5629int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
5630 void *buffer, k_timeout_t timeout);
5631
5645void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
5646
5648
5654
5664__syscall void k_pipe_init(struct k_pipe *pipe, uint8_t *buffer, size_t buffer_size);
5665
5670
5676struct k_pipe {
5680 size_t waiting;
5681 struct ring_buf buf;
5682 struct k_spinlock lock;
5683 _wait_q_t data;
5684 _wait_q_t space;
5685 uint8_t flags;
5686
5687 Z_DECL_POLL_EVENT
5688#ifdef CONFIG_OBJ_CORE_PIPE
5689 struct k_obj_core obj_core;
5690#endif
5695};
5696
5700#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
5701{ \
5702 .waiting = 0, \
5703 .buf = RING_BUF_INIT(pipe_buffer, pipe_buffer_size), \
5704 .data = Z_WAIT_Q_INIT(&obj.data), \
5705 .space = Z_WAIT_Q_INIT(&obj.space), \
5706 .flags = PIPE_FLAG_OPEN, \
5707 Z_POLL_EVENT_OBJ_INIT(obj) \
5708}
5712
5726#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
5727 static unsigned char __noinit __aligned(pipe_align) \
5728 _k_pipe_buf_##name[pipe_buffer_size]; \
5729 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
5730 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
5731
5732
5749__syscall int k_pipe_write(struct k_pipe *pipe, const uint8_t *data, size_t len,
5750 k_timeout_t timeout);
5751
5767__syscall int k_pipe_read(struct k_pipe *pipe, uint8_t *data, size_t len,
5768 k_timeout_t timeout);
5769
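/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): a 64-byte pipe carrying a raw byte stream. Both calls
 * return the number of bytes transferred or a negative error code.
 */
K_PIPE_DEFINE(my_pipe, 64, 4);

static inline int pipe_example(void)
{
	static const uint8_t msg[] = "hello";
	uint8_t rx[sizeof(msg)];
	int written = k_pipe_write(&my_pipe, msg, sizeof(msg), K_MSEC(100));

	if (written < 0) {
		return written;
	}
	return k_pipe_read(&my_pipe, rx, sizeof(rx), K_MSEC(100));
}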
5779__syscall void k_pipe_reset(struct k_pipe *pipe);
5780
5789__syscall void k_pipe_close(struct k_pipe *pipe);
5791
5795struct k_mem_slab_info {
5796 uint32_t num_blocks;
5797 size_t block_size;
5798 uint32_t num_used;
5799#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5800 uint32_t max_used;
5801#endif
5802};
5803
5804struct k_mem_slab {
5805 _wait_q_t wait_q;
5806 struct k_spinlock lock;
5807 char *buffer;
5808 char *free_list;
5809 struct k_mem_slab_info info;
5810
5812
5813#ifdef CONFIG_OBJ_CORE_MEM_SLAB
5814 struct k_obj_core obj_core;
5815#endif
5816};
5817
5818#define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
5819 _slab_num_blocks) \
5820 { \
5821 .wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q), \
5822 .lock = {}, \
5823 .buffer = _slab_buffer, \
5824 .free_list = NULL, \
5825 .info = {_slab_num_blocks, _slab_block_size, 0} \
5826 }
5830
5836
5862#define K_MEM_SLAB_DEFINE_IN_SECT(name, in_section, slab_block_size, slab_num_blocks, slab_align) \
5863 BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0, \
5864 "slab_block_size must be a multiple of slab_align"); \
5865 BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0), \
5866 "slab_align must be a power of 2"); \
5867 char in_section __aligned(WB_UP( \
5868 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5869 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER( \
5870 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
5871
5895#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
5896 K_MEM_SLAB_DEFINE_IN_SECT(name, __noinit_named(k_mem_slab_buf_##name), slab_block_size, \
5897 slab_num_blocks, slab_align)
5898
5915#define K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, in_section, slab_block_size, slab_num_blocks, \
5916 slab_align) \
5917 BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0, \
5918 "slab_block_size must be a multiple of slab_align"); \
5919 BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0), \
5920 "slab_align must be a power of 2"); \
5921 static char in_section __aligned(WB_UP( \
5922 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5923 static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER( \
5924 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
5925
5940#define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5941 K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, __noinit_named(k_mem_slab_buf_##name), \
5942 slab_block_size, slab_num_blocks, slab_align)
5943
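/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): a slab of eight 64-byte blocks with constant-time,
 * fragmentation-free allocation.
 */
K_MEM_SLAB_DEFINE_STATIC(my_slab, 64, 8, 4);

static inline void slab_example(void)
{
	void *block;

	if (k_mem_slab_alloc(&my_slab, &block, K_NO_WAIT) == 0) {
		/* ... use the 64-byte block ... */
		k_mem_slab_free(&my_slab, block);
	}
}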
5965int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5966 size_t block_size, uint32_t num_blocks);
5967
5990int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5991 k_timeout_t timeout);
5992
6004void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
6005
6018static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
6019{
6020 return slab->info.num_used;
6021}
6022
6035static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
6036{
6037#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
6038 return slab->info.max_used;
6039#else
6040 ARG_UNUSED(slab);
6041 return 0;
6042#endif
6043}
6044
6057static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
6058{
6059 return slab->info.num_blocks - slab->info.num_used;
6060}
6061
6075
6076int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
6077
6091int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
6092
6094
6099
6105struct k_heap {
6109 struct sys_heap heap;
6110 _wait_q_t wait_q;
6111 struct k_spinlock lock;
6115};
6116
6130void k_heap_init(struct k_heap *h, void *mem,
6131 size_t bytes) __attribute_nonnull(1);
6132
6153void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
6154 k_timeout_t timeout) __attribute_nonnull(1);
6155
6177void *k_heap_alloc(struct k_heap *h, size_t bytes,
6178 k_timeout_t timeout) __attribute_nonnull(1);
6179
6202void *k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
6203 __attribute_nonnull(1);
6204
6228void *k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
6229 __attribute_nonnull(1);
6230
6241void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
6242
6243/*
6244 * Heap sizing constants computed at build time from actual struct layouts
6245 * in lib/heap/heap_constants.c via the gen_offset mechanism.
6246 */
6247#include <zephyr/heap_constants.h>
6248
6249/* chunk0 size in bytes for nb buckets (includes trailer metadata) */
6250#define _Z_HEAP_C0(nb) \
6251 (ROUND_UP(___z_heap_struct_SIZEOF + \
6252 (nb) * ___z_heap_bucket_SIZEOF, ___z_heap_chunk_unit_SIZEOF) + \
6253 ___z_heap_trailer_SIZEOF)
6254
6255/* Allocation chunk size in bytes (header + data rounded up, plus trailer) */
6256#define _Z_HEAP_AC(ab) \
6257 (ROUND_UP(___z_heap_hdr_SIZEOF + (ab), ___z_heap_chunk_unit_SIZEOF) + \
6258 ___z_heap_trailer_SIZEOF)
6259
6260/* Total heap size in chunk units */
6261#define _Z_HEAP_SZ(nb, ab) \
6262 ((_Z_HEAP_C0(nb) + _Z_HEAP_AC(ab)) / ___z_heap_chunk_unit_SIZEOF)
6263
6264/* Bucket count from heap size in chunk units (mirrors bucket_idx() + 1) */
6265#define _Z_HEAP_NB(sz) \
6266 (32 - __builtin_clz((unsigned int)((sz) - \
6267 ___z_heap_min_chunk_SIZEOF + 1)))
6268
6269/* 3-round convergent iteration starting from 1 bucket */
6270#define _Z_HEAP_NB1(ab) _Z_HEAP_NB(_Z_HEAP_SZ(1, ab))
6271#define _Z_HEAP_NB2(ab) _Z_HEAP_NB(_Z_HEAP_SZ(_Z_HEAP_NB1(ab), ab))
6272#define _Z_HEAP_NB3(ab) _Z_HEAP_NB(_Z_HEAP_SZ(_Z_HEAP_NB2(ab), ab))
6273
6287#define Z_HEAP_MIN_SIZE_FOR(alloc_bytes) \
6288 (_Z_HEAP_C0(_Z_HEAP_NB3(alloc_bytes)) + \
6289 _Z_HEAP_AC(alloc_bytes) + ___z_heap_ftr_SIZEOF)
6290
6291#define Z_HEAP_MIN_SIZE Z_HEAP_MIN_SIZE_FOR(1)
6292
6309#define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
6310 char in_section \
6311 __aligned(8) /* CHUNK_UNIT */ \
6312 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
6313 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
6314 .heap = { \
6315 .init_mem = kheap_##name, \
6316 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
6317 }, \
6318 }
6319
6334#define K_HEAP_DEFINE(name, bytes) \
6335 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
6336 __noinit_named(kheap_buf_##name))
6337
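/*
 * Usage sketch (illustrative, not part of the original header; names are
 * hypothetical): a dedicated 2 KiB heap; the allocation may block up to
 * 10 ms waiting for another thread to free memory.
 */
K_HEAP_DEFINE(my_heap, 2048);

static inline void heap_example(void)
{
	void *buf = k_heap_alloc(&my_heap, 128, K_MSEC(10));

	if (buf != NULL) {
		/* ... use buf ... */
		k_heap_free(&my_heap, buf);
	}
}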
6352#define K_HEAP_DEFINE_NOCACHE(name, bytes) \
6353 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
6354
6364int k_heap_array_get(struct k_heap **heap);
6365
6369
6376
6395void *k_aligned_alloc(size_t align, size_t size);
6396
6408void *k_malloc(size_t size);
6409
6420void k_free(void *ptr);
6421
6433void *k_calloc(size_t nmemb, size_t size);
6434
6452void *k_realloc(void *ptr, size_t size);
6453
6455
6456/* polling API - PRIVATE */
6457
6458#ifdef CONFIG_POLL
6459#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
6460#else
6461#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
6462#endif
6463
6464/* private - types bit positions */
6465enum _poll_types_bits {
6466 /* can be used to ignore an event */
6467 _POLL_TYPE_IGNORE,
6468
6469 /* to be signaled by k_poll_signal_raise() */
6470 _POLL_TYPE_SIGNAL,
6471
6472 /* semaphore availability */
6473 _POLL_TYPE_SEM_AVAILABLE,
6474
6475 /* queue/FIFO/LIFO data availability */
6476 _POLL_TYPE_DATA_AVAILABLE,
6477
6478 /* msgq data availability */
6479 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
6480
6481 /* pipe data availability */
6482 _POLL_TYPE_PIPE_DATA_AVAILABLE,
6483
6484 _POLL_NUM_TYPES
6485};
6486
6487#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
6488
6489/* private - states bit positions */
6490enum _poll_states_bits {
6491 /* default state when creating event */
6492 _POLL_STATE_NOT_READY,
6493
6494 /* signaled by k_poll_signal_raise() */
6495 _POLL_STATE_SIGNALED,
6496
6497 /* semaphore is available */
6498 _POLL_STATE_SEM_AVAILABLE,
6499
6500 /* data is available to read on queue/FIFO/LIFO */
6501 _POLL_STATE_DATA_AVAILABLE,
6502
6503 /* queue/FIFO/LIFO wait was cancelled */
6504 _POLL_STATE_CANCELLED,
6505
6506 /* data is available to read on a message queue */
6507 _POLL_STATE_MSGQ_DATA_AVAILABLE,
6508
6509 /* data is available to read from a pipe */
6510 _POLL_STATE_PIPE_DATA_AVAILABLE,
6511
6512 _POLL_NUM_STATES
6513};
6514
6515#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
6516
6517#define _POLL_EVENT_NUM_UNUSED_BITS \
6518 (32 - (0 \
6519 + 8 /* tag */ \
6520 + _POLL_NUM_TYPES \
6521 + _POLL_NUM_STATES \
6522 + 1 /* modes */ \
6523 ))
6524
6525/* end of polling API - PRIVATE */
6526
6527
6535
6536/* Public polling API */
6537
6538/* public - values for k_poll_event.type bitfield */
6539#define K_POLL_TYPE_IGNORE 0
6540#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
6541#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
6542#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
6543#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
6544#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
6545#define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
6546
6547/* public - polling modes */
6548enum k_poll_modes {
6549 /* polling thread does not take ownership of objects when available */
6550 K_POLL_MODE_NOTIFY_ONLY = 0,
6551
6552 K_POLL_NUM_MODES
6553};
6554
6555/* public - values for k_poll_event.state bitfield */
6556#define K_POLL_STATE_NOT_READY 0
6557#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
6558#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
6559#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
6560#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
6561#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
6562#define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
6563#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
6564
6565/* public - poll signal object */
6566struct k_poll_signal {
6571 sys_dlist_t poll_events;
6575
6580 unsigned int signaled;
6581
6583 int result;
6584};
6585
6586#define K_POLL_SIGNAL_INITIALIZER(obj) \
6587 { \
6588 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
6589 .signaled = 0, \
6590 .result = 0, \
6591 }
6592
6600struct k_poll_event {
6601 sys_dnode_t _node;
6602
6604 struct z_poller *poller;
6608
6610 uint32_t tag:8;
6611
6613 uint32_t type:_POLL_NUM_TYPES;
6614
6616 uint32_t state:_POLL_NUM_STATES;
6617
6619 uint32_t mode:1;
6620
6622 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
6623
6625 union {
6626 /* The typed_* fields below are used by K_POLL_EVENT_*INITIALIZER() macros to ensure
6627 * type safety of polled objects.
6628 */
6629 void *obj, *typed_K_POLL_TYPE_IGNORE;
6630 struct k_poll_signal *signal, *typed_K_POLL_TYPE_SIGNAL;
6631 struct k_sem *sem, *typed_K_POLL_TYPE_SEM_AVAILABLE;
6632 struct k_fifo *fifo, *typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE;
6633 struct k_queue *queue, *typed_K_POLL_TYPE_DATA_AVAILABLE;
6634 struct k_msgq *msgq, *typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE;
6635 struct k_pipe *pipe, *typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE;
6636 };
6637};
6638
6639#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
6640 { \
6641 .poller = NULL, \
6642 .type = _event_type, \
6643 .state = K_POLL_STATE_NOT_READY, \
6644 .mode = _event_mode, \
6645 .unused = 0, \
6646 { \
6647 .typed_##_event_type = _event_obj, \
6648 }, \
6649 }
6650
6651#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
6652 event_tag) \
6653 { \
6654 .tag = event_tag, \
6655 .type = _event_type, \
6656 .state = K_POLL_STATE_NOT_READY, \
6657 .mode = _event_mode, \
6658 .unused = 0, \
6659 { \
6660 .typed_##_event_type = _event_obj, \
6661 }, \
6662 }
6663
6678
6679void k_poll_event_init(struct k_poll_event *event, uint32_t type,
6680 int mode, void *obj);
6681
6724
6725__syscall int k_poll(struct k_poll_event *events, int num_events,
6726 k_timeout_t timeout);
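/*
 * Illustrative sketch (not part of kernel.h): waiting up to 100 ms for a
 * semaphore to become available. In NOTIFY_ONLY mode k_poll() only reports
 * readiness, so the semaphore must still be taken explicitly, and the event
 * state must be reset before polling again. my_sem is a placeholder
 * initialized elsewhere with k_sem_init().
 *
 * struct k_poll_event events[1];
 *
 * k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
 *                   K_POLL_MODE_NOTIFY_ONLY, &my_sem);
 *
 * int rc = k_poll(events, 1, K_MSEC(100));
 * if (rc == 0 && (events[0].state & K_POLL_STATE_SEM_AVAILABLE)) {
 *     k_sem_take(&my_sem, K_NO_WAIT);
 * }
 * events[0].state = K_POLL_STATE_NOT_READY;
 */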
6727
6735
6736__syscall void k_poll_signal_init(struct k_poll_signal *sig);
6737
6743__syscall void k_poll_signal_reset(struct k_poll_signal *sig);
6744
6755__syscall void k_poll_signal_check(struct k_poll_signal *sig,
6756 unsigned int *signaled, int *result);
6757
6781
6782__syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
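/*
 * Illustrative sketch (not part of kernel.h): a producer/consumer handshake
 * through a poll signal. sig and evt are placeholders; the result value 0x42
 * is arbitrary.
 *
 * static struct k_poll_signal sig;
 * static struct k_poll_event evt;
 *
 * void consumer(void)
 * {
 *     unsigned int signaled;
 *     int result;
 *
 *     k_poll_signal_init(&sig);
 *     k_poll_event_init(&evt, K_POLL_TYPE_SIGNAL,
 *                       K_POLL_MODE_NOTIFY_ONLY, &sig);
 *     (void)k_poll(&evt, 1, K_FOREVER);
 *     k_poll_signal_check(&sig, &signaled, &result);
 *     k_poll_signal_reset(&sig);       // stays signaled until reset
 *     evt.state = K_POLL_STATE_NOT_READY;
 * }
 *
 * void producer(void)                  // may also be called from an ISR
 * {
 *     k_poll_signal_raise(&sig, 0x42);
 * }
 */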
6783
6785
6804static inline void k_cpu_idle(void)
6805{
6806 arch_cpu_idle();
6807}
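/*
 * Illustrative sketch (not part of kernel.h): the classic power-saving loop,
 * normally the job of the kernel's idle thread; each call returns once an
 * interrupt wakes the CPU.
 *
 * while (true) {
 *     k_cpu_idle();
 * }
 */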
6808
6823static inline void k_cpu_atomic_idle(unsigned int key)
6824{
6825 arch_cpu_atomic_idle(key);
6826}
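/*
 * Illustrative sketch (not part of kernel.h): checking a wakeup condition
 * and idling without a race. work_pending() is a hypothetical predicate;
 * passing the irq_lock() key lets the idle entry re-enable interrupts
 * atomically, so a wakeup arriving between the check and the idle cannot
 * be lost.
 *
 * unsigned int key = irq_lock();
 *
 * if (!work_pending()) {
 *     k_cpu_atomic_idle(key);
 * } else {
 *     irq_unlock(key);
 * }
 */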
6827
6831
6836#ifdef ARCH_EXCEPT
6837/* This architecture has direct support for triggering a CPU exception */
6838#define z_except_reason(reason) ARCH_EXCEPT(reason)
6839#else
6840
6841#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
6842#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
6843#else
6844#define __EXCEPT_LOC()
6845#endif
6846
6847/* NOTE: This is the implementation for arches that do not implement
6848 * ARCH_EXCEPT() to generate a real CPU exception.
6849 *
6850 * We won't have a real exception frame to determine the PC value when
6851 * the oops occurred, so print file and line number before we jump into
6852 * the fatal error handler.
6853 */
6854#define z_except_reason(reason) do { \
6855 __EXCEPT_LOC(); \
6856 z_fatal_error(reason, NULL); \
6857 } while (false)
6858
6859#endif /* ARCH_EXCEPT */
6863
6875#define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
6876
6885#define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
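/*
 * Illustrative sketch (not part of kernel.h): choosing between the two.
 * status and hw_is_wedged are hypothetical; k_oops() raises
 * K_ERR_KERNEL_OOPS (by default fatal to the calling thread only), while
 * k_panic() raises K_ERR_KERNEL_PANIC (treated as unrecoverable for the
 * whole system).
 *
 * if (status < 0) {
 *     k_oops();
 * }
 * if (hw_is_wedged) {
 *     k_panic();
 * }
 */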
6886
6890/*
6891 * private APIs that are utilized by one or more public APIs
6892 */
6893
6897void z_timer_expiration_handler(struct _timeout *timeout);
6901
6902#ifdef CONFIG_PRINTK
6910__syscall void k_str_out(char *c, size_t n);
6911#endif
6912
6918
6939__syscall int k_float_disable(struct k_thread *thread);
6940
6979__syscall int k_float_enable(struct k_thread *thread, unsigned int options);
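/*
 * Illustrative sketch (not part of kernel.h): turning floating-point context
 * preservation on and off for a thread. my_thread is a placeholder created
 * elsewhere; K_FP_REGS is only meaningful on architectures with shared FP
 * register support enabled.
 *
 * if (k_float_enable(&my_thread, K_FP_REGS) == 0) {
 *     // FP context of my_thread is now preserved across context switches
 * }
 * (void)k_float_disable(&my_thread);
 */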
6980
6984
6994
7002
7011
7022
7033
7042
7051
7052#ifdef __cplusplus
7053}
7054#endif
7055
7056#include <zephyr/tracing/tracing.h>
7057#include <zephyr/syscalls/kernel.h>
7058
7059#endif /* !_ASMLANGUAGE */
7060
7061#endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
static uint32_t arch_k_cycle_get_32(void)
Definition misc.h:26
static uint64_t arch_k_cycle_get_64(void)
Definition misc.h:33
void(* k_thread_entry_t)(void *p1, void *p2, void *p3)
Thread entry point function type.
Definition arch_interface.h:48
struct z_thread_stack_element k_thread_stack_t
Typedef of struct z_thread_stack_element.
Definition arch_interface.h:46
long atomic_t
Definition atomic_types.h:15
System error numbers.
void arch_cpu_atomic_idle(unsigned int key)
Atomically re-enable interrupts and enter low power mode.
void arch_cpu_idle(void)
Power save idle routine.
static _Bool atomic_test_and_set_bit(atomic_t *target, int bit)
Atomically set a bit and test it.
Definition atomic.h:172
static _Bool atomic_test_bit(const atomic_t *target, int bit)
Atomically get and test a bit.
Definition atomic.h:129
static void atomic_clear_bit(atomic_t *target, int bit)
Atomically clear a bit.
Definition atomic.h:193
static uint32_t k_cycle_get_32(void)
Read the hardware clock.
Definition kernel.h:2219
#define K_NO_WAIT
Generate null timeout delay.
Definition kernel.h:1554
int64_t k_uptime_ticks(void)
Get system uptime, in system ticks.
static uint32_t k_uptime_get_32(void)
Get system uptime (32-bit version).
Definition kernel.h:2171
uint32_t k_ticks_t
Tick precision used in timeout APIs.
Definition clock.h:48
static int64_t k_uptime_delta(int64_t *reftime)
Get elapsed time, and update the referenced time.
Definition kernel.h:2200
static uint32_t k_uptime_seconds(void)
Get system uptime in seconds.
Definition kernel.h:2184
static uint64_t k_cycle_get_64(void)
Read the 64-bit hardware clock.
Definition kernel.h:2234
static int64_t k_uptime_get(void)
Get system uptime.
Definition kernel.h:2147
int k_condvar_signal(struct k_condvar *condvar)
Signals one thread that is pending on the condition variable.
int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex, k_timeout_t timeout)
Waits on the condition variable releasing the mutex lock.
int k_condvar_init(struct k_condvar *condvar)
Initialize a condition variable.
int k_condvar_broadcast(struct k_condvar *condvar)
Unblock all threads that are pending on the condition variable.
static void k_cpu_idle(void)
Make the CPU idle.
Definition kernel.h:6804
static void k_cpu_atomic_idle(unsigned int key)
Make the CPU idle in an atomic fashion.
Definition kernel.h:6823
struct _dnode sys_dnode_t
Doubly-linked list node structure.
Definition dlist.h:54
struct _dnode sys_dlist_t
Doubly-linked list structure.
Definition dlist.h:50
static void sys_dnode_init(sys_dnode_t *node)
Initialize node to its state when not in a list.
Definition dlist.h:219
uint32_t k_event_wait(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for any of the specified events.
uint32_t k_event_set_masked(struct k_event *event, uint32_t events, uint32_t events_mask)
Set or clear the events in an event object.
uint32_t k_event_wait_all_safe(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for all of the specified events (safe version).
static uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
Test the events currently tracked in the event object.
Definition kernel.h:2870
uint32_t k_event_wait_safe(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for any of the specified events (safe version).
uint32_t k_event_set(struct k_event *event, uint32_t events)
Set the events in an event object.
uint32_t k_event_post(struct k_event *event, uint32_t events)
Post one or more events to an event object.
void k_event_init(struct k_event *event)
Initialize an event object.
uint32_t k_event_clear(struct k_event *event, uint32_t events)
Clear the events in an event object.
uint32_t k_event_wait_all(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for all of the specified events.
static bool sys_sflist_is_empty(const sys_sflist_t *list)
Test if the given list is empty.
Definition sflist.h:336
struct _sflist sys_sflist_t
Flagged single-linked list structure.
Definition sflist.h:54
int k_float_disable(struct k_thread *thread)
Disable preservation of floating point context information.
int k_float_enable(struct k_thread *thread, unsigned int options)
Enable preservation of floating point context information.
int k_futex_wait(struct k_futex *futex, int expected, k_timeout_t timeout)
Pend the current thread on a futex.
int k_futex_wake(struct k_futex *futex, bool wake_all)
Wake one/all threads pending on a futex.
void * k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout)
Allocate memory from a k_heap.
int k_heap_array_get(struct k_heap **heap)
Get the array of statically defined heaps.
void * k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
Allocate and initialize memory for an array of objects from a k_heap.
void k_heap_free(struct k_heap *h, void *mem)
Free memory allocated by k_heap_alloc().
void k_free(void *ptr)
Free memory allocated from heap.
void * k_realloc(void *ptr, size_t size)
Expand the size of an existing allocation.
void k_heap_init(struct k_heap *h, void *mem, size_t bytes)
Initialize a k_heap.
void * k_malloc(size_t size)
Allocate memory from the heap.
void * k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
Reallocate memory from a k_heap.
void * k_calloc(size_t nmemb, size_t size)
Allocate memory from heap, array style.
void * k_aligned_alloc(size_t align, size_t size)
Allocate memory from the heap with a specified alignment.
void * k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes, k_timeout_t timeout)
Allocate aligned memory from a k_heap.
bool k_is_in_isr(void)
Determine if code is running at interrupt level.
int k_is_preempt_thread(void)
Determine if code is running in a preemptible thread.
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout)
Receive a mailbox message.
void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
Retrieve mailbox message data into a buffer.
void k_mbox_init(struct k_mbox *mbox)
Initialize a mailbox.
int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout)
Send a mailbox message in a synchronous manner.
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, struct k_sem *sem)
Send a mailbox message in an asynchronous manner.
int k_mem_slab_init(struct k_mem_slab *slab, void *buffer, size_t block_size, uint32_t num_blocks)
Initialize a memory slab.
void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
Free memory allocated from a memory slab.
int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats)
Get the memory stats for a memory slab.
int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab)
Reset the maximum memory usage for a slab.
int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
Allocate memory from a memory slab.
static uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
Get the number of used blocks in a memory slab.
Definition kernel.h:6018
static uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
Get the number of maximum used blocks so far in a memory slab.
Definition kernel.h:6035
static uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
Get the number of unused blocks in a memory slab.
Definition kernel.h:6057
int k_msgq_peek(struct k_msgq *msgq, void *data)
Peek/read a message from a message queue.
uint32_t k_msgq_num_used_get(struct k_msgq *msgq)
Get the number of messages in a message queue.
void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size, uint32_t max_msgs)
Initialize a message queue.
int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout)
Send a message to the end of a message queue.
int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx)
Peek/read a message from a message queue at the specified index.
uint32_t k_msgq_num_free_get(struct k_msgq *msgq)
Get the amount of free space in a message queue.
void k_msgq_get_attrs(struct k_msgq *msgq, struct k_msgq_attrs *attrs)
Get basic attributes of a message queue.
void k_msgq_purge(struct k_msgq *msgq)
Purge a message queue.
int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs)
Initialize a message queue.
int k_msgq_put_front(struct k_msgq *msgq, const void *data)
Send a message to the front of a message queue.
int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
Receive a message from a message queue.
int k_msgq_cleanup(struct k_msgq *msgq)
Release allocated buffer for a queue.
int k_mutex_unlock(struct k_mutex *mutex)
Unlock a mutex.
int k_mutex_init(struct k_mutex *mutex)
Initialize a mutex.
int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
Lock a mutex.
int k_pipe_write(struct k_pipe *pipe, const uint8_t *data, size_t len, k_timeout_t timeout)
Write data to a pipe.
void k_pipe_close(struct k_pipe *pipe)
Close a pipe.
void k_pipe_reset(struct k_pipe *pipe)
Reset a pipe: discard any unread data and unblock any waiting threads.
void k_pipe_init(struct k_pipe *pipe, uint8_t *buffer, size_t buffer_size)
Initialize a pipe.
pipe_flags
Definition kernel.h:5666
int k_pipe_read(struct k_pipe *pipe, uint8_t *data, size_t len, k_timeout_t timeout)
Read data from a pipe: this routine reads up to len bytes of data from the pipe.
@ PIPE_FLAG_RESET
Definition kernel.h:5668
@ PIPE_FLAG_OPEN
Definition kernel.h:5667
void k_poll_signal_reset(struct k_poll_signal *sig)
Reset a poll signal object's state to unsignaled.
k_poll_modes
Definition kernel.h:6548
void k_poll_signal_check(struct k_poll_signal *sig, unsigned int *signaled, int *result)
Fetch the signaled state and result value of a poll signal.
void k_poll_event_init(struct k_poll_event *event, uint32_t type, int mode, void *obj)
Initialize one struct k_poll_event instance.
int k_poll(struct k_poll_event *events, int num_events, k_timeout_t timeout)
Wait for one or many of multiple poll events to occur.
int k_poll_signal_raise(struct k_poll_signal *sig, int result)
Signal a poll signal object.
void k_poll_signal_init(struct k_poll_signal *sig)
Initialize a poll signal object.
@ K_POLL_MODE_NOTIFY_ONLY
Definition kernel.h:6550
@ K_POLL_NUM_MODES
Definition kernel.h:6552
void k_queue_init(struct k_queue *queue)
Initialize a queue.
void * k_queue_get(struct k_queue *queue, k_timeout_t timeout)
Get an element from a queue.
void * k_queue_peek_tail(struct k_queue *queue)
Peek element at the tail of queue.
bool k_queue_unique_append(struct k_queue *queue, void *data)
Append an element to a queue only if it's not present already.
bool k_queue_remove(struct k_queue *queue, void *data)
Remove an element from a queue.
int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
Atomically add a list of elements to a queue.
int32_t k_queue_alloc_append(struct k_queue *queue, void *data)
Append an element to a queue.
void k_queue_cancel_wait(struct k_queue *queue)
Cancel waiting on a queue.
void * k_queue_peek_head(struct k_queue *queue)
Peek element at the head of queue.
void k_queue_prepend(struct k_queue *queue, void *data)
Prepend an element to a queue.
int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
Atomically append a list of elements to a queue.
void k_queue_append(struct k_queue *queue, void *data)
Append an element to the end of a queue.
int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data)
Prepend an element to a queue.
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
Insert an element into a queue.
int k_queue_is_empty(struct k_queue *queue)
Query a queue to see if it has data available.
void k_sem_reset(struct k_sem *sem)
Resets a semaphore's count to zero.
unsigned int k_sem_count_get(struct k_sem *sem)
Get a semaphore's count.
void k_sem_give(struct k_sem *sem)
Give a semaphore.
int k_sem_take(struct k_sem *sem, k_timeout_t timeout)
Take a semaphore.
int k_sem_init(struct k_sem *sem, unsigned int initial_count, unsigned int limit)
Initialize a semaphore.
struct _slist sys_slist_t
Single-linked list structure.
Definition slist.h:49
struct _snode sys_snode_t
Single-linked list node structure.
Definition slist.h:39
int k_stack_pop(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout)
Pop an element from a stack.
void k_stack_init(struct k_stack *stack, stack_data_t *buffer, uint32_t num_entries)
Initialize a stack.
int k_stack_cleanup(struct k_stack *stack)
Release a stack's allocated buffer.
int k_stack_push(struct k_stack *stack, stack_data_t data)
Push an element onto a stack.
int32_t k_stack_alloc_init(struct k_stack *stack, uint32_t num_entries)
Initialize a stack.
#define SYS_PORT_TRACING_TRACKING_FIELD(type)
Field added to kernel objects so they are tracked.
Definition tracing_macros.h:375
#define IS_ENABLED(config_macro)
Check for macro definition in compiler-visible expressions.
Definition util_macro.h:154
#define BIT(n)
Unsigned integer with bit position n set (signed in assembly language).
Definition util_macro.h:44
#define CONTAINER_OF(ptr, type, field)
Get a pointer to a structure containing the element.
Definition util.h:281
#define EBUSY
Mount device busy.
Definition errno.h:54
int k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
Copy the thread name into a supplied buffer.
void k_yield(void)
Yield the current thread.
const char * k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
Get thread state string.
void k_thread_resume(k_tid_t thread)
Resume a suspended thread.
void * k_thread_custom_data_get(void)
Get current thread's custom data.
void k_thread_abort(k_tid_t thread)
Abort a thread.
int k_thread_name_set(k_tid_t thread, const char *str)
Set current thread name.
void k_thread_priority_set(k_tid_t thread, int prio)
Set a thread's priority.
void k_thread_absolute_deadline_set(k_tid_t thread, int deadline)
Set absolute deadline expiration time for scheduler.
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
Enable thread to run on specified CPU.
void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
Iterate over all the threads in the system without locking.
bool k_can_yield(void)
Check whether it is possible to yield in the current context.
int k_thread_priority_get(k_tid_t thread)
Get a thread's priority.
static void k_thread_heap_assign(struct k_thread *thread, struct k_heap *heap)
Assign a resource memory pool to a thread.
Definition kernel.h:506
FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry, void *p1, void *p2, void *p3)
Drop a thread's privileges permanently to user mode.
int k_thread_join(struct k_thread *thread, k_timeout_t timeout)
Sleep until a thread exits.
k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread)
Get time remaining before a thread wakes up, in system ticks.
void k_thread_custom_data_set(void *value)
Set current thread's custom data.
int32_t k_sleep(k_timeout_t timeout)
Put the current thread to sleep.
void k_sched_lock(void)
Lock the scheduler.
static int32_t k_msleep(int32_t ms)
Put the current thread to sleep.
Definition kernel.h:702
void k_busy_wait(uint32_t usec_to_wait)
Cause the current thread to busy wait.
void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks, k_thread_timeslice_fn_t expired, void *data)
Set thread time slice.
static void k_thread_runtime_stats_longest_frame_reset(__maybe_unused struct k_thread *thread)
Resets thread longest frame usage data for specified thread.
Definition kernel.h:120
void k_thread_suspend(k_tid_t thread)
Suspend a thread.
void k_sched_unlock(void)
Unlock the scheduler.
static __attribute_const__ k_tid_t k_current_get(void)
Get thread ID of the current thread.
Definition kernel.h:836
int k_thread_cpu_mask_clear(k_tid_t thread)
Sets all CPU enable masks to zero.
void k_thread_foreach_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb, void *user_data)
Iterate over all the threads running on the specified CPU.
void k_sched_time_slice_set(int32_t slice, int prio)
Set time-slicing period and scope.
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
Prevent a thread from running on the specified CPU.
void k_wakeup(k_tid_t thread)
Wake up a sleeping thread.
int k_thread_stack_free(k_thread_stack_t *stack)
Free a dynamically allocated thread stack.
k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread)
Get time when a thread wakes up, in system ticks.
__attribute_const__ k_tid_t k_sched_current_thread_query(void)
Query thread ID of the current thread.
static void k_thread_start(k_tid_t thread)
Start an inactive thread.
Definition kernel.h:1304
k_tid_t k_thread_create(struct k_thread *new_thread, k_thread_stack_t *stack, size_t stack_size, k_thread_entry_t entry, void *p1, void *p2, void *p3, int prio, uint32_t options, k_timeout_t delay)
Create a thread.
void k_reschedule(void)
Invoke the scheduler.
void k_thread_deadline_set(k_tid_t thread, int deadline)
Set relative deadline expiration time for scheduler.
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb, void *user_data)
Iterate over the threads running on the specified CPU without locking.
const char * k_thread_name_get(k_tid_t thread)
Get thread name.
void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
Iterate over all the threads in the system.
static bool k_is_pre_kernel(void)
Test whether startup is in the before-main-task phase.
Definition kernel.h:803
int k_thread_cpu_pin(k_tid_t thread, int cpu)
Pin a thread to a CPU.
int32_t k_usleep(int32_t us)
Put the current thread to sleep with microsecond resolution.
int k_thread_cpu_mask_enable_all(k_tid_t thread)
Sets all CPU enable masks to one.
void(* k_thread_user_cb_t)(const struct k_thread *thread, void *user_data)
Definition kernel.h:127
k_thread_stack_t * k_thread_stack_alloc(size_t size, int flags)
Dynamically allocate a thread stack.
k_ticks_t k_timer_expires_ticks(const struct k_timer *timer)
Get next expiration time of a timer, in system ticks.
void(* k_timer_stop_t)(struct k_timer *timer)
Timer stop function type.
Definition kernel.h:1886
k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer)
Get time remaining before a timer next expires, in system ticks.
void * k_timer_user_data_get(const struct k_timer *timer)
Retrieve the user-specific data from a timer.
void(* k_timer_expiry_t)(struct k_timer *timer)
Timer expiry function type.
Definition kernel.h:1870
void k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn, k_timer_stop_t stop_fn)
Initialize a timer.
void k_timer_start(struct k_timer *timer, k_timeout_t duration, k_timeout_t period)
Start a timer.
static uint32_t k_timer_remaining_get(struct k_timer *timer)
Get time remaining before a timer next expires.
Definition kernel.h:2071
uint32_t k_timer_status_sync(struct k_timer *timer)
Synchronize thread to timer expiration.
void k_timer_stop(struct k_timer *timer)
Stop a timer.
uint32_t k_timer_status_get(struct k_timer *timer)
Read timer status.
void k_timer_user_data_set(struct k_timer *timer, void *user_data)
Associate user-specific data with a timer.
#define k_ticks_to_ms_floor32(t)
Convert ticks to milliseconds.
Definition time_units.h:1718
#define k_ticks_to_sec_floor32(t)
Convert ticks to seconds.
Definition time_units.h:1622
#define k_ticks_to_ms_floor64(t)
Convert ticks to milliseconds.
Definition time_units.h:1734
int k_work_poll_submit_to_queue(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout)
Submit a triggered work item.
static k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
Access the thread that animates a work queue.
Definition kernel.h:4828
static bool k_work_is_pending(const struct k_work *work)
Test whether a work item is currently pending.
Definition kernel.h:4799
int k_work_queue_drain(struct k_work_q *queue, bool plug)
Wait until the work queue has drained, optionally plugging it.
static k_ticks_t k_work_delayable_expires_get(const struct k_work_delayable *dwork)
Get the absolute tick count at which a scheduled delayable work will be submitted.
Definition kernel.h:4816
int k_work_schedule_for_queue(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay)
Submit an idle work item to a queue after a delay.
int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
Busy state flags from the delayable work item.
int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout)
Stop a work queue.
void k_work_init_delayable(struct k_work_delayable *dwork, k_work_handler_t handler)
Initialize a delayable work structure.
int k_work_poll_cancel(struct k_work_poll *work)
Cancel a triggered work item.
void k_work_user_queue_start(struct k_work_user_q *work_q, k_thread_stack_t *stack, size_t stack_size, int prio, const char *name)
Start a workqueue in user mode.
void k_work_poll_init(struct k_work_poll *work, k_work_handler_t handler)
Initialize a triggered work item.
int k_work_cancel(struct k_work *work)
Cancel a work item.
static int k_work_user_submit_to_queue(struct k_work_user_q *work_q, struct k_work_user *work)
Submit a work item to a user mode workqueue.
Definition kernel.h:4953
int k_work_submit_to_queue(struct k_work_q *queue, struct k_work *work)
Submit a work item to a queue.
static bool k_work_user_is_pending(struct k_work_user *work)
Check if a userspace work item is pending.
Definition kernel.h:4930
void(* k_work_handler_t)(struct k_work *work)
The signature for a work item handler function.
Definition kernel.h:3927
int k_work_schedule(struct k_work_delayable *dwork, k_timeout_t delay)
Submit an idle work item to the system work queue after a delay.
static bool k_work_delayable_is_pending(const struct k_work_delayable *dwork)
Test whether a delayed work item is currently pending.
Definition kernel.h:4810
bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork, struct k_work_sync *sync)
Cancel delayable work and wait.
int k_work_cancel_delayable(struct k_work_delayable *dwork)
Cancel delayable work.
static void k_work_user_init(struct k_work_user *work, k_work_user_handler_t handler)
Initialize a userspace work item.
Definition kernel.h:4908
int k_work_queue_unplug(struct k_work_q *queue)
Release a work queue to accept new submissions.
int k_work_reschedule(struct k_work_delayable *dwork, k_timeout_t delay)
Reschedule a work item to the system work queue after a delay.
void(* k_work_user_handler_t)(struct k_work_user *work)
Work item handler function type for user work queues.
Definition kernel.h:4851
bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync)
Cancel a work item and wait for it to complete.
static k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
Access the user mode thread that animates a work queue.
Definition kernel.h:5008
int k_work_busy_get(const struct k_work *work)
Busy state flags from the work item.
static struct k_work_delayable * k_work_delayable_from_work(struct k_work *work)
Get the parent delayable work structure from a work pointer.
Definition kernel.h:4805
static k_ticks_t k_work_delayable_remaining_get(const struct k_work_delayable *dwork)
Get the number of ticks until a scheduled delayable work will be submitted.
Definition kernel.h:4822
bool k_work_flush(struct k_work *work, struct k_work_sync *sync)
Wait for last-submitted instance to complete.
int k_work_reschedule_for_queue(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay)
Reschedule a work item to a queue after a delay.
void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *cfg)
Run work queue using calling thread.
int k_work_submit(struct k_work *work)
Submit a work item to the system queue.
bool k_work_flush_delayable(struct k_work_delayable *dwork, struct k_work_sync *sync)
Flush delayable work.
int k_work_poll_submit(struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout)
Submit a triggered work item to the system workqueue.
void k_work_queue_init(struct k_work_q *queue)
Initialize a work queue structure.
void k_work_queue_start(struct k_work_q *queue, k_thread_stack_t *stack, size_t stack_size, int prio, const struct k_work_queue_config *cfg)
Initialize a work queue.
void k_work_init(struct k_work *work, k_work_handler_t handler)
Initialize a (non-delayable) work structure.
@ K_WORK_CANCELING
Flag indicating a work item that is being canceled.
Definition kernel.h:4533
@ K_WORK_QUEUED
Flag indicating a work item that has been submitted to a queue but has not started running.
Definition kernel.h:4540
@ K_WORK_DELAYED
Flag indicating a delayed work item that is scheduled for submission to a queue.
Definition kernel.h:4547
@ K_WORK_RUNNING
Flag indicating a work item that is running under a work queue thread.
Definition kernel.h:4527
@ K_WORK_FLUSHING
Flag indicating a synced work item that is being flushed.
Definition kernel.h:4553
#define BUILD_ASSERT(EXPR, MSG...)
Definition llvm.h:51
struct k_thread * k_tid_t
Definition thread.h:383
struct k_thread_runtime_stats k_thread_runtime_stats_t
void k_sys_runtime_stats_disable(void)
Disable gathering of system runtime statistics.
int k_thread_runtime_stats_enable(k_tid_t thread)
Enable gathering of runtime statistics for specified thread.
int k_ipi_work_add(struct k_ipi_work *work, uint32_t cpu_bitmask, k_ipi_func_t func)
Add an IPI work item to the IPI work queue.
void k_sys_runtime_stats_enable(void)
Enable gathering of system runtime statistics.
int k_thread_runtime_stats_get(k_tid_t thread, k_thread_runtime_stats_t *stats)
Get the runtime statistics of a thread.
void k_ipi_work_signal(void)
Signal that there is one or more IPI work items to process.
int k_ipi_work_wait(struct k_ipi_work *work, k_timeout_t timeout)
Wait until the IPI work item has been processed by all targeted CPUs.
execution_context_types
Definition kernel.h:91
@ K_ISR
Definition kernel.h:92
@ K_COOP_THREAD
Definition kernel.h:93
@ K_PREEMPT_THREAD
Definition kernel.h:94
void(* k_ipi_func_t)(struct k_ipi_work *work)
Definition kernel.h:3811
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
Get the runtime statistics of all threads.
static void k_ipi_work_init(struct k_ipi_work *work)
Initialize the specified IPI work item.
Definition kernel.h:3840
int k_thread_runtime_stats_disable(k_tid_t thread)
Disable gathering of runtime statistics for specified thread.
int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats)
Get the runtime statistics of all threads on specified cpu.
Header files included by kernel.h.
void(* k_thread_timeslice_fn_t)(struct k_thread *thread, void *data)
Definition kernel_structs.h:313
Memory Statistics.
__UINT32_TYPE__ uint32_t
Definition stdint.h:90
__INTPTR_TYPE__ intptr_t
Definition stdint.h:104
__INT32_TYPE__ int32_t
Definition stdint.h:74
__UINT64_TYPE__ uint64_t
Definition stdint.h:91
__UINT8_TYPE__ uint8_t
Definition stdint.h:88
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:105
__INT64_TYPE__ int64_t
Definition stdint.h:75
Kernel condition variable structure.
Definition kernel.h:3558
Event Structure.
Definition kernel.h:2651
Kernel FIFO structure.
Definition kernel.h:2895
futex structure
Definition kernel.h:2551
atomic_t val
Definition kernel.h:2552
Kernel synchronized heap structure.
Definition kernel.h:6105
IPI work item structure.
Definition kernel.h:3819
Kernel LIFO structure.
Definition kernel.h:3146
Mailbox Message Structure.
Definition kernel.h:5491
k_tid_t tx_target_thread
target thread id
Definition kernel.h:5501
void * tx_data
sender's message data buffer
Definition kernel.h:5497
k_tid_t rx_source_thread
source thread id
Definition kernel.h:5499
uint32_t info
application-defined information value
Definition kernel.h:5495
size_t size
size of message (in bytes)
Definition kernel.h:5493
Mailbox Structure.
Definition kernel.h:5520
Memory Domain.
Definition mem_domain.h:80
Memory Partition.
Definition mem_domain.h:55
Message Queue Attributes.
Definition kernel.h:5233
uint32_t used_msgs
Used messages.
Definition kernel.h:5239
size_t msg_size
Message Size.
Definition kernel.h:5235
uint32_t max_msgs
Maximal number of messages.
Definition kernel.h:5237
Message Queue Structure.
Definition kernel.h:5168
Kernel mutex structure.
Definition kernel.h:3437
Object core structure.
Definition obj_core.h:121
Kernel pipe structure.
Definition kernel.h:5676
Poll Event.
Definition kernel.h:6596
struct k_msgq * typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE
Definition kernel.h:6634
void * typed_K_POLL_TYPE_IGNORE
Definition kernel.h:6629
struct k_poll_signal * signal
Definition kernel.h:6630
struct k_pipe * pipe
Definition kernel.h:6635
uint32_t tag
optional user-specified tag, opaque, untouched by the API
Definition kernel.h:6610
struct k_fifo * fifo
Definition kernel.h:6632
struct k_msgq * msgq
Definition kernel.h:6634
struct k_queue * queue
Definition kernel.h:6633
uint32_t unused
unused bits in 32-bit word
Definition kernel.h:6622
struct k_pipe * typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE
Definition kernel.h:6635
uint32_t type
bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values)
Definition kernel.h:6613
struct k_sem * sem
Definition kernel.h:6631
struct k_queue * typed_K_POLL_TYPE_DATA_AVAILABLE
Definition kernel.h:6633
struct k_sem * typed_K_POLL_TYPE_SEM_AVAILABLE
Definition kernel.h:6631
uint32_t state
bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values)
Definition kernel.h:6616
uint32_t mode
mode of operation, from enum k_poll_modes
Definition kernel.h:6619
struct k_poll_signal * typed_K_POLL_TYPE_SIGNAL
Definition kernel.h:6630
void * obj
Definition kernel.h:6629
struct k_fifo * typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE
Definition kernel.h:6632
Definition kernel.h:6566
int result
custom result value passed to k_poll_signal_raise() if needed
Definition kernel.h:6583
unsigned int signaled
1 if the event has been signaled, 0 otherwise.
Definition kernel.h:6580
Kernel queue structure.
Definition kernel.h:2255
Semaphore structure.
Definition kernel.h:3663
Kernel Spin Lock.
Definition spinlock.h:45
Thread Structure.
Definition thread.h:259
struct _thread_base base
Definition thread.h:261
struct k_heap * resource_pool
resource pool
Definition thread.h:357
struct __thread_entry entry
thread entry and parameters description
Definition thread.h:296
Kernel timeout type.
Definition clock.h:65
Kernel timer structure.
Definition kernel.h:1777
A structure used to submit work after a delay.
Definition kernel.h:4603
Kernel workqueue structure.
Definition kernel.h:4759
A structure holding optional configuration items for a work queue.
Definition kernel.h:4717
const char * name
The name to be given to the work queue thread.
Definition kernel.h:4722
uint32_t work_timeout_ms
Controls whether work queue monitors work timeouts.
Definition kernel.h:4751
bool essential
Control whether the work queue thread should be marked as an essential thread.
Definition kernel.h:4741
bool no_yield
Control whether the work queue thread should yield between items.
Definition kernel.h:4736
A structure holding internal state for a pending synchronous operation on a work item or queue.
Definition kernel.h:4698
A structure used to submit work.
Definition kernel.h:4561
A structure to represent a ring buffer.
Definition ring_buffer.h:50
Definition sys_heap.h:57
Definition mem_stats.h:24
static __pinned_func bool k_is_user_context(void)
Indicate whether the CPU is currently in user mode.
Definition syscall.h:121
Macros to abstract toolchain specific capabilities.
Main header file for tracing subsystem API.
Header file for tracing macros.