Zephyr API Documentation 4.3.99
A Scalable Open Source RTOS
kernel.h
1/*
2 * Copyright (c) 2016, Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
12
13#ifndef ZEPHYR_INCLUDE_KERNEL_H_
14#define ZEPHYR_INCLUDE_KERNEL_H_
15
16#if !defined(_ASMLANGUAGE)
18#include <errno.h>
19#include <limits.h>
20#include <stdbool.h>
21#include <zephyr/toolchain.h>
26
27#ifdef __cplusplus
28extern "C" {
29#endif
30
31/*
32 * Zephyr currently assumes the size of a couple standard types to simplify
33 * print string formats. Let's make sure this doesn't change without notice.
34 */
35BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
36BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
37BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));
38
47
48#define K_ANY NULL
49
50#if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
51#error Zero available thread priorities defined!
52#endif
53
54#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
55#define K_PRIO_PREEMPT(x) (x)
56
57#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
58#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
59#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
60#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
61#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
62
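For orientation, the priority map these macros produce with common defaults, as a sketch assuming CONFIG_NUM_COOP_PRIORITIES=16 and CONFIG_NUM_PREEMPT_PRIORITIES=15 (actual values depend on Kconfig):

/* K_PRIO_COOP(0)     == -16   highest cooperative priority
 * K_PRIO_COOP(15)    ==  -1   lowest cooperative priority
 * K_PRIO_PREEMPT(0)  ==   0   highest preemptible priority
 * K_PRIO_PREEMPT(14) ==  14   lowest preemptible priority
 * K_IDLE_PRIO        ==  15   idle thread
 *
 * Negative (cooperative) priorities are never preempted by another
 * thread; numerically lower values are scheduled first.
 */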
63#ifdef CONFIG_POLL
64#define Z_POLL_EVENT_OBJ_INIT(obj) \
65 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
66#define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
67#else
68#define Z_POLL_EVENT_OBJ_INIT(obj)
69#define Z_DECL_POLL_EVENT
70#endif
71
72struct k_thread;
73struct k_mutex;
74struct k_sem;
75struct k_msgq;
76struct k_mbox;
77struct k_pipe;
78struct k_queue;
79struct k_fifo;
80struct k_lifo;
81struct k_stack;
82struct k_mem_slab;
83struct k_timer;
84struct k_poll_event;
85struct k_poll_signal;
86struct k_mem_domain;
87struct k_mem_partition;
88struct k_futex;
89struct k_event;
90
96
97/* private, used by k_poll and k_work_poll */
98struct k_work_poll;
99typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
100
105
119static inline void
121{
122#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
123 thread->base.usage.longest = 0ULL;
124#endif
125}
126
127typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
128 void *user_data);
129
145void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
146
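A usage sketch (the callback and counter names are illustrative):

static void count_cb(const struct k_thread *thread, void *user_data)
{
	unsigned int *count = user_data;   /* caller-supplied accumulator */

	ARG_UNUSED(thread);
	(*count)++;
}

void count_threads(void)
{
	unsigned int count = 0;

	/* The callback runs with the scheduler locked: do not block in it. */
	k_thread_foreach(count_cb, &count);
	printk("%u threads\n", count);
}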
165#ifdef CONFIG_SMP
166void k_thread_foreach_filter_by_cpu(unsigned int cpu,
167 k_thread_user_cb_t user_cb, void *user_data);
168#else
169static inline
170void k_thread_foreach_filter_by_cpu(unsigned int cpu,
171 k_thread_user_cb_t user_cb, void *user_data)
172{
173 __ASSERT(cpu == 0, "cpu filter out of bounds");
174 ARG_UNUSED(cpu);
175 k_thread_foreach(user_cb, user_data);
176}
177#endif
178
206void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb,
207 void *user_data);
208
240#ifdef CONFIG_SMP
241void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
242 k_thread_user_cb_t user_cb, void *user_data);
243#else
244static inline
245void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
246 k_thread_user_cb_t user_cb, void *user_data)
247{
248 __ASSERT(cpu == 0, "cpu filter out of bounds");
249 ARG_UNUSED(cpu);
250 k_thread_foreach_unlocked(user_cb, user_data);
251}
252#endif
253
255
261
262#endif /* !_ASMLANGUAGE */
263
264
265/*
266 * Thread user options. May be needed by assembly code. Common part uses low
267 * bits, arch-specific use high bits.
268 */
269
273#define K_ESSENTIAL (BIT(0))
274
275#define K_FP_IDX 1
285#define K_FP_REGS (BIT(K_FP_IDX))
286
293#define K_USER (BIT(2))
294
303#define K_INHERIT_PERMS (BIT(3))
304
314#define K_CALLBACK_STATE (BIT(4))
315
325#define K_DSP_IDX 13
326#define K_DSP_REGS (BIT(K_DSP_IDX))
327
336#define K_AGU_IDX 14
337#define K_AGU_REGS (BIT(K_AGU_IDX))
338
348#define K_SSE_REGS (BIT(15))
349
350/* end - thread options */
351
352#if !defined(_ASMLANGUAGE)
377__syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);
378
391__syscall int k_thread_stack_free(k_thread_stack_t *stack);
392
444__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
445 k_thread_stack_t *stack,
446 size_t stack_size,
447 k_thread_entry_t entry,
448 void *p1, void *p2, void *p3,
449 int prio, uint32_t options, k_timeout_t delay);
450
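A dynamic-creation sketch (stack, entry, and priority values are illustrative):

#define WORKER_STACK_SIZE 1024
K_THREAD_STACK_DEFINE(worker_stack, WORKER_STACK_SIZE);
static struct k_thread worker_thread;

static void worker_entry(void *p1, void *p2, void *p3)
{
	/* thread body */
}

void start_worker(void)
{
	k_tid_t tid = k_thread_create(&worker_thread, worker_stack,
				      K_THREAD_STACK_SIZEOF(worker_stack),
				      worker_entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(7), 0, K_NO_WAIT);

	(void)k_thread_name_set(tid, "worker");
}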
472FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
473 void *p1, void *p2,
474 void *p3);
475
489#define k_thread_access_grant(thread, ...) \
490 FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)
491
506static inline void k_thread_heap_assign(struct k_thread *thread,
507 struct k_heap *heap)
508{
509 thread->resource_pool = heap;
510}
511
512#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
533__syscall int k_thread_stack_space_get(const struct k_thread *thread,
534 size_t *unused_ptr);
535
551__syscall int k_thread_runtime_stack_unused_threshold_pct_set(struct k_thread *thread,
552 uint32_t pct);
553
569__syscall int k_thread_runtime_stack_unused_threshold_set(struct k_thread *thread,
570 size_t threshold);
571
584__syscall size_t k_thread_runtime_stack_unused_threshold_get(struct k_thread *thread);
585
597typedef void (*k_thread_stack_safety_handler_t)(const struct k_thread *thread,
598 size_t unused_space, void *arg);
599
614int k_thread_runtime_stack_safety_full_check(const struct k_thread *thread,
615 size_t *unused_ptr,
616 k_thread_stack_safety_handler_t handler,
617 void *arg);
618
633int k_thread_runtime_stack_safety_threshold_check(const struct k_thread *thread,
634 size_t *unused_ptr,
635 k_thread_stack_safety_handler_t handler,
636 void *arg);
637#endif
638
639#if (K_HEAP_MEM_POOL_SIZE > 0)
652void k_thread_system_pool_assign(struct k_thread *thread);
653#endif /* (K_HEAP_MEM_POOL_SIZE > 0) */
654
674__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
675
689__syscall int32_t k_sleep(k_timeout_t timeout);
690
702static inline int32_t k_msleep(int32_t ms)
703{
704 return k_sleep(Z_TIMEOUT_MS(ms));
705}
706
723__syscall int32_t k_usleep(int32_t us);
724
741__syscall void k_busy_wait(uint32_t usec_to_wait);
742
754bool k_can_yield(void);
755
763__syscall void k_yield(void);
764
774__syscall void k_wakeup(k_tid_t thread);
775
789__attribute_const__
790__syscall k_tid_t k_sched_current_thread_query(void);
791
803static inline bool k_is_pre_kernel(void)
804{
805 extern bool z_sys_post_kernel; /* in init.c */
806
807 /*
808 * If called from userspace, it must be post kernel.
809 * This guard is necessary because z_sys_post_kernel memory
810 * is not accessible to user threads.
811 */
812 if (k_is_user_context()) {
813 return false;
814 }
815
816 /*
817 * Some compilers might optimize by pre-reading
818 * z_sys_post_kernel. This is absolutely not desirable.
819 * We are trying to avoid reading it if we are in user
820 * context as reading z_sys_post_kernel in user context
821 * will result in access fault. So add a compiler barrier
822 * here to stop that kind of optimizations.
823 */
824 compiler_barrier();
825
826 return !z_sys_post_kernel;
827}
828
835__attribute_const__
836static inline k_tid_t k_current_get(void)
837{
838 __ASSERT(!k_is_pre_kernel(), "k_current_get called pre-kernel");
839
840#ifdef CONFIG_CURRENT_THREAD_USE_TLS
841
842 /* Thread-local cache of current thread ID, set in z_thread_entry() */
843 extern Z_THREAD_LOCAL k_tid_t z_tls_current;
844
845 return z_tls_current;
846#else
847 return k_sched_current_thread_query();
848#endif
849}
850
870__syscall void k_thread_abort(k_tid_t thread);
871
872k_ticks_t z_timeout_expires(const struct _timeout *timeout);
873k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
874
875#ifdef CONFIG_SYS_CLOCK_EXISTS
876
884__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread);
885
886static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
887 const struct k_thread *thread)
888{
889 return z_timeout_expires(&thread->base.timeout);
890}
891
899__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread);
900
901static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
902 const struct k_thread *thread)
903{
904 return z_timeout_remaining(&thread->base.timeout);
905}
906
907#endif /* CONFIG_SYS_CLOCK_EXISTS */
908
912
913struct _static_thread_data {
914 struct k_thread *init_thread;
915 k_thread_stack_t *init_stack;
916 unsigned int init_stack_size;
917 k_thread_entry_t init_entry;
918 void *init_p1;
919 void *init_p2;
920 void *init_p3;
921 int init_prio;
922 uint32_t init_options;
923 const char *init_name;
924#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
925 int32_t init_delay_ms;
926#else
927 k_timeout_t init_delay;
928#endif
929};
930
931#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
932#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
933#define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
934#else
935#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS_INIT(ms)
936#define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
937#endif
938
939#define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
940 entry, p1, p2, p3, \
941 prio, options, delay, tname) \
942 { \
943 .init_thread = (thread), \
944 .init_stack = (stack), \
945 .init_stack_size = (stack_size), \
946 .init_entry = (k_thread_entry_t)entry, \
947 .init_p1 = (void *)p1, \
948 .init_p2 = (void *)p2, \
949 .init_p3 = (void *)p3, \
950 .init_prio = (prio), \
951 .init_options = (options), \
952 .init_name = STRINGIFY(tname), \
953 Z_THREAD_INIT_DELAY_INITIALIZER(delay) \
954 }
955
956/*
957 * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
958 * information on arguments.
959 */
960#define Z_THREAD_COMMON_DEFINE(name, stack_size, \
961 entry, p1, p2, p3, \
962 prio, options, delay) \
963 struct k_thread _k_thread_obj_##name; \
964 const STRUCT_SECTION_ITERABLE(_static_thread_data, \
965 _k_thread_data_##name) = \
966 Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
967 _k_thread_stack_##name, stack_size,\
968 entry, p1, p2, p3, prio, options, \
969 delay, name); \
970 __maybe_unused const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
971
975
1007#define K_THREAD_DEFINE(name, stack_size, \
1008 entry, p1, p2, p3, \
1009 prio, options, delay) \
1010 K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
1011 Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
1012 prio, options, delay)
1013
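The static counterpart, as a sketch (names and values illustrative); note that the delay argument is in milliseconds:

static void blink_entry(void *p1, void *p2, void *p3)
{
	while (1) {
		/* toggle an LED, poll a sensor, ... */
		k_msleep(500);
	}
}

/* Starts 1000 ms after boot at preemptible priority 7. */
K_THREAD_DEFINE(blink_tid, 1024, blink_entry, NULL, NULL, NULL,
		K_PRIO_PREEMPT(7), 0, 1000);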
1044#define K_KERNEL_THREAD_DEFINE(name, stack_size, \
1045 entry, p1, p2, p3, \
1046 prio, options, delay) \
1047 K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
1048 Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
1049 prio, options, delay)
1050
1060__syscall int k_thread_priority_get(k_tid_t thread);
1061
1087__syscall void k_thread_priority_set(k_tid_t thread, int prio);
1088
1089
1090#ifdef CONFIG_SCHED_DEADLINE
1122__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
1123
1164__syscall void k_thread_absolute_deadline_set(k_tid_t thread, int deadline);
1165#endif
1166
1185__syscall void k_reschedule(void);
1186
1187#ifdef CONFIG_SCHED_CPU_MASK
1200int k_thread_cpu_mask_clear(k_tid_t thread);
1201
1214int k_thread_cpu_mask_enable_all(k_tid_t thread);
1215
1228int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
1229
1242int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
1243
1254int k_thread_cpu_pin(k_tid_t thread, int cpu);
1255#endif
1256
1278__syscall void k_thread_suspend(k_tid_t thread);
1279
1291__syscall void k_thread_resume(k_tid_t thread);
1292
1306static inline void k_thread_start(k_tid_t thread)
1307{
1308 k_wakeup(thread);
1309}
1310
1337void k_sched_time_slice_set(int32_t slice, int prio);
1338
1377void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
1378 k_thread_timeslice_fn_t expired, void *data);
1379
1381
1386
1398bool k_is_in_isr(void);
1399
1416__syscall int k_is_preempt_thread(void);
1417
1421
1426
1452void k_sched_lock(void);
1453
1461void k_sched_unlock(void);
1462
1475__syscall void k_thread_custom_data_set(void *value);
1476
1484__syscall void *k_thread_custom_data_get(void);
1485
1499__syscall int k_thread_name_set(k_tid_t thread, const char *str);
1500
1509const char *k_thread_name_get(k_tid_t thread);
1510
1522__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
1523 size_t size);
1524
1537const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
1538
1542
1547
1556#define K_NO_WAIT Z_TIMEOUT_NO_WAIT
1557
1570#define K_NSEC(t) Z_TIMEOUT_NS(t)
1571
1584#define K_USEC(t) Z_TIMEOUT_US(t)
1585
1596#define K_CYC(t) Z_TIMEOUT_CYC(t)
1597
1608#define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1609
1620#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
1621
1632#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
1633
1644#define K_MINUTES(m) K_SECONDS((m) * 60)
1645
1656#define K_HOURS(h) K_MINUTES((h) * 60)
1657
1666#define K_FOREVER Z_FOREVER
1667
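A sketch of how these values are passed to blocking APIs (the semaphore is illustrative):

void timeout_examples(struct k_sem *sem)
{
	k_sleep(K_MSEC(100));             /* relative: at least 100 ms */
	k_sleep(K_SECONDS(2));            /* K_SECONDS builds on K_MSEC */

	if (k_sem_take(sem, K_NO_WAIT) != 0) {
		/* K_NO_WAIT never blocks: fails immediately if unavailable */
	}

	(void)k_sem_take(sem, K_FOREVER); /* blocks until available */
}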
1682#define K_TIMEOUT_SUM(timeout1, timeout2) K_TICKS(z_timeout_sum(timeout1, timeout2))
1683
1684#ifdef CONFIG_TIMEOUT_64BIT
1685
1697#define K_TIMEOUT_ABS_TICKS(t) \
1698 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)CLAMP(t, 0, (INT64_MAX - 1))))
1699
1711#define K_TIMEOUT_ABS_SEC(t) K_TIMEOUT_ABS_TICKS(k_sec_to_ticks_ceil64(t))
1712
1724#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1725
1738#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1739
1752#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1753
1766#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1767#endif
1768
1772
1779struct k_timer {
1783
1784 /*
1785 * _timeout structure must be first here if we want to use
1786 * dynamic timer allocation. timeout.node is used in the double-linked
1787 * list of free timers
1788 */
1789 struct _timeout timeout;
1790
1791 /* wait queue for the (single) thread waiting on this timer */
1792 _wait_q_t wait_q;
1793
1794 /* runs in ISR context */
1795 void (*expiry_fn)(struct k_timer *timer);
1796
1797 /* runs in the context of the thread that calls k_timer_stop() */
1798 void (*stop_fn)(struct k_timer *timer);
1799
1800 /* timer period */
1801 k_timeout_t period;
1802
1803 /* timer status */
1804 uint32_t status;
1805
1806 /* user-specific data, also used to support legacy features */
1807 void *user_data;
1808
1810
1811#ifdef CONFIG_OBJ_CORE_TIMER
1812 struct k_obj_core obj_core;
1813#endif
1817};
1818
1819#ifdef CONFIG_TIMER_OBSERVER
1820struct k_timer_observer {
1821 /* Invoked upon completion of k_timer initialization */
1822 void (*on_init)(struct k_timer *timer);
1823
1824 /* Invoked after the timer transitions to the running state */
1825 void (*on_start)(struct k_timer *timer, k_timeout_t duration,
1826 k_timeout_t period);
1827
1828 /* Invoked when the active timer is explicitly stopped */
1829 void (*on_stop)(struct k_timer *timer);
1830
1831 /* Executes in ISR context, keep minimal and non-blocking */
1832 void (*on_expiry)(struct k_timer *timer);
1833};
1834#endif /* CONFIG_TIMER_OBSERVER */
1835
1839#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1840 { \
1841 .timeout = { \
1842 .node = {},\
1843 .fn = z_timer_expiration_handler, \
1844 .dticks = 0, \
1845 }, \
1846 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1847 .expiry_fn = expiry, \
1848 .stop_fn = stop, \
1849 .period = {}, \
1850 .status = 0, \
1851 .user_data = 0, \
1852 }
1853
1857
1863
1874typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1875
1890typedef void (*k_timer_stop_t)(struct k_timer *timer);
1891
1903#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1904 STRUCT_SECTION_ITERABLE(k_timer, name) = \
1905 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1906
1907
1908#ifdef CONFIG_TIMER_OBSERVER
1909
1913#define Z_TIMER_OBSERVER_INITIALIZER(name, init, start, stop, expiry) \
1914 { \
1915 .on_init = init, \
1916 .on_start = start, \
1917 .on_stop = stop, \
1918 .on_expiry = expiry \
1919 }
1923
1937#define K_TIMER_OBSERVER_DEFINE(name, init, start, stop, expiry) \
1938 static const STRUCT_SECTION_ITERABLE(k_timer_observer, name) = \
1939 Z_TIMER_OBSERVER_INITIALIZER(name, init, start, stop, expiry)
1940
1941#endif /* CONFIG_TIMER_OBSERVER */
1942
1952void k_timer_init(struct k_timer *timer,
1953 k_timer_expiry_t expiry_fn,
1954 k_timer_stop_t stop_fn);
1955
1973__syscall void k_timer_start(struct k_timer *timer,
1974 k_timeout_t duration, k_timeout_t period);
1975
1992__syscall void k_timer_stop(struct k_timer *timer);
1993
2006__syscall uint32_t k_timer_status_get(struct k_timer *timer);
2007
2025__syscall uint32_t k_timer_status_sync(struct k_timer *timer);
2026
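A periodic-timer sketch (names illustrative):

static void heartbeat_expiry(struct k_timer *timer)
{
	/* Runs in ISR context: keep it short and non-blocking. */
	ARG_UNUSED(timer);
}

K_TIMER_DEFINE(heartbeat, heartbeat_expiry, NULL);

void start_heartbeat(void)
{
	/* First expiry after 100 ms, then every second until stopped. */
	k_timer_start(&heartbeat, K_MSEC(100), K_SECONDS(1));
}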
2027#ifdef CONFIG_SYS_CLOCK_EXISTS
2028
2039__syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
2040
2041static inline k_ticks_t z_impl_k_timer_expires_ticks(
2042 const struct k_timer *timer)
2043{
2044 return z_timeout_expires(&timer->timeout);
2045}
2046
2057__syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
2058
2059static inline k_ticks_t z_impl_k_timer_remaining_ticks(
2060 const struct k_timer *timer)
2061{
2062 return z_timeout_remaining(&timer->timeout);
2063}
2064
2075static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
2076{
2077 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
2078}
2079
2080#endif /* CONFIG_SYS_CLOCK_EXISTS */
2081
2094__syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
2095
2099static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
2100 void *user_data)
2101{
2102 timer->user_data = user_data;
2103}
2104
2112__syscall void *k_timer_user_data_get(const struct k_timer *timer);
2113
2114static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
2115{
2116 return timer->user_data;
2117}
2118
2120
2126
2136__syscall int64_t k_uptime_ticks(void);
2137
2151static inline int64_t k_uptime_get(void)
2152{
2153 return k_ticks_to_ms_floor64(k_uptime_ticks());
2154}
2155
2175static inline uint32_t k_uptime_get_32(void)
2176{
2177 return (uint32_t)k_uptime_get();
2178}
2179
2188static inline uint32_t k_uptime_seconds(void)
2189{
2190 return k_ticks_to_sec_floor32(k_uptime_ticks());
2191}
2192
2204static inline int64_t k_uptime_delta(int64_t *reftime)
2205{
2206 int64_t uptime, delta;
2207
2208 uptime = k_uptime_get();
2209 delta = uptime - *reftime;
2210 *reftime = uptime;
2211
2212 return delta;
2213}
2214
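A timing sketch (do_work stands in for any workload):

void measure(void)
{
	int64_t ref = k_uptime_get();               /* reference snapshot */

	do_work();                                  /* hypothetical workload */

	int64_t elapsed_ms = k_uptime_delta(&ref);  /* also updates ref */
	printk("work took %lld ms\n", elapsed_ms);
}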
2223static inline uint32_t k_cycle_get_32(void)
2224{
2225 return arch_k_cycle_get_32();
2226}
2227
2238static inline uint64_t k_cycle_get_64(void)
2239{
2240 if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
2241 __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
2242 "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
2243 return 0;
2244 }
2245
2246 return arch_k_cycle_get_64();
2247}
2248
2252
2253struct k_queue {
2254 sys_sflist_t data_q;
2255 struct k_spinlock lock;
2256 _wait_q_t wait_q;
2257
2258 Z_DECL_POLL_EVENT
2259
2261};
2262
2266
2267#define Z_QUEUE_INITIALIZER(obj) \
2268 { \
2269 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
2270 .lock = { }, \
2271 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2272 Z_POLL_EVENT_OBJ_INIT(obj) \
2273 }
2274
2278
2284
2292__syscall void k_queue_init(struct k_queue *queue);
2293
2307__syscall void k_queue_cancel_wait(struct k_queue *queue);
2308
2321void k_queue_append(struct k_queue *queue, void *data);
2322
2339__syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
2340
2353void k_queue_prepend(struct k_queue *queue, void *data);
2354
2371__syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
2372
2386void k_queue_insert(struct k_queue *queue, void *prev, void *data);
2387
2406int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
2407
2423int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
2424
2442__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
2443
2460bool k_queue_remove(struct k_queue *queue, void *data);
2461
2476bool k_queue_unique_append(struct k_queue *queue, void *data);
2477
2491__syscall int k_queue_is_empty(struct k_queue *queue);
2492
2493static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
2494{
2495 return sys_sflist_is_empty(&queue->data_q) ? 1 : 0;
2496}
2497
2507__syscall void *k_queue_peek_head(struct k_queue *queue);
2508
2518__syscall void *k_queue_peek_tail(struct k_queue *queue);
2519
2529#define K_QUEUE_DEFINE(name) \
2530 STRUCT_SECTION_ITERABLE(k_queue, name) = \
2531 Z_QUEUE_INITIALIZER(name)
2532
2534
2535#ifdef CONFIG_USERSPACE
2545struct k_futex {
2546 atomic_t val;
2547};
2548
2556struct z_futex_data {
2557 _wait_q_t wait_q;
2558 struct k_spinlock lock;
2559};
2560
2561#define Z_FUTEX_DATA_INITIALIZER(obj) \
2562 { \
2563 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2564 }
2565
2571
2591__syscall int k_futex_wait(struct k_futex *futex, int expected,
2592 k_timeout_t timeout);
2593
2608__syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2609
2611#endif
2612
2618
2623
2630
2631struct k_event {
2635 _wait_q_t wait_q;
2636 uint32_t events;
2637 struct k_spinlock lock;
2638
2640
2641#ifdef CONFIG_OBJ_CORE_EVENT
2642 struct k_obj_core obj_core;
2643#endif
2647
2648};
2649
2653
2654#define Z_EVENT_INITIALIZER(obj) \
2655 { \
2656 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2657 .events = 0, \
2658 .lock = {}, \
2659 }
2663
2671__syscall void k_event_init(struct k_event *event);
2672
2690__syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
2691
2709__syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
2710
2727__syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
2728 uint32_t events_mask);
2729
2742__syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
2743
2768__syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2769 bool reset, k_timeout_t timeout);
2770
2795__syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2796 bool reset, k_timeout_t timeout);
2797
2817__syscall uint32_t k_event_wait_safe(struct k_event *event, uint32_t events,
2818 bool reset, k_timeout_t timeout);
2819
2839__syscall uint32_t k_event_wait_all_safe(struct k_event *event, uint32_t events,
2840 bool reset, k_timeout_t timeout);
2841
2852static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
2853{
2854 return k_event_wait(event, events_mask, false, K_NO_WAIT);
2855}
2856
2866#define K_EVENT_DEFINE(name) \
2867 STRUCT_SECTION_ITERABLE(k_event, name) = \
2868 Z_EVENT_INITIALIZER(name);
2869
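A sketch pairing an ISR-side post with a waiting thread (event bits illustrative):

#define EV_RX_DONE BIT(0)
#define EV_TX_DONE BIT(1)

K_EVENT_DEFINE(io_events);

void on_rx_complete(void)           /* e.g. called from an ISR */
{
	k_event_post(&io_events, EV_RX_DONE);  /* ORs in the new bit */
}

void io_waiter(void)
{
	/* Wait up to 100 ms for either bit; reset=true clears the
	 * event object's pending bits before waiting. */
	uint32_t ev = k_event_wait(&io_events, EV_RX_DONE | EV_TX_DONE,
				   true, K_MSEC(100));

	if (ev == 0) {
		/* timed out */
	}
}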
2871
2872struct k_fifo {
2873 struct k_queue _queue;
2874#ifdef CONFIG_OBJ_CORE_FIFO
2875 struct k_obj_core obj_core;
2876#endif
2877};
2878
2882#define Z_FIFO_INITIALIZER(obj) \
2883 { \
2884 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2885 }
2886
2890
2896
2904#define k_fifo_init(fifo) \
2905 ({ \
2906 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2907 k_queue_init(&(fifo)->_queue); \
2908 K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo); \
2909 K_OBJ_CORE_LINK(K_OBJ_CORE(fifo)); \
2910 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2911 })
2912
2924#define k_fifo_cancel_wait(fifo) \
2925 ({ \
2926 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2927 k_queue_cancel_wait(&(fifo)->_queue); \
2928 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2929 })
2930
2943#define k_fifo_put(fifo, data) \
2944 ({ \
2945 void *_data = data; \
2946 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, _data); \
2947 k_queue_append(&(fifo)->_queue, _data); \
2948 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, _data); \
2949 })
2950
2967#define k_fifo_alloc_put(fifo, data) \
2968 ({ \
2969 void *_data = data; \
2970 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, _data); \
2971 int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
2972 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, _data, fap_ret); \
2973 fap_ret; \
2974 })
2975
2990#define k_fifo_put_list(fifo, head, tail) \
2991 ({ \
2992 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2993 k_queue_append_list(&(fifo)->_queue, head, tail); \
2994 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2995 })
2996
3010#define k_fifo_put_slist(fifo, list) \
3011 ({ \
3012 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
3013 k_queue_merge_slist(&(fifo)->_queue, list); \
3014 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
3015 })
3016
3034#define k_fifo_get(fifo, timeout) \
3035 ({ \
3036 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
3037 void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
3038 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
3039 fg_ret; \
3040 })
3041
3055#define k_fifo_is_empty(fifo) \
3056 k_queue_is_empty(&(fifo)->_queue)
3057
3071#define k_fifo_peek_head(fifo) \
3072 ({ \
3073 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
3074 void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
3075 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
3076 fph_ret; \
3077 })
3078
3090#define k_fifo_peek_tail(fifo) \
3091 ({ \
3092 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
3093 void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
3094 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
3095 fpt_ret; \
3096 })
3097
3107#define K_FIFO_DEFINE(name) \
3108 STRUCT_SECTION_ITERABLE(k_fifo, name) = \
3109 Z_FIFO_INITIALIZER(name)
3110
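A sketch; note the first word of a queued item is reserved for the FIFO's internal use:

struct msg {
	void *fifo_reserved;   /* first word: reserved for k_fifo use */
	uint32_t payload;
};

K_FIFO_DEFINE(my_fifo);

void producer(struct msg *m, uint32_t value)
{
	m->payload = value;
	k_fifo_put(&my_fifo, m);   /* m must remain valid until consumed */
}

void consumer(void)
{
	struct msg *m = k_fifo_get(&my_fifo, K_FOREVER);

	printk("got %u\n", m->payload);
}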
3112
3113struct k_lifo {
3114 struct k_queue _queue;
3115#ifdef CONFIG_OBJ_CORE_LIFO
3116 struct k_obj_core obj_core;
3117#endif
3118};
3119
3123
3124#define Z_LIFO_INITIALIZER(obj) \
3125 { \
3126 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
3127 }
3128
3132
3138
3146#define k_lifo_init(lifo) \
3147 ({ \
3148 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
3149 k_queue_init(&(lifo)->_queue); \
3150 K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo); \
3151 K_OBJ_CORE_LINK(K_OBJ_CORE(lifo)); \
3152 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
3153 })
3154
3167#define k_lifo_put(lifo, data) \
3168 ({ \
3169 void *_data = data; \
3170 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, _data); \
3171 k_queue_prepend(&(lifo)->_queue, _data); \
3172 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, _data); \
3173 })
3174
3191#define k_lifo_alloc_put(lifo, data) \
3192 ({ \
3193 void *_data = data; \
3194 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, _data); \
3195 int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
3196 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, _data, lap_ret); \
3197 lap_ret; \
3198 })
3199
3217#define k_lifo_get(lifo, timeout) \
3218 ({ \
3219 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
3220 void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
3221 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
3222 lg_ret; \
3223 })
3224
3234#define K_LIFO_DEFINE(name) \
3235 STRUCT_SECTION_ITERABLE(k_lifo, name) = \
3236 Z_LIFO_INITIALIZER(name)
3237
3239
3243#define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
3244
3245typedef uintptr_t stack_data_t;
3246
3247struct k_stack {
3248 _wait_q_t wait_q;
3249 struct k_spinlock lock;
3250 stack_data_t *base, *next, *top;
3251
3252 uint8_t flags;
3253
3255
3256#ifdef CONFIG_OBJ_CORE_STACK
3257 struct k_obj_core obj_core;
3258#endif
3259};
3260
3261#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
3262 { \
3263 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3264 .base = (stack_buffer), \
3265 .next = (stack_buffer), \
3266 .top = (stack_buffer) + (stack_num_entries), \
3267 }
3268
3272
3278
3288void k_stack_init(struct k_stack *stack,
3289 stack_data_t *buffer, uint32_t num_entries);
3290
3291
3305
3306__syscall int32_t k_stack_alloc_init(struct k_stack *stack,
3307 uint32_t num_entries);
3308
3320int k_stack_cleanup(struct k_stack *stack);
3321
3335__syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
3336
3357__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
3358 k_timeout_t timeout);
3359
3370#define K_STACK_DEFINE(name, stack_num_entries) \
3371 stack_data_t __noinit \
3372 _k_stack_buf_##name[stack_num_entries]; \
3373 STRUCT_SECTION_ITERABLE(k_stack, name) = \
3374 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
3375 stack_num_entries)
3376
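A sketch (sized for 8 word-sized entries):

K_STACK_DEFINE(addr_stack, 8);

void stack_demo(void)
{
	stack_data_t out;

	(void)k_stack_push(&addr_stack, (stack_data_t)0x1234);

	if (k_stack_pop(&addr_stack, &out, K_NO_WAIT) == 0) {
		/* out == 0x1234 */
	}
}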
3378
3382
3383struct k_work;
3384struct k_work_q;
3385struct k_work_queue_config;
3386extern struct k_work_q k_sys_work_q;
3387
3391
3397
3402struct k_mutex {
3404 _wait_q_t wait_q;
3406 struct k_thread *owner;
3409 uint32_t lock_count;
3412 int owner_orig_prio;
3415
3416#ifdef CONFIG_OBJ_CORE_MUTEX
3417 struct k_obj_core obj_core;
3418#endif
3419};
3420
3424#define Z_MUTEX_INITIALIZER(obj) \
3425 { \
3426 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3427 .owner = NULL, \
3428 .lock_count = 0, \
3429 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
3430 }
3431
3435
3445#define K_MUTEX_DEFINE(name) \
3446 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
3447 Z_MUTEX_INITIALIZER(name)
3448
3461__syscall int k_mutex_init(struct k_mutex *mutex);
3462
3463
3485__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
3486
3507__syscall int k_mutex_unlock(struct k_mutex *mutex);
3508
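A sketch of the lock/unlock protocol (names illustrative):

K_MUTEX_DEFINE(data_lock);
static int shared_counter;

void bump(void)
{
	if (k_mutex_lock(&data_lock, K_MSEC(50)) == 0) {
		shared_counter++;             /* critical section */
		k_mutex_unlock(&data_lock);   /* only the owner may unlock */
	} else {
		/* could not acquire within 50 ms */
	}
}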
3512
3513
3514struct k_condvar {
3515 _wait_q_t wait_q;
3516
3517#ifdef CONFIG_OBJ_CORE_CONDVAR
3518 struct k_obj_core obj_core;
3519#endif
3520};
3521
3522#define Z_CONDVAR_INITIALIZER(obj) \
3523 { \
3524 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
3525 }
3526
3532
3539__syscall int k_condvar_init(struct k_condvar *condvar);
3540
3547__syscall int k_condvar_signal(struct k_condvar *condvar);
3548
3556__syscall int k_condvar_broadcast(struct k_condvar *condvar);
3557
3575__syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
3576 k_timeout_t timeout);
3577
3588#define K_CONDVAR_DEFINE(name) \
3589 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
3590 Z_CONDVAR_INITIALIZER(name)
3591
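The canonical wait/signal pattern, as a sketch:

K_MUTEX_DEFINE(cv_lock);
K_CONDVAR_DEFINE(cv);
static bool ready;

void cv_waiter(void)
{
	k_mutex_lock(&cv_lock, K_FOREVER);
	while (!ready) {
		/* Atomically releases cv_lock while sleeping,
		 * re-acquires it before returning. */
		k_condvar_wait(&cv, &cv_lock, K_FOREVER);
	}
	k_mutex_unlock(&cv_lock);
}

void cv_signaler(void)
{
	k_mutex_lock(&cv_lock, K_FOREVER);
	ready = true;
	k_condvar_signal(&cv);
	k_mutex_unlock(&cv_lock);
}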
3594
3600
3607struct k_sem {
3611 _wait_q_t wait_q;
3612 unsigned int count;
3613 unsigned int limit;
3614
3615 Z_DECL_POLL_EVENT
3616
3618
3619#ifdef CONFIG_OBJ_CORE_SEM
3620 struct k_obj_core obj_core;
3621#endif
3623};
3624
3628
3629#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3630 { \
3631 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3632 .count = (initial_count), \
3633 .limit = (count_limit), \
3634 Z_POLL_EVENT_OBJ_INIT(obj) \
3635 }
3636
3640
3649#define K_SEM_MAX_LIMIT UINT_MAX
3650
3666__syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3667 unsigned int limit);
3668
3687__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3688
3699__syscall void k_sem_give(struct k_sem *sem);
3700
3710__syscall void k_sem_reset(struct k_sem *sem);
3711
3721__syscall unsigned int k_sem_count_get(struct k_sem *sem);
3722
3726static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3727{
3728 return sem->count;
3729}
3730
3742#define K_SEM_DEFINE(name, initial_count, count_limit) \
3743 STRUCT_SECTION_ITERABLE(k_sem, name) = \
3744 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3745 BUILD_ASSERT(((count_limit) != 0) && \
3746 (((initial_count) < (count_limit)) || ((initial_count) == (count_limit))) && \
3747 ((count_limit) <= K_SEM_MAX_LIMIT));
3748
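A producer/consumer sketch (counting up to 10 queued items):

K_SEM_DEFINE(items, 0, 10);   /* starts empty */

void producer_isr(void)
{
	k_sem_give(&items);        /* giving is safe from ISR context */
}

void consumer_loop(void)
{
	while (k_sem_take(&items, K_FOREVER) == 0) {
		/* one item accounted for per successful take */
	}
}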
3750
3751#if defined(CONFIG_SCHED_IPI_SUPPORTED) || defined(__DOXYGEN__)
3752struct k_ipi_work;
3753
3754
3755typedef void (*k_ipi_func_t)(struct k_ipi_work *work);
3756
3766struct k_ipi_work {
3767 sys_dnode_t node[CONFIG_MP_MAX_NUM_CPUS]; /* Node in IPI work queue */
3768 k_ipi_func_t func; /* Function to execute on target CPU */
3769 struct k_event event; /* Event to signal when processed */
3770 uint32_t bitmask; /* Bitmask of targeted CPUs */
3772};
3773
3774
3782static inline void k_ipi_work_init(struct k_ipi_work *work)
3783{
3784 k_event_init(&work->event);
3785 for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
3786 sys_dnode_init(&work->node[i]);
3787 }
3788 work->bitmask = 0;
3789}
3790
3809int k_ipi_work_add(struct k_ipi_work *work, uint32_t cpu_bitmask,
3810 k_ipi_func_t func);
3811
3834int k_ipi_work_wait(struct k_ipi_work *work, k_timeout_t timeout);
3835
3845
3846#endif /* CONFIG_SCHED_IPI_SUPPORTED */
3847
3851
3852struct k_work_delayable;
3853struct k_work_sync;
3854
3858
3864
3871typedef void (*k_work_handler_t)(struct k_work *work);
3872
3886void k_work_init(struct k_work *work,
3887 k_work_handler_t handler);
3888
3903int k_work_busy_get(const struct k_work *work);
3904
3918static inline bool k_work_is_pending(const struct k_work *work);
3919
3940int k_work_submit_to_queue(struct k_work_q *queue,
3941 struct k_work *work);
3942
3951int k_work_submit(struct k_work *work);
3952
3977bool k_work_flush(struct k_work *work,
3978 struct k_work_sync *sync);
3979
3999int k_work_cancel(struct k_work *work);
4000
4031bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
4032
4042void k_work_queue_init(struct k_work_q *queue);
4043
4063void k_work_queue_start(struct k_work_q *queue,
4064 k_thread_stack_t *stack, size_t stack_size,
4065 int prio, const struct k_work_queue_config *cfg);
4066
4077void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *cfg);
4078
4088static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
4089
4113int k_work_queue_drain(struct k_work_q *queue, bool plug);
4114
4128int k_work_queue_unplug(struct k_work_q *queue);
4129
4145int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout);
4146
4160void k_work_init_delayable(struct k_work_delayable *dwork,
4161 k_work_handler_t handler);
4162
4174static inline struct k_work_delayable *
4175k_work_delayable_from_work(struct k_work *work);
4176
4190int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
4191
4206static inline bool k_work_delayable_is_pending(
4207 const struct k_work_delayable *dwork);
4208
4222static inline k_ticks_t k_work_delayable_expires_get(
4223 const struct k_work_delayable *dwork);
4224
4238static inline k_ticks_t k_work_delayable_remaining_get(
4239 const struct k_work_delayable *dwork);
4240
4268int k_work_schedule_for_queue(struct k_work_q *queue,
4269 struct k_work_delayable *dwork,
4270 k_timeout_t delay);
4271
4285int k_work_schedule(struct k_work_delayable *dwork,
4286 k_timeout_t delay);
4287
4323int k_work_reschedule_for_queue(struct k_work_q *queue,
4324 struct k_work_delayable *dwork,
4325 k_timeout_t delay);
4326
4339int k_work_reschedule(struct k_work_delayable *dwork,
4340 k_timeout_t delay);
4341
4366bool k_work_flush_delayable(struct k_work_delayable *dwork,
4367 struct k_work_sync *sync);
4368
4389int k_work_cancel_delayable(struct k_work_delayable *dwork);
4390
4419bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
4420 struct k_work_sync *sync);
4421
4422enum {
4426
4427 /* The atomic API is used for all work and queue flags fields to
4428 * enforce sequential consistency in SMP environments.
4429 */
4430
4431 /* Bits that represent the work item states. At least nine of the
4432 * combinations are distinct valid stable states.
4433 */
4434 K_WORK_RUNNING_BIT = 0,
4435 K_WORK_CANCELING_BIT = 1,
4436 K_WORK_QUEUED_BIT = 2,
4437 K_WORK_DELAYED_BIT = 3,
4438 K_WORK_FLUSHING_BIT = 4,
4439
4440 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
4441 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),
4442
4443 /* Static work flags */
4444 K_WORK_DELAYABLE_BIT = 8,
4445 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
4446
4447 /* Dynamic work queue flags */
4448 K_WORK_QUEUE_STARTED_BIT = 0,
4449 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
4450 K_WORK_QUEUE_BUSY_BIT = 1,
4451 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
4452 K_WORK_QUEUE_DRAIN_BIT = 2,
4453 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
4454 K_WORK_QUEUE_PLUGGED_BIT = 3,
4455 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
4456 K_WORK_QUEUE_STOP_BIT = 4,
4457 K_WORK_QUEUE_STOP = BIT(K_WORK_QUEUE_STOP_BIT),
4458
4459 /* Static work queue flags */
4460 K_WORK_QUEUE_NO_YIELD_BIT = 8,
4461 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
4462
4466 /* Transient work flags */
4467
4473 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
4474
4479 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
4480
4486 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
4487
4493 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
4494
4499 K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
4500};
4501
4503struct k_work {
4504 /* All fields are protected by the work module spinlock. No fields
4505 * are to be accessed except through kernel API.
4506 */
4507
4508 /* Node to link into k_work_q pending list. */
4509 sys_snode_t node;
4510
4511 /* The function to be invoked by the work queue thread. */
4512 k_work_handler_t handler;
4513
4514 /* The queue on which the work item was last submitted. */
4515 struct k_work_q *queue;
4516
4517 /* State of the work item.
4518 *
4519 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
4520 *
4521 * It can be RUNNING and CANCELING simultaneously.
4522 */
4523 uint32_t flags;
4524};
4525
4526#define Z_WORK_INITIALIZER(work_handler) { \
4527 .handler = (work_handler), \
4528}
4529
4531struct k_work_delayable {
4532 /* The work item. */
4533 struct k_work work;
4534
4535 /* Timeout used to submit work after a delay. */
4536 struct _timeout timeout;
4537
4538 /* The queue to which the work should be submitted. */
4539 struct k_work_q *queue;
4540};
4541
4542#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
4543 .work = { \
4544 .handler = (work_handler), \
4545 .flags = K_WORK_DELAYABLE, \
4546 }, \
4547}
4548
4565#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
4566 struct k_work_delayable work \
4567 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
4568
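A debouncing sketch (names and the 50 ms window are illustrative):

static void debounce_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);

	ARG_UNUSED(dwork);
	/* Input has been stable for 50 ms; act on it here. */
}

K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);

void on_button_edge(void)     /* e.g. GPIO interrupt callback */
{
	/* Restart the countdown on every edge. */
	k_work_reschedule(&debounce_work, K_MSEC(50));
}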
4572
4573/* Record used to wait for work to flush.
4574 *
4575 * The work item is inserted into the queue that will process (or is
4576 * processing) the item, and will be processed as soon as the item
4577 * completes. When the flusher is processed the semaphore will be
4578 * signaled, releasing the thread waiting for the flush.
4579 */
4580struct z_work_flusher {
4581 struct k_work work;
4582 struct k_sem sem;
4583};
4584
4585/* Record used to wait for work to complete a cancellation.
4586 *
4587 * The work item is inserted into a global queue of pending cancels.
4588 * When a cancelling work item goes idle any matching waiters are
4589 * removed from pending_cancels and are woken.
4590 */
4591struct z_work_canceller {
4592 sys_snode_t node;
4593 struct k_work *work;
4594 struct k_sem sem;
4595};
4596
4600
4614struct k_work_sync {
4615 union {
4616 struct z_work_flusher flusher;
4617 struct z_work_canceller canceller;
4618 };
4619};
4620
4631struct k_work_queue_config {
4632 const char *name;
4633
4646 bool no_yield;
4647
4651 bool essential;
4652
4661 uint32_t work_timeout_ms;
4662};
4663
4665struct k_work_q {
4666 /* The thread that animates the work. */
4667 struct k_thread thread;
4668
4669 /* The thread ID that animates the work. This may be an external thread
4670 * if k_work_queue_run() is used.
4671 */
4672 k_tid_t thread_id;
4673
4674 /* All the following fields must be accessed only while the
4675 * work module spinlock is held.
4676 */
4677
4678 /* List of k_work items to be worked. */
4679 sys_slist_t pending;
4680
4681 /* Wait queue for idle work thread. */
4682 _wait_q_t notifyq;
4683
4684 /* Wait queue for threads waiting for the queue to drain. */
4685 _wait_q_t drainq;
4686
4687 /* Flags describing queue state. */
4688 uint32_t flags;
4689
4690#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
4691 struct _timeout work_timeout_record;
4692 struct k_work *work;
4693 k_timeout_t work_timeout;
4694#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
4695};
4696
4697/* Provide the implementation for inline functions declared above */
4698
4699static inline bool k_work_is_pending(const struct k_work *work)
4700{
4701 return k_work_busy_get(work) != 0;
4702}
4703
4704static inline struct k_work_delayable *
4705k_work_delayable_from_work(struct k_work *work)
4706{
4707 return CONTAINER_OF(work, struct k_work_delayable, work);
4708}
4709
4710static inline bool k_work_delayable_is_pending(
4711 const struct k_work_delayable *dwork)
4712{
4713 return k_work_delayable_busy_get(dwork) != 0;
4714}
4715
4716static inline k_ticks_t k_work_delayable_expires_get(
4717 const struct k_work_delayable *dwork)
4718{
4719 return z_timeout_expires(&dwork->timeout);
4720}
4721
4722static inline k_ticks_t k_work_delayable_remaining_get(
4723 const struct k_work_delayable *dwork)
4724{
4725 return z_timeout_remaining(&dwork->timeout);
4726}
4727
4728static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
4729{
4730 return queue->thread_id;
4731}
4732
4734
4735struct k_work_user;
4736
4741
4751typedef void (*k_work_user_handler_t)(struct k_work_user *work);
4752
4756
4757struct k_work_user_q {
4758 struct k_queue queue;
4759 struct k_thread thread;
4760};
4761
4762enum {
4763 K_WORK_USER_STATE_PENDING, /* Work item pending state */
4764};
4765
4766struct k_work_user {
4767 void *_reserved; /* Used by k_queue implementation. */
4768 k_work_user_handler_t handler;
4769 atomic_t flags;
4770};
4771
4775
4776#if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4777#define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4778#else
4779#define Z_WORK_USER_INITIALIZER(work_handler) \
4780 { \
4781 ._reserved = NULL, \
4782 .handler = (work_handler), \
4783 .flags = 0 \
4784 }
4785#endif
4786
4798#define K_WORK_USER_DEFINE(work, work_handler) \
4799 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4800
4810static inline void k_work_user_init(struct k_work_user *work,
4811 k_work_user_handler_t handler)
4812{
4813 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4814}
4815
4832static inline bool k_work_user_is_pending(struct k_work_user *work)
4833{
4834 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4835}
4836
4855static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4856 struct k_work_user *work)
4857{
4858 int ret = -EBUSY;
4859
4860 if (!atomic_test_and_set_bit(&work->flags,
4861 K_WORK_USER_STATE_PENDING)) {
4862 ret = k_queue_alloc_append(&work_q->queue, work);
4863
4864 /* Couldn't insert into the queue. Clear the pending bit
4865 * so the work item can be submitted again
4866 */
4867 if (ret != 0) {
4868 atomic_clear_bit(&work->flags,
4869 K_WORK_USER_STATE_PENDING);
4870 }
4871 }
4872
4873 return ret;
4874}
4875
4895void k_work_user_queue_start(struct k_work_user_q *work_q,
4896 k_thread_stack_t *stack,
4897 size_t stack_size, int prio,
4898 const char *name);
4899
4910static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
4911{
4912 return &work_q->thread;
4913}
4914
4916
4920
4921struct k_work_poll {
4922 struct k_work work;
4923 struct k_work_q *workq;
4924 struct z_poller poller;
4925 struct k_poll_event *events;
4926 int num_events;
4927 k_work_handler_t real_handler;
4928 struct _timeout timeout;
4929 int poll_result;
4930};
4931
4935
4940
4952#define K_WORK_DEFINE(work, work_handler) \
4953 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4954
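A sketch handing work from an ISR to the system work queue:

static void rx_work_handler(struct k_work *work)
{
	ARG_UNUSED(work);
	/* Runs in the system work queue thread, so it may block. */
}

K_WORK_DEFINE(rx_work, rx_work_handler);

void rx_isr(void)
{
	k_work_submit(&rx_work);   /* cheap and ISR-safe */
}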
4964void k_work_poll_init(struct k_work_poll *work,
4965 k_work_handler_t handler);
4966
5002 struct k_work_poll *work,
5003 struct k_poll_event *events,
5004 int num_events,
5005 k_timeout_t timeout);
5006
5038int k_work_poll_submit(struct k_work_poll *work,
5039 struct k_poll_event *events,
5040 int num_events,
5041 k_timeout_t timeout);
5042
5057int k_work_poll_cancel(struct k_work_poll *work);
5058
5060
5066
5070struct k_msgq {
5072 _wait_q_t wait_q;
5074 struct k_spinlock lock;
5076 size_t msg_size;
5078 uint32_t max_msgs;
5080 char *buffer_start;
5082 char *buffer_end;
5084 char *read_ptr;
5086 char *write_ptr;
5088 uint32_t used_msgs;
5089
5090 Z_DECL_POLL_EVENT
5091
5093 uint8_t flags;
5094
5096
5097#ifdef CONFIG_OBJ_CORE_MSGQ
5098 struct k_obj_core obj_core;
5099#endif
5100};
5101
5104
5105
5106#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
5107 { \
5108 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
5109 .lock = {}, \
5110 .msg_size = q_msg_size, \
5111 .max_msgs = q_max_msgs, \
5112 .buffer_start = q_buffer, \
5113 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
5114 .read_ptr = q_buffer, \
5115 .write_ptr = q_buffer, \
5116 .used_msgs = 0, \
5117 Z_POLL_EVENT_OBJ_INIT(obj) \
5118 .flags = 0, \
5119 }
5120
5124
5125
5126#define K_MSGQ_FLAG_ALLOC BIT(0)
5127
5130struct k_msgq_attrs {
5132 size_t msg_size;
5134 uint32_t max_msgs;
5136 uint32_t used_msgs;
5137};
5139
5140
5159#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
5160 static char __noinit __aligned(q_align) \
5161 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
5162 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
5163 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
5164 (q_msg_size), (q_max_msgs))
5165
5180void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
5181 uint32_t max_msgs);
5182
5202__syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
5203 uint32_t max_msgs);
5204
5215int k_msgq_cleanup(struct k_msgq *msgq);
5216
5237__syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
5238
5263__syscall int k_msgq_put_front(struct k_msgq *msgq, const void *data);
5264
5285__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
5286
5301__syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
5302
5319__syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
5320
5330__syscall void k_msgq_purge(struct k_msgq *msgq);
5331
5342__syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
5343
5352__syscall void k_msgq_get_attrs(struct k_msgq *msgq,
5353 struct k_msgq_attrs *attrs);
5354
5355
5356static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
5357{
5358 return msgq->max_msgs - msgq->used_msgs;
5359}
5360
5370__syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
5371
5372static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
5373{
5374 return msgq->used_msgs;
5375}
5376
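A sketch moving small value-copied records from an ISR to a thread:

struct sensor_msg {
	uint16_t id;
	int32_t value;
};

K_MSGQ_DEFINE(sensor_q, sizeof(struct sensor_msg), 16, 4);

void sensor_isr(int32_t reading)
{
	struct sensor_msg msg = { .id = 1, .value = reading };

	/* From an ISR use K_NO_WAIT; a nonzero return means the queue
	 * was full. */
	(void)k_msgq_put(&sensor_q, &msg, K_NO_WAIT);
}

void sensor_thread(void)
{
	struct sensor_msg msg;

	while (k_msgq_get(&sensor_q, &msg, K_FOREVER) == 0) {
		/* msg was copied out by value */
	}
}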
5378
5384
5390struct k_mbox_msg {
5391 size_t size;
5393 uint32_t info;
5395 void *tx_data;
5397 k_tid_t rx_source_thread;
5399 k_tid_t tx_target_thread;
5401 k_tid_t _syncing_thread;
5402#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
5404 struct k_sem *_async_sem;
5405#endif
5406};
5407
5411struct k_mbox {
5413 _wait_q_t tx_msg_queue;
5415 _wait_q_t rx_msg_queue;
5417
5418 struct k_spinlock lock;
5419
5420#ifdef CONFIG_OBJ_CORE_MAILBOX
5421 struct k_obj_core obj_core;
5422#endif
5423};
5424
5427
5428#define Z_MBOX_INITIALIZER(obj) \
5429 { \
5430 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
5431 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
5432 }
5433
5437
5447#define K_MBOX_DEFINE(name) \
5448 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
5449 Z_MBOX_INITIALIZER(name) \
5450
5451
5458void k_mbox_init(struct k_mbox *mbox);
5459
5479int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
5480 k_timeout_t timeout);
5481
5495void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
5496 struct k_sem *sem);
5497
5515int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
5516 void *buffer, k_timeout_t timeout);
5517
5531void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
5532
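A synchronous send/receive sketch (buffer sizes illustrative):

K_MBOX_DEFINE(my_mbox);

void mbox_sender(void)
{
	char data[16] = "hello";
	struct k_mbox_msg msg = {
		.size = sizeof(data),
		.tx_data = data,
		.tx_target_thread = K_ANY,   /* any receiver may take it */
	};

	(void)k_mbox_put(&my_mbox, &msg, K_FOREVER);  /* blocks until received */
}

void mbox_receiver(void)
{
	char buf[16];
	struct k_mbox_msg msg = {
		.size = sizeof(buf),
		.rx_source_thread = K_ANY,
	};

	(void)k_mbox_get(&my_mbox, &msg, buf, K_FOREVER);
}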
5534
5540
5550__syscall void k_pipe_init(struct k_pipe *pipe, uint8_t *buffer, size_t buffer_size);
5551
5556
5557struct k_pipe {
5558 size_t waiting;
5559 struct ring_buf buf;
5560 struct k_spinlock lock;
5561 _wait_q_t data;
5562 _wait_q_t space;
5563 uint8_t flags;
5564
5565 Z_DECL_POLL_EVENT
5566#ifdef CONFIG_OBJ_CORE_PIPE
5567 struct k_obj_core obj_core;
5568#endif
5570};
5571
5575#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
5576{ \
5577 .waiting = 0, \
5578 .buf = RING_BUF_INIT(pipe_buffer, pipe_buffer_size), \
5579 .data = Z_WAIT_Q_INIT(&obj.data), \
5580 .space = Z_WAIT_Q_INIT(&obj.space), \
5581 .flags = PIPE_FLAG_OPEN, \
5582 Z_POLL_EVENT_OBJ_INIT(obj) \
5583}
5587
5601#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
5602 static unsigned char __noinit __aligned(pipe_align) \
5603 _k_pipe_buf_##name[pipe_buffer_size]; \
5604 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
5605 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
5606
5607
5624__syscall int k_pipe_write(struct k_pipe *pipe, const uint8_t *data, size_t len,
5625 k_timeout_t timeout);
5626
5642__syscall int k_pipe_read(struct k_pipe *pipe, uint8_t *data, size_t len,
5643 k_timeout_t timeout);
5644
5654__syscall void k_pipe_reset(struct k_pipe *pipe);
5655
5664__syscall void k_pipe_close(struct k_pipe *pipe);
5666
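A byte-stream sketch (256-byte ring, names illustrative):

K_PIPE_DEFINE(log_pipe, 256, 4);

void log_producer(const uint8_t *buf, size_t len)
{
	/* Returns bytes written (possibly fewer than len) or a
	 * negative error code. */
	int written = k_pipe_write(&log_pipe, buf, len, K_MSEC(10));

	ARG_UNUSED(written);
}

void log_consumer(void)
{
	uint8_t chunk[32];
	int n = k_pipe_read(&log_pipe, chunk, sizeof(chunk), K_FOREVER);

	if (n > 0) {
		/* consumed n bytes */
	}
}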
5670struct k_mem_slab_info {
5671 uint32_t num_blocks;
5672 size_t block_size;
5673 uint32_t num_used;
5674#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5675 uint32_t max_used;
5676#endif
5677};
5678
5679struct k_mem_slab {
5680 _wait_q_t wait_q;
5681 struct k_spinlock lock;
5682 char *buffer;
5683 char *free_list;
5684 struct k_mem_slab_info info;
5685
5687
5688#ifdef CONFIG_OBJ_CORE_MEM_SLAB
5689 struct k_obj_core obj_core;
5690#endif
5691};
5692
5693#define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
5694 _slab_num_blocks) \
5695 { \
5696 .wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q), \
5697 .lock = {}, \
5698 .buffer = _slab_buffer, \
5699 .free_list = NULL, \
5700 .info = {_slab_num_blocks, _slab_block_size, 0} \
5701 }
5702
5703
5707
5713
5739#define K_MEM_SLAB_DEFINE_IN_SECT(name, in_section, slab_block_size, slab_num_blocks, slab_align) \
5740 BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0, \
5741 "slab_block_size must be a multiple of slab_align"); \
5742 BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0), \
5743 "slab_align must be a power of 2"); \
5744 char in_section __aligned(WB_UP( \
5745 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5746 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER( \
5747 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
5748
5772#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
5773 K_MEM_SLAB_DEFINE_IN_SECT(name, __noinit_named(k_mem_slab_buf_##name), slab_block_size, \
5774 slab_num_blocks, slab_align)
5775
5792#define K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, in_section, slab_block_size, slab_num_blocks, \
5793 slab_align) \
5794 BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0, \
5795 "slab_block_size must be a multiple of slab_align"); \
5796 BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0), \
5797 "slab_align must be a power of 2"); \
5798 static char in_section __aligned(WB_UP( \
5799 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5800 static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER( \
5801 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
5802
5817#define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5818 K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, __noinit_named(k_mem_slab_buf_##name), \
5819 slab_block_size, slab_num_blocks, slab_align)
5820
5842int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5843 size_t block_size, uint32_t num_blocks);
5844
5867int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5868 k_timeout_t timeout);
5869
5881void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
5882
5895static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
5896{
5897 return slab->info.num_used;
5898}
5899
5912static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
5913{
5914#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5915 return slab->info.max_used;
5916#else
5917 ARG_UNUSED(slab);
5918 return 0;
5919#endif
5920}
5921
5934static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
5935{
5936 return slab->info.num_blocks - slab->info.num_used;
5937}
5938
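An allocation sketch (8 blocks of 64 bytes, 4-byte aligned):

K_MEM_SLAB_DEFINE(frame_slab, 64, 8, 4);

void slab_demo(void)
{
	void *block;

	if (k_mem_slab_alloc(&frame_slab, &block, K_MSEC(20)) == 0) {
		/* use the fixed-size block ... */
		k_mem_slab_free(&frame_slab, block);
	} else {
		/* no block became free within 20 ms */
	}
}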
5952
5953int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
5954
5968int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
5969
5971
5976
5977/* kernel synchronized heap struct */
5978
5979struct k_heap {
5980 struct sys_heap heap;
5981 _wait_q_t wait_q;
5982 struct k_spinlock lock;
5983};
5984
5998void k_heap_init(struct k_heap *h, void *mem,
5999 size_t bytes) __attribute_nonnull(1);
6000
6021void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
6022 k_timeout_t timeout) __attribute_nonnull(1);
6023
6045void *k_heap_alloc(struct k_heap *h, size_t bytes,
6046 k_timeout_t timeout) __attribute_nonnull(1);
6047
6070void *k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
6071 __attribute_nonnull(1);
6072
6096void *k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
6097 __attribute_nonnull(1);
6098
6109void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
6110
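A sketch of a dedicated heap (2 KiB, name illustrative):

K_HEAP_DEFINE(app_heap, 2048);

void heap_demo(void)
{
	/* A thread may block up to 10 ms waiting for memory to be freed;
	 * from an ISR, only K_NO_WAIT is legal. */
	void *mem = k_heap_alloc(&app_heap, 128, K_MSEC(10));

	if (mem != NULL) {
		k_heap_free(&app_heap, mem);
	}
}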
6111/* Minimum heap sizes needed to return a successful 1-byte allocation.
6112 * Assumes a chunk aligned (8 byte) memory buffer.
6113 */
6114#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
6115#define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 80 : 52)
6116#else
6117#define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 56 : 44)
6118#endif /* CONFIG_SYS_HEAP_RUNTIME_STATS */
6119
6120/* Size of `struct z_heap` */
6121#define _Z_HEAP_SIZE \
6122 ((4 * sizeof(uint32_t)) + \
6123 (3 * (IS_ENABLED(CONFIG_SYS_HEAP_RUNTIME_STATS) ? sizeof(size_t) : 0)))
6124
6125/* Number of buckets required to store @a bytes */
6126#define _Z_HEAP_NUM_BUCKETS(bytes) ((31 - __builtin_clz((bytes / 8) - 1)) + 1)
6127
6128/* Number of bytes consumed by buckets */
6129#define _Z_HEAP_BUCKETS_SIZE(bytes) (_Z_HEAP_NUM_BUCKETS(bytes) * sizeof(uint32_t))
6130
6153#define Z_HEAP_MIN_SIZE_FOR(alloc_bytes) \
6154 ((alloc_bytes) + _Z_HEAP_SIZE + _Z_HEAP_BUCKETS_SIZE(alloc_bytes) + (3 * 8))
6155
6172#define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
6173 char in_section \
6174 __aligned(8) /* CHUNK_UNIT */ \
6175 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
6176 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
6177 .heap = { \
6178 .init_mem = kheap_##name, \
6179 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
6180 }, \
6181 }
6182
6197#define K_HEAP_DEFINE(name, bytes) \
6198 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
6199 __noinit_named(kheap_buf_##name))
6200
6215#define K_HEAP_DEFINE_NOCACHE(name, bytes) \
6216 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
6217
6227int k_heap_array_get(struct k_heap **heap);
6228
6232
6239
6258void *k_aligned_alloc(size_t align, size_t size);
6259
6271void *k_malloc(size_t size);
6272
6283void k_free(void *ptr);
6284
6296void *k_calloc(size_t nmemb, size_t size);
6297
6315void *k_realloc(void *ptr, size_t size);
6316
6318
6319/* polling API - PRIVATE */
6320
6321#ifdef CONFIG_POLL
6322#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
6323#else
6324#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
6325#endif
6326
6327/* private - types bit positions */
6328enum _poll_types_bits {
6329 /* can be used to ignore an event */
6330 _POLL_TYPE_IGNORE,
6331
6332 /* to be signaled by k_poll_signal_raise() */
6333 _POLL_TYPE_SIGNAL,
6334
6335 /* semaphore availability */
6336 _POLL_TYPE_SEM_AVAILABLE,
6337
6338 /* queue/FIFO/LIFO data availability */
6339 _POLL_TYPE_DATA_AVAILABLE,
6340
6341 /* msgq data availability */
6342 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
6343
6344 /* pipe data availability */
6345 _POLL_TYPE_PIPE_DATA_AVAILABLE,
6346
6347 _POLL_NUM_TYPES
6348};
6349
6350#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
6351
6352/* private - states bit positions */
6353enum _poll_states_bits {
6354 /* default state when creating event */
6355 _POLL_STATE_NOT_READY,
6356
6357 /* signaled by k_poll_signal_raise() */
6358 _POLL_STATE_SIGNALED,
6359
6360 /* semaphore is available */
6361 _POLL_STATE_SEM_AVAILABLE,
6362
6363 /* data is available to read on queue/FIFO/LIFO */
6364 _POLL_STATE_DATA_AVAILABLE,
6365
6366 /* queue/FIFO/LIFO wait was cancelled */
6367 _POLL_STATE_CANCELLED,
6368
6369 /* data is available to read on a message queue */
6370 _POLL_STATE_MSGQ_DATA_AVAILABLE,
6371
6372 /* data is available to read from a pipe */
6373 _POLL_STATE_PIPE_DATA_AVAILABLE,
6374
6375 _POLL_NUM_STATES
6376};
6377
6378#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
6379
6380#define _POLL_EVENT_NUM_UNUSED_BITS \
6381 (32 - (0 \
6382 + 8 /* tag */ \
6383 + _POLL_NUM_TYPES \
6384 + _POLL_NUM_STATES \
6385 + 1 /* modes */ \
6386 ))
6387
6388/* end of polling API - PRIVATE */
6389
6390
6398
6399/* Public polling API */
6400
6401/* public - values for k_poll_event.type bitfield */
6402#define K_POLL_TYPE_IGNORE 0
6403#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
6404#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
6405#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
6406#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
6407#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
6408#define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
6409
6410/* public - polling modes */
6411enum k_poll_modes {
6412 /* polling thread does not take ownership of objects when available */
6413 K_POLL_MODE_NOTIFY_ONLY = 0,
6414
6415 K_POLL_NUM_MODES
6416};
6417
6418/* public - values for k_poll_event.state bitfield */
6419#define K_POLL_STATE_NOT_READY 0
6420#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
6421#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
6422#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
6423#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
6424#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
6425#define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
6426#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
6427
6428/* public - poll signal object */
6431struct k_poll_signal {
6433 sys_dlist_t poll_events;
6437 unsigned int signaled;
6440 int result;
6441};
6442
6443#define K_POLL_SIGNAL_INITIALIZER(obj) \
6444 { \
6445 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
6446 .signaled = 0, \
6447 .result = 0, \
6448 }
6449
6454struct k_poll_event {
6455 sys_dnode_t _node;
6456
6458 struct z_poller *poller;
6459
6461 uint32_t tag:8;
6462
6464 uint32_t type:_POLL_NUM_TYPES;
6465
6467 uint32_t state:_POLL_NUM_STATES;
6468
6470 uint32_t mode:1;
6471
6473 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
6474
6476 union {
6477 /* The typed_* fields below are used by K_POLL_EVENT_*INITIALIZER() macros to ensure
6478 * type safety of polled objects.
6479 */
6487 };
6488};
6489
6490#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
6491 { \
6492 .poller = NULL, \
6493 .type = _event_type, \
6494 .state = K_POLL_STATE_NOT_READY, \
6495 .mode = _event_mode, \
6496 .unused = 0, \
6497 { \
6498 .typed_##_event_type = _event_obj, \
6499 }, \
6500 }
6501
6502#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
6503 event_tag) \
6504 { \
6505 .tag = event_tag, \
6506 .type = _event_type, \
6507 .state = K_POLL_STATE_NOT_READY, \
6508 .mode = _event_mode, \
6509 .unused = 0, \
6510 { \
6511 .typed_##_event_type = _event_obj, \
6512 }, \
6513 }
6514
6529
6530void k_poll_event_init(struct k_poll_event *event, uint32_t type,
6531 int mode, void *obj);
6532
6575
6576__syscall int k_poll(struct k_poll_event *events, int num_events,
6577 k_timeout_t timeout);
6578
6586
6587__syscall void k_poll_signal_init(struct k_poll_signal *sig);
6588
6594__syscall void k_poll_signal_reset(struct k_poll_signal *sig);
6595
6606__syscall void k_poll_signal_check(struct k_poll_signal *sig,
6607 unsigned int *signaled, int *result);
6608
6632
6633__syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
6634
6636
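A sketch waiting on a semaphore and a signal at once (names illustrative):

K_SEM_DEFINE(poll_sem, 0, 1);
static struct k_poll_signal poll_sig = K_POLL_SIGNAL_INITIALIZER(poll_sig);

void poll_demo(void)
{
	struct k_poll_event events[2] = {
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
					 K_POLL_MODE_NOTIFY_ONLY, &poll_sem),
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
					 K_POLL_MODE_NOTIFY_ONLY, &poll_sig),
	};

	if (k_poll(events, 2, K_SECONDS(1)) == 0) {
		if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
			/* polling only notifies; take the semaphore explicitly */
			k_sem_take(&poll_sem, K_NO_WAIT);
		}

		/* Reset states before reusing the event array. */
		events[0].state = K_POLL_STATE_NOT_READY;
		events[1].state = K_POLL_STATE_NOT_READY;
	}
}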
6655static inline void k_cpu_idle(void)
6656{
6657 arch_cpu_idle();
6658}
6659
6674static inline void k_cpu_atomic_idle(unsigned int key)
6675{
6676 arch_cpu_atomic_idle(key);
6677}
6678
6682
6687#ifdef ARCH_EXCEPT
6688/* This architecture has direct support for triggering a CPU exception */
6689#define z_except_reason(reason) ARCH_EXCEPT(reason)
6690#else
6691
6692#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
6693#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
6694#else
6695#define __EXCEPT_LOC()
6696#endif
6697
6698/* NOTE: This is the implementation for arches that do not implement
6699 * ARCH_EXCEPT() to generate a real CPU exception.
6700 *
6701 * We won't have a real exception frame to determine the PC value when
6702 * the oops occurred, so print file and line number before we jump into
6703 * the fatal error handler.
6704 */
6705#define z_except_reason(reason) do { \
6706 __EXCEPT_LOC(); \
6707 z_fatal_error(reason, NULL); \
6708 } while (false)
6709
6710#endif /* ARCH_EXCEPT */
6714
6725/** @brief Generate a kernel oops (fatal error for the calling thread). */
6726#define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
6727
6735/** @brief Generate a kernel panic (fatal error for the entire system). */
6736#define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
6737
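A sketch of when each macro applies; the request type and the integrity check are hypothetical:

void handle_request(struct request *req) /* hypothetical type */
{
	if (req == NULL) {
		k_oops(); /* unrecoverable for this thread only */
	}

	if (table_checksum_bad()) { /* hypothetical integrity check */
		k_panic(); /* the system as a whole cannot continue */
	}

	/* ... normal processing ... */
}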
6741
6742/*
6743 * private APIs that are utilized by one or more public APIs
6744 */
6745
6749void z_timer_expiration_handler(struct _timeout *timeout);
6753
6754#ifdef CONFIG_PRINTK
6761/** @brief Emit a character buffer to the console device (internal use). */
6762__syscall void k_str_out(char *c, size_t n);
6763#endif
6764
6770
6790/** @brief Disable preservation of floating point context information. */
6791__syscall int k_float_disable(struct k_thread *thread);
6792
6830/** @brief Enable preservation of floating point context information. */
6831__syscall int k_float_enable(struct k_thread *thread, unsigned int options);
6832
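A sketch pairing the two calls on the current thread, using the K_FP_REGS option defined earlier in this header; error handling is elided:

if (k_float_enable(k_current_get(), K_FP_REGS) == 0) {
	/* ... computation that uses floating point registers ... */
	(void)k_float_disable(k_current_get());
}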
6836
6845/** @brief Get the runtime statistics of a thread. */
6846int k_thread_runtime_stats_get(k_tid_t thread, k_thread_runtime_stats_t *stats);
6847
6853/** @brief Get the runtime statistics of all threads. */
6854int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
6855
6862/** @brief Get the runtime statistics of all threads on specified cpu. */
6863int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats);
6864
6873/** @brief Enable gathering of runtime statistics for specified thread. */
6874int k_thread_runtime_stats_enable(k_tid_t thread);
6875
6884/** @brief Disable gathering of runtime statistics for specified thread. */
6885int k_thread_runtime_stats_disable(k_tid_t thread);
6886
6893/** @brief Enable gathering of system runtime statistics. */
6894void k_sys_runtime_stats_enable(void);
6895
6902/** @brief Disable gathering of system runtime statistics. */
6903void k_sys_runtime_stats_disable(void);
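A sketch of reading the current thread's accumulated statistics; it assumes runtime statistics are enabled in Kconfig and that execution_cycles is the field of interest:

k_thread_runtime_stats_t stats;

if (k_thread_runtime_stats_get(k_current_get(), &stats) == 0) {
	printk("execution cycles: %llu\n",
	       (unsigned long long)stats.execution_cycles);
}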
6904#ifdef __cplusplus
6905}
6906#endif
6907
6908#include <zephyr/tracing/tracing.h>
6909#include <zephyr/syscalls/kernel.h>
6910
6911#endif /* !_ASMLANGUAGE */
6912
6913#endif /* ZEPHYR_INCLUDE_KERNEL_H_ */