Zephyr API Documentation 4.3.0-rc2
A Scalable Open Source RTOS
kernel.h
1/*
2 * Copyright (c) 2016, Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
12
13#ifndef ZEPHYR_INCLUDE_KERNEL_H_
14#define ZEPHYR_INCLUDE_KERNEL_H_
15
16#if !defined(_ASMLANGUAGE)
18#include <errno.h>
19#include <limits.h>
20#include <stdbool.h>
21#include <zephyr/toolchain.h>
26
27#ifdef __cplusplus
28extern "C" {
29#endif
30
31/*
32 * Zephyr currently assumes the sizes of a couple of standard types to simplify
33 * print string formats. Let's make sure this doesn't change without notice.
34 */
35BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
36BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
37BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));
38
47
48#define K_ANY NULL
49
50#if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
51#error Zero available thread priorities defined!
52#endif
53
54#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
55#define K_PRIO_PREEMPT(x) (x)
56
57#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
58#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
59#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
60#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
61#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
62
63#ifdef CONFIG_POLL
64#define Z_POLL_EVENT_OBJ_INIT(obj) \
65 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
66#define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
67#else
68#define Z_POLL_EVENT_OBJ_INIT(obj)
69#define Z_DECL_POLL_EVENT
70#endif
71
72struct k_thread;
73struct k_mutex;
74struct k_sem;
75struct k_msgq;
76struct k_mbox;
77struct k_pipe;
78struct k_queue;
79struct k_fifo;
80struct k_lifo;
81struct k_stack;
82struct k_mem_slab;
83struct k_timer;
84struct k_poll_event;
85struct k_poll_signal;
86struct k_mem_domain;
87struct k_mem_partition;
88struct k_futex;
89struct k_event;
90
96
97/* private, used by k_poll and k_work_poll */
98struct k_work_poll;
99typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
100
105
119static inline void
121{
122#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
123 thread->base.usage.longest = 0ULL;
124#endif
125}
126
127typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
128 void *user_data);
129
145void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
146
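/*
 * Usage sketch (illustrative, not part of kernel.h): iterating over all
 * threads with k_thread_foreach(). The callback and counter below are
 * example names, not kernel API.
 */
static void count_cb(const struct k_thread *thread, void *user_data)
{
        ARG_UNUSED(thread);
        (*(int *)user_data)++;
}

static int count_threads(void)
{
        int count = 0;

        k_thread_foreach(count_cb, &count);
        return count;
}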
165#ifdef CONFIG_SMP
166void k_thread_foreach_filter_by_cpu(unsigned int cpu,
167 k_thread_user_cb_t user_cb, void *user_data);
168#else
169static inline
170void k_thread_foreach_filter_by_cpu(unsigned int cpu,
171 k_thread_user_cb_t user_cb, void *user_data)
172{
173 __ASSERT(cpu == 0, "cpu filter out of bounds");
174 ARG_UNUSED(cpu);
175 k_thread_foreach(user_cb, user_data);
176}
177#endif
178
207 k_thread_user_cb_t user_cb, void *user_data);
208
240#ifdef CONFIG_SMP
242 k_thread_user_cb_t user_cb, void *user_data);
243#else
244static inline
245void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
246 k_thread_user_cb_t user_cb, void *user_data)
247{
248 __ASSERT(cpu == 0, "cpu filter out of bounds");
249 ARG_UNUSED(cpu);
250 k_thread_foreach_unlocked(user_cb, user_data);
251}
252#endif
253
255
261
262#endif /* !_ASMLANGUAGE */
263
264
265/*
266 * Thread user options. May be needed by assembly code. The common part
267 * uses the low bits; arch-specific options use the high bits.
268 */
269
273#define K_ESSENTIAL (BIT(0))
274
275#define K_FP_IDX 1
285#define K_FP_REGS (BIT(K_FP_IDX))
286
293#define K_USER (BIT(2))
294
303#define K_INHERIT_PERMS (BIT(3))
304
314#define K_CALLBACK_STATE (BIT(4))
315
325#define K_DSP_IDX 6
326#define K_DSP_REGS (BIT(K_DSP_IDX))
327
336#define K_AGU_IDX 7
337#define K_AGU_REGS (BIT(K_AGU_IDX))
338
348#define K_SSE_REGS (BIT(7))
349
350/* end - thread options */
351
352#if !defined(_ASMLANGUAGE)
377__syscall k_thread_stack_t *k_thread_stack_alloc(size_t size, int flags);
378
392
444__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
445 k_thread_stack_t *stack,
446 size_t stack_size,
448 void *p1, void *p2, void *p3,
449 int prio, uint32_t options, k_timeout_t delay);
450
473 void *p1, void *p2,
474 void *p3);
475
489#define k_thread_access_grant(thread, ...) \
490 FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)
491
506static inline void k_thread_heap_assign(struct k_thread *thread,
507 struct k_heap *heap)
508{
509 thread->resource_pool = heap;
510}
511
512#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
533__syscall int k_thread_stack_space_get(const struct k_thread *thread,
534 size_t *unused_ptr);
535#endif
536
537#if (K_HEAP_MEM_POOL_SIZE > 0)
550void k_thread_system_pool_assign(struct k_thread *thread);
551#endif /* (K_HEAP_MEM_POOL_SIZE > 0) */
552
572__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
573
587__syscall int32_t k_sleep(k_timeout_t timeout);
588
600static inline int32_t k_msleep(int32_t ms)
601{
602 return k_sleep(Z_TIMEOUT_MS(ms));
603}
604
622
639__syscall void k_busy_wait(uint32_t usec_to_wait);
640
652bool k_can_yield(void);
653
661__syscall void k_yield(void);
662
672__syscall void k_wakeup(k_tid_t thread);
673
687__attribute_const__
689
701static inline bool k_is_pre_kernel(void)
702{
703 extern bool z_sys_post_kernel; /* in init.c */
704
705 /*
706 * If called from userspace, it must be post kernel.
707 * This guard is necessary because z_sys_post_kernel memory
708 * is not accessible to user threads.
709 */
710 if (k_is_user_context()) {
711 return false;
712 }
713
714 return !z_sys_post_kernel;
715}
716
723__attribute_const__
724static inline k_tid_t k_current_get(void)
725{
726 __ASSERT(!k_is_pre_kernel(), "k_current_get called pre-kernel");
727
728#ifdef CONFIG_CURRENT_THREAD_USE_TLS
729
730 /* Thread-local cache of current thread ID, set in z_thread_entry() */
731 extern Z_THREAD_LOCAL k_tid_t z_tls_current;
732
733 return z_tls_current;
734#else
736#endif
737}
738
758__syscall void k_thread_abort(k_tid_t thread);
759
760k_ticks_t z_timeout_expires(const struct _timeout *timeout);
761k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
762
763#ifdef CONFIG_SYS_CLOCK_EXISTS
764
772__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread);
773
774static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
775 const struct k_thread *thread)
776{
777 return z_timeout_expires(&thread->base.timeout);
778}
779
788
789static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
790 const struct k_thread *thread)
791{
792 return z_timeout_remaining(&thread->base.timeout);
793}
794
795#endif /* CONFIG_SYS_CLOCK_EXISTS */
796
800
801struct _static_thread_data {
802 struct k_thread *init_thread;
803 k_thread_stack_t *init_stack;
804 unsigned int init_stack_size;
805 k_thread_entry_t init_entry;
806 void *init_p1;
807 void *init_p2;
808 void *init_p3;
809 int init_prio;
810 uint32_t init_options;
811 const char *init_name;
812#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
813 int32_t init_delay_ms;
814#else
815 k_timeout_t init_delay;
816#endif
817};
818
819#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
820#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
821#define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
822#else
823#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS_INIT(ms)
824#define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
825#endif
826
827#define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
828 entry, p1, p2, p3, \
829 prio, options, delay, tname) \
830 { \
831 .init_thread = (thread), \
832 .init_stack = (stack), \
833 .init_stack_size = (stack_size), \
834 .init_entry = (k_thread_entry_t)entry, \
835 .init_p1 = (void *)p1, \
836 .init_p2 = (void *)p2, \
837 .init_p3 = (void *)p3, \
838 .init_prio = (prio), \
839 .init_options = (options), \
840 .init_name = STRINGIFY(tname), \
841 Z_THREAD_INIT_DELAY_INITIALIZER(delay) \
842 }
843
844/*
845 * Refer to K_THREAD_DEFINE() and K_KERNEL_THREAD_DEFINE() for
846 * information on arguments.
847 */
848#define Z_THREAD_COMMON_DEFINE(name, stack_size, \
849 entry, p1, p2, p3, \
850 prio, options, delay) \
851 struct k_thread _k_thread_obj_##name; \
852 STRUCT_SECTION_ITERABLE(_static_thread_data, \
853 _k_thread_data_##name) = \
854 Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
855 _k_thread_stack_##name, stack_size,\
856 entry, p1, p2, p3, prio, options, \
857 delay, name); \
858 __maybe_unused const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
859
863
895#define K_THREAD_DEFINE(name, stack_size, \
896 entry, p1, p2, p3, \
897 prio, options, delay) \
898 K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
899 Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
900 prio, options, delay)
901
932#define K_KERNEL_THREAD_DEFINE(name, stack_size, \
933 entry, p1, p2, p3, \
934 prio, options, delay) \
935 K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
936 Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
937 prio, options, delay)
938
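/*
 * Usage sketch (illustrative, not part of kernel.h): a statically defined
 * thread created with K_THREAD_DEFINE(). The entry function, 1 KiB stack,
 * priority 7 and zero start delay are arbitrary example values.
 */
static void blink_entry(void *p1, void *p2, void *p3)
{
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);

        while (true) {
                /* do periodic work here */
                k_sleep(K_MSEC(500));
        }
}

K_THREAD_DEFINE(blink_tid, 1024, blink_entry, NULL, NULL, NULL, 7, 0, 0);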
948__syscall int k_thread_priority_get(k_tid_t thread);
949
975__syscall void k_thread_priority_set(k_tid_t thread, int prio);
976
977
978#ifdef CONFIG_SCHED_DEADLINE
1010__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
1011
1052__syscall void k_thread_absolute_deadline_set(k_tid_t thread, int deadline);
1053#endif
1054
1073__syscall void k_reschedule(void);
1074
1075#ifdef CONFIG_SCHED_CPU_MASK
1089
1103
1117
1131
1142int k_thread_cpu_pin(k_tid_t thread, int cpu);
1143#endif
1144
1166__syscall void k_thread_suspend(k_tid_t thread);
1167
1179__syscall void k_thread_resume(k_tid_t thread);
1180
1194static inline void k_thread_start(k_tid_t thread)
1195{
1196 k_wakeup(thread);
1197}
1198
1225void k_sched_time_slice_set(int32_t slice, int prio);
1226
1265void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
1266 k_thread_timeslice_fn_t expired, void *data);
1267
1269
1274
1286bool k_is_in_isr(void);
1287
1304__syscall int k_is_preempt_thread(void);
1305
1309
1314
1340void k_sched_lock(void);
1341
1350
1363__syscall void k_thread_custom_data_set(void *value);
1364
1372__syscall void *k_thread_custom_data_get(void);
1373
1387__syscall int k_thread_name_set(k_tid_t thread, const char *str);
1388
1397const char *k_thread_name_get(k_tid_t thread);
1398
1410__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
1411 size_t size);
1412
1425const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
1426
1430
1435
1444#define K_NO_WAIT Z_TIMEOUT_NO_WAIT
1445
1458#define K_NSEC(t) Z_TIMEOUT_NS(t)
1459
1472#define K_USEC(t) Z_TIMEOUT_US(t)
1473
1484#define K_CYC(t) Z_TIMEOUT_CYC(t)
1485
1496#define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1497
1508#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
1509
1520#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
1521
1532#define K_MINUTES(m) K_SECONDS((m) * 60)
1533
1544#define K_HOURS(h) K_MINUTES((h) * 60)
1545
1554#define K_FOREVER Z_FOREVER
1555
1556#ifdef CONFIG_TIMEOUT_64BIT
1557
1569#define K_TIMEOUT_ABS_TICKS(t) \
1570 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)CLAMP(t, 0, (INT64_MAX - 1))))
1571
1583#define K_TIMEOUT_ABS_SEC(t) K_TIMEOUT_ABS_TICKS(k_sec_to_ticks_ceil64(t))
1584
1596#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1597
1610#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1611
1624#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1625
1638#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1639
1640#endif
1641
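/*
 * Usage sketch (illustrative, not part of kernel.h): the helpers above all
 * build k_timeout_t values accepted by blocking kernel calls. Durations are
 * example values.
 */
static void wait_examples(void)
{
        k_sleep(K_MSEC(250));            /* relative delay: 250 ms */
        k_sleep(K_SECONDS(2));           /* relative delay: 2 s */
#ifdef CONFIG_TIMEOUT_64BIT
        k_sleep(K_TIMEOUT_ABS_MS(1000)); /* absolute: until uptime reaches 1 s */
#endif
}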
1645
1652struct k_timer {
1656
1657 /*
1658 * _timeout structure must be first here if we want to use
1659 * dynamic timer allocation. timeout.node is used in the double-linked
1660 * list of free timers
1661 */
1662 struct _timeout timeout;
1663
1664 /* wait queue for the (single) thread waiting on this timer */
1665 _wait_q_t wait_q;
1666
1667 /* runs in ISR context */
1668 void (*expiry_fn)(struct k_timer *timer);
1669
1670 /* runs in the context of the thread that calls k_timer_stop() */
1671 void (*stop_fn)(struct k_timer *timer);
1672
1673 /* timer period */
1674 k_timeout_t period;
1675
1676 /* timer status */
1677 uint32_t status;
1678
1679 /* user-specific data, also used to support legacy features */
1680 void *user_data;
1681
1683
1684#ifdef CONFIG_OBJ_CORE_TIMER
1685 struct k_obj_core obj_core;
1686#endif
1690};
1691
1695#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1696 { \
1697 .timeout = { \
1698 .node = {},\
1699 .fn = z_timer_expiration_handler, \
1700 .dticks = 0, \
1701 }, \
1702 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1703 .expiry_fn = expiry, \
1704 .stop_fn = stop, \
1705 .period = {}, \
1706 .status = 0, \
1707 .user_data = 0, \
1708 }
1709
1713
1719
1730typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1731
1746typedef void (*k_timer_stop_t)(struct k_timer *timer);
1747
1759#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1760 STRUCT_SECTION_ITERABLE(k_timer, name) = \
1761 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1762
1772void k_timer_init(struct k_timer *timer,
1773 k_timer_expiry_t expiry_fn,
1774 k_timer_stop_t stop_fn);
1775
1793__syscall void k_timer_start(struct k_timer *timer,
1794 k_timeout_t duration, k_timeout_t period);
1795
1812__syscall void k_timer_stop(struct k_timer *timer);
1813
1826__syscall uint32_t k_timer_status_get(struct k_timer *timer);
1827
1845__syscall uint32_t k_timer_status_sync(struct k_timer *timer);
1846
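/*
 * Usage sketch (illustrative, not part of kernel.h): a periodic timer whose
 * expiry function runs in system clock ISR context. Names and the 100 ms
 * period are example values.
 */
static void sample_expiry(struct k_timer *timer)
{
        ARG_UNUSED(timer);
        /* keep this short: it runs from interrupt context */
}

K_TIMER_DEFINE(sample_timer, sample_expiry, NULL);

static void start_sampling(void)
{
        /* first expiry after 100 ms, then every 100 ms */
        k_timer_start(&sample_timer, K_MSEC(100), K_MSEC(100));
}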
1847#ifdef CONFIG_SYS_CLOCK_EXISTS
1848
1859__syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
1860
1861static inline k_ticks_t z_impl_k_timer_expires_ticks(
1862 const struct k_timer *timer)
1863{
1864 return z_timeout_expires(&timer->timeout);
1865}
1866
1877__syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
1878
1879static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1880 const struct k_timer *timer)
1881{
1882 return z_timeout_remaining(&timer->timeout);
1883}
1884
1895static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
1896{
1898}
1899
1900#endif /* CONFIG_SYS_CLOCK_EXISTS */
1901
1914__syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1915
1919static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
1920 void *user_data)
1921{
1922 timer->user_data = user_data;
1923}
1924
1932__syscall void *k_timer_user_data_get(const struct k_timer *timer);
1933
1934static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
1935{
1936 return timer->user_data;
1937}
1938
1940
1946
1956__syscall int64_t k_uptime_ticks(void);
1957
1971static inline int64_t k_uptime_get(void)
1972{
1974}
1975
1995static inline uint32_t k_uptime_get_32(void)
1996{
1997 return (uint32_t)k_uptime_get();
1998}
1999
2008static inline uint32_t k_uptime_seconds(void)
2009{
2011}
2012
2024static inline int64_t k_uptime_delta(int64_t *reftime)
2025{
2026 int64_t uptime, delta;
2027
2028 uptime = k_uptime_get();
2029 delta = uptime - *reftime;
2030 *reftime = uptime;
2031
2032 return delta;
2033}
2034
2043static inline uint32_t k_cycle_get_32(void)
2044{
2045 return arch_k_cycle_get_32();
2046}
2047
2058static inline uint64_t k_cycle_get_64(void)
2059{
2060 if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
2061 __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
2062 "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
2063 return 0;
2064 }
2065
2066 return arch_k_cycle_get_64();
2067}
2068
2072
2073struct k_queue {
2076 _wait_q_t wait_q;
2077
2078 Z_DECL_POLL_EVENT
2079
2081};
2082
2086
2087#define Z_QUEUE_INITIALIZER(obj) \
2088 { \
2089 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
2090 .lock = { }, \
2091 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2092 Z_POLL_EVENT_OBJ_INIT(obj) \
2093 }
2094
2098
2104
2112__syscall void k_queue_init(struct k_queue *queue);
2113
2127__syscall void k_queue_cancel_wait(struct k_queue *queue);
2128
2141void k_queue_append(struct k_queue *queue, void *data);
2142
2159__syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
2160
2173void k_queue_prepend(struct k_queue *queue, void *data);
2174
2191__syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
2192
2206void k_queue_insert(struct k_queue *queue, void *prev, void *data);
2207
2226int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
2227
2243int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
2244
2262__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
2263
2280bool k_queue_remove(struct k_queue *queue, void *data);
2281
2296bool k_queue_unique_append(struct k_queue *queue, void *data);
2297
2311__syscall int k_queue_is_empty(struct k_queue *queue);
2312
2313static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
2314{
2315 return sys_sflist_is_empty(&queue->data_q) ? 1 : 0;
2316}
2317
2327__syscall void *k_queue_peek_head(struct k_queue *queue);
2328
2338__syscall void *k_queue_peek_tail(struct k_queue *queue);
2339
2349#define K_QUEUE_DEFINE(name) \
2350 STRUCT_SECTION_ITERABLE(k_queue, name) = \
2351 Z_QUEUE_INITIALIZER(name)
2352
2354
2355#ifdef CONFIG_USERSPACE
2365struct k_futex {
2367};
2368
2376struct z_futex_data {
2377 _wait_q_t wait_q;
2378 struct k_spinlock lock;
2379};
2380
2381#define Z_FUTEX_DATA_INITIALIZER(obj) \
2382 { \
2383 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2384 }
2385
2391
2411__syscall int k_futex_wait(struct k_futex *futex, int expected,
2412 k_timeout_t timeout);
2413
2428__syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2429
2431#endif
2432
2438
2443
2450
2451struct k_event {
2455 _wait_q_t wait_q;
2456 uint32_t events;
2457 struct k_spinlock lock;
2458
2460
2461#ifdef CONFIG_OBJ_CORE_EVENT
2462 struct k_obj_core obj_core;
2463#endif
2467
2468};
2469
2473
2474#define Z_EVENT_INITIALIZER(obj) \
2475 { \
2476 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2477 .events = 0, \
2478 .lock = {}, \
2479 }
2483
2491__syscall void k_event_init(struct k_event *event);
2492
2510__syscall uint32_t k_event_post(struct k_event *event, uint32_t events);
2511
2529__syscall uint32_t k_event_set(struct k_event *event, uint32_t events);
2530
2547__syscall uint32_t k_event_set_masked(struct k_event *event, uint32_t events,
2548 uint32_t events_mask);
2549
2562__syscall uint32_t k_event_clear(struct k_event *event, uint32_t events);
2563
2588__syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2589 bool reset, k_timeout_t timeout);
2590
2615__syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2616 bool reset, k_timeout_t timeout);
2617
2637__syscall uint32_t k_event_wait_safe(struct k_event *event, uint32_t events,
2638 bool reset, k_timeout_t timeout);
2639
2659__syscall uint32_t k_event_wait_all_safe(struct k_event *event, uint32_t events,
2660 bool reset, k_timeout_t timeout);
2661
2662
2663
2674static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
2675{
2676 return k_event_wait(event, events_mask, false, K_NO_WAIT);
2677}
2678
2688#define K_EVENT_DEFINE(name) \
2689 STRUCT_SECTION_ITERABLE(k_event, name) = \
2690 Z_EVENT_INITIALIZER(name);
2691
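/*
 * Usage sketch (illustrative, not part of kernel.h): a thread waiting for
 * either of two event bits posted from other contexts. The bit assignments
 * and names are example values.
 */
#define EVT_RX_DONE BIT(0)
#define EVT_TX_DONE BIT(1)

K_EVENT_DEFINE(io_events);

static void on_rx_complete(void)
{
        k_event_post(&io_events, EVT_RX_DONE);
}

static void io_wait(void)
{
        /* returns the matching bits, or 0 if 500 ms elapse with no match */
        uint32_t evts = k_event_wait(&io_events, EVT_RX_DONE | EVT_TX_DONE,
                                     false, K_MSEC(500));

        ARG_UNUSED(evts);
}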
2693
2694struct k_fifo {
2695 struct k_queue _queue;
2696#ifdef CONFIG_OBJ_CORE_FIFO
2697 struct k_obj_core obj_core;
2698#endif
2699};
2700
2704#define Z_FIFO_INITIALIZER(obj) \
2705 { \
2706 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2707 }
2708
2712
2718
2726#define k_fifo_init(fifo) \
2727 ({ \
2728 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2729 k_queue_init(&(fifo)->_queue); \
2730 K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo); \
2731 K_OBJ_CORE_LINK(K_OBJ_CORE(fifo)); \
2732 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2733 })
2734
2746#define k_fifo_cancel_wait(fifo) \
2747 ({ \
2748 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2749 k_queue_cancel_wait(&(fifo)->_queue); \
2750 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2751 })
2752
2765#define k_fifo_put(fifo, data) \
2766 ({ \
2767 void *_data = data; \
2768 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, _data); \
2769 k_queue_append(&(fifo)->_queue, _data); \
2770 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, _data); \
2771 })
2772
2789#define k_fifo_alloc_put(fifo, data) \
2790 ({ \
2791 void *_data = data; \
2792 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, _data); \
2793 int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
2794 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, _data, fap_ret); \
2795 fap_ret; \
2796 })
2797
2812#define k_fifo_put_list(fifo, head, tail) \
2813 ({ \
2814 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2815 k_queue_append_list(&(fifo)->_queue, head, tail); \
2816 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2817 })
2818
2832#define k_fifo_put_slist(fifo, list) \
2833 ({ \
2834 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2835 k_queue_merge_slist(&(fifo)->_queue, list); \
2836 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2837 })
2838
2856#define k_fifo_get(fifo, timeout) \
2857 ({ \
2858 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2859 void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
2860 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
2861 fg_ret; \
2862 })
2863
2877#define k_fifo_is_empty(fifo) \
2878 k_queue_is_empty(&(fifo)->_queue)
2879
2893#define k_fifo_peek_head(fifo) \
2894 ({ \
2895 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2896 void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
2897 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
2898 fph_ret; \
2899 })
2900
2912#define k_fifo_peek_tail(fifo) \
2913 ({ \
2914 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2915 void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
2916 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
2917 fpt_ret; \
2918 })
2919
2929#define K_FIFO_DEFINE(name) \
2930 STRUCT_SECTION_ITERABLE(k_fifo, name) = \
2931 Z_FIFO_INITIALIZER(name)
2932
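/*
 * Usage sketch (illustrative, not part of kernel.h): items placed on a FIFO
 * must reserve their first word for kernel use. The struct and names below
 * are example values.
 */
struct rx_item {
        void *fifo_reserved; /* first word reserved for the kernel */
        uint32_t payload;
};

K_FIFO_DEFINE(rx_fifo);

static void rx_produce(struct rx_item *item)
{
        k_fifo_put(&rx_fifo, item);
}

static struct rx_item *rx_consume(void)
{
        /* with K_FOREVER this blocks until an item is available */
        return k_fifo_get(&rx_fifo, K_FOREVER);
}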
2934
2935struct k_lifo {
2936 struct k_queue _queue;
2937#ifdef CONFIG_OBJ_CORE_LIFO
2938 struct k_obj_core obj_core;
2939#endif
2940};
2941
2945
2946#define Z_LIFO_INITIALIZER(obj) \
2947 { \
2948 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2949 }
2950
2954
2960
2968#define k_lifo_init(lifo) \
2969 ({ \
2970 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2971 k_queue_init(&(lifo)->_queue); \
2972 K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo); \
2973 K_OBJ_CORE_LINK(K_OBJ_CORE(lifo)); \
2974 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
2975 })
2976
2989#define k_lifo_put(lifo, data) \
2990 ({ \
2991 void *_data = data; \
2992 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, _data); \
2993 k_queue_prepend(&(lifo)->_queue, _data); \
2994 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, _data); \
2995 })
2996
3013#define k_lifo_alloc_put(lifo, data) \
3014 ({ \
3015 void *_data = data; \
3016 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, _data); \
3017 int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
3018 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, _data, lap_ret); \
3019 lap_ret; \
3020 })
3021
3039#define k_lifo_get(lifo, timeout) \
3040 ({ \
3041 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
3042 void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
3043 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
3044 lg_ret; \
3045 })
3046
3056#define K_LIFO_DEFINE(name) \
3057 STRUCT_SECTION_ITERABLE(k_lifo, name) = \
3058 Z_LIFO_INITIALIZER(name)
3059
3061
3065#define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
3066
3067typedef uintptr_t stack_data_t;
3068
3069struct k_stack {
3070 _wait_q_t wait_q;
3071 struct k_spinlock lock;
3072 stack_data_t *base, *next, *top;
3073
3074 uint8_t flags;
3075
3077
3078#ifdef CONFIG_OBJ_CORE_STACK
3079 struct k_obj_core obj_core;
3080#endif
3081};
3082
3083#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
3084 { \
3085 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3086 .base = (stack_buffer), \
3087 .next = (stack_buffer), \
3088 .top = (stack_buffer) + (stack_num_entries), \
3089 }
3090
3094
3100
3110void k_stack_init(struct k_stack *stack,
3111 stack_data_t *buffer, uint32_t num_entries);
3112
3113
3127
3128__syscall int32_t k_stack_alloc_init(struct k_stack *stack,
3129 uint32_t num_entries);
3130
3142int k_stack_cleanup(struct k_stack *stack);
3143
3157__syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
3158
3179__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
3180 k_timeout_t timeout);
3181
3192#define K_STACK_DEFINE(name, stack_num_entries) \
3193 stack_data_t __noinit \
3194 _k_stack_buf_##name[stack_num_entries]; \
3195 STRUCT_SECTION_ITERABLE(k_stack, name) = \
3196 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
3197 stack_num_entries)
3198
3200
3204
3205struct k_work;
3206struct k_work_q;
3207struct k_work_queue_config;
3208extern struct k_work_q k_sys_work_q;
3209
3213
3219
3224struct k_mutex {
3226 _wait_q_t wait_q;
3229
3232
3235
3237
3238#ifdef CONFIG_OBJ_CORE_MUTEX
3239 struct k_obj_core obj_core;
3240#endif
3241};
3242
3246#define Z_MUTEX_INITIALIZER(obj) \
3247 { \
3248 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3249 .owner = NULL, \
3250 .lock_count = 0, \
3251 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
3252 }
3253
3257
3267#define K_MUTEX_DEFINE(name) \
3268 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
3269 Z_MUTEX_INITIALIZER(name)
3270
3283__syscall int k_mutex_init(struct k_mutex *mutex);
3284
3285
3307__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
3308
3329__syscall int k_mutex_unlock(struct k_mutex *mutex);
3330
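/*
 * Usage sketch (illustrative, not part of kernel.h): a mutex guarding shared
 * configuration state, with a bounded wait. Names and the 50 ms timeout are
 * example values.
 */
K_MUTEX_DEFINE(config_lock);

static int config_update(int value)
{
        int ret = k_mutex_lock(&config_lock, K_MSEC(50));

        if (ret == 0) {
                /* critical section: this thread now owns the mutex */
                ARG_UNUSED(value);
                k_mutex_unlock(&config_lock);
        }
        return ret;
}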
3334
3335
3337 _wait_q_t wait_q;
3338
3339#ifdef CONFIG_OBJ_CORE_CONDVAR
3340 struct k_obj_core obj_core;
3341#endif
3342};
3343
3344#define Z_CONDVAR_INITIALIZER(obj) \
3345 { \
3346 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
3347 }
3348
3354
3361__syscall int k_condvar_init(struct k_condvar *condvar);
3362
3369__syscall int k_condvar_signal(struct k_condvar *condvar);
3370
3378__syscall int k_condvar_broadcast(struct k_condvar *condvar);
3379
3397__syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
3398 k_timeout_t timeout);
3399
3410#define K_CONDVAR_DEFINE(name) \
3411 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
3412 Z_CONDVAR_INITIALIZER(name)
3413
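/*
 * Usage sketch (illustrative, not part of kernel.h): the canonical condition
 * variable pattern - re-check the predicate in a loop while holding the
 * associated mutex. The flag and names are example values.
 */
K_MUTEX_DEFINE(cv_lock);
K_CONDVAR_DEFINE(cv);
static bool cv_ready;

static void cv_waiter(void)
{
        k_mutex_lock(&cv_lock, K_FOREVER);
        while (!cv_ready) {
                k_condvar_wait(&cv, &cv_lock, K_FOREVER);
        }
        k_mutex_unlock(&cv_lock);
}

static void cv_signaller(void)
{
        k_mutex_lock(&cv_lock, K_FOREVER);
        cv_ready = true;
        k_condvar_signal(&cv);
        k_mutex_unlock(&cv_lock);
}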
3416
3422
3429struct k_sem {
3433 _wait_q_t wait_q;
3434 unsigned int count;
3435 unsigned int limit;
3436
3437 Z_DECL_POLL_EVENT
3438
3440
3441#ifdef CONFIG_OBJ_CORE_SEM
3442 struct k_obj_core obj_core;
3443#endif
3445};
3446
3450
3451#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3452 { \
3453 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3454 .count = (initial_count), \
3455 .limit = (count_limit), \
3456 Z_POLL_EVENT_OBJ_INIT(obj) \
3457 }
3458
3462
3471#define K_SEM_MAX_LIMIT UINT_MAX
3472
3488__syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
3489 unsigned int limit);
3490
3509__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
3510
3521__syscall void k_sem_give(struct k_sem *sem);
3522
3532__syscall void k_sem_reset(struct k_sem *sem);
3533
3543__syscall unsigned int k_sem_count_get(struct k_sem *sem);
3544
3548static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
3549{
3550 return sem->count;
3551}
3552
3564#define K_SEM_DEFINE(name, initial_count, count_limit) \
3565 STRUCT_SECTION_ITERABLE(k_sem, name) = \
3566 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3567 BUILD_ASSERT(((count_limit) != 0) && \
3568 (((initial_count) < (count_limit)) || ((initial_count) == (count_limit))) && \
3569 ((count_limit) <= K_SEM_MAX_LIMIT));
3570
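/*
 * Usage sketch (illustrative, not part of kernel.h): a binary semaphore
 * signalling an ISR-produced event to a thread. Names and the 100 ms timeout
 * are example values.
 */
K_SEM_DEFINE(data_ready, 0, 1); /* initial count 0, maximum count 1 */

static void producer_isr(void)
{
        k_sem_give(&data_ready); /* may be called from ISR context */
}

static void consumer(void)
{
        if (k_sem_take(&data_ready, K_MSEC(100)) == 0) {
                /* the event arrived within 100 ms */
        }
}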
3572
3573#if defined(CONFIG_SCHED_IPI_SUPPORTED) || defined(__DOXYGEN__)
3574struct k_ipi_work;
3575
3576
3577typedef void (*k_ipi_func_t)(struct k_ipi_work *work);
3578
3589 sys_dnode_t node[CONFIG_MP_MAX_NUM_CPUS]; /* Node in IPI work queue */
3590 k_ipi_func_t func; /* Function to execute on target CPU */
3591 struct k_event event; /* Event to signal when processed */
3592 uint32_t bitmask; /* Bitmask of targeted CPUs */
3594};
3595
3596
3604static inline void k_ipi_work_init(struct k_ipi_work *work)
3605{
3606 k_event_init(&work->event);
3607 for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
3608 sys_dnode_init(&work->node[i]);
3609 }
3610 work->bitmask = 0;
3611}
3612
3631int k_ipi_work_add(struct k_ipi_work *work, uint32_t cpu_bitmask,
3632 k_ipi_func_t func);
3633
3656int k_ipi_work_wait(struct k_ipi_work *work, k_timeout_t timeout);
3657
3667
3668#endif /* CONFIG_SCHED_IPI_SUPPORTED */
3669
3673
3674struct k_work_delayable;
3675struct k_work_sync;
3676
3680
3686
3693typedef void (*k_work_handler_t)(struct k_work *work);
3694
3708void k_work_init(struct k_work *work,
3710
3725int k_work_busy_get(const struct k_work *work);
3726
3740static inline bool k_work_is_pending(const struct k_work *work);
3741
3763 struct k_work *work);
3764
3773int k_work_submit(struct k_work *work);
3774
3799bool k_work_flush(struct k_work *work,
3800 struct k_work_sync *sync);
3801
3821int k_work_cancel(struct k_work *work);
3822
3853bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
3854
3865
3886 k_thread_stack_t *stack, size_t stack_size,
3887 int prio, const struct k_work_queue_config *cfg);
3888
3899void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *cfg);
3900
3910static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3911
3935int k_work_queue_drain(struct k_work_q *queue, bool plug);
3936
3951
3968
3984
3996static inline struct k_work_delayable *
3998
4013
4028static inline bool k_work_delayable_is_pending(
4029 const struct k_work_delayable *dwork);
4030
4045 const struct k_work_delayable *dwork);
4046
4061 const struct k_work_delayable *dwork);
4062
4091 struct k_work_delayable *dwork,
4092 k_timeout_t delay);
4093
4108 k_timeout_t delay);
4109
4146 struct k_work_delayable *dwork,
4147 k_timeout_t delay);
4148
4162 k_timeout_t delay);
4163
4189 struct k_work_sync *sync);
4190
4212
4242 struct k_work_sync *sync);
4243
4244enum {
4248
4249 /* The atomic API is used for all work and queue flags fields to
4250 * enforce sequential consistency in SMP environments.
4251 */
4252
4253 /* Bits that represent the work item states. At least nine of the
4254 * combinations are distinct valid stable states.
4255 */
4256 K_WORK_RUNNING_BIT = 0,
4257 K_WORK_CANCELING_BIT = 1,
4258 K_WORK_QUEUED_BIT = 2,
4259 K_WORK_DELAYED_BIT = 3,
4260 K_WORK_FLUSHING_BIT = 4,
4261
4262 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
4263 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT),
4264
4265 /* Static work flags */
4266 K_WORK_DELAYABLE_BIT = 8,
4267 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
4268
4269 /* Dynamic work queue flags */
4270 K_WORK_QUEUE_STARTED_BIT = 0,
4271 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
4272 K_WORK_QUEUE_BUSY_BIT = 1,
4273 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
4274 K_WORK_QUEUE_DRAIN_BIT = 2,
4275 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
4276 K_WORK_QUEUE_PLUGGED_BIT = 3,
4277 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
4278 K_WORK_QUEUE_STOP_BIT = 4,
4279 K_WORK_QUEUE_STOP = BIT(K_WORK_QUEUE_STOP_BIT),
4280
4281 /* Static work queue flags */
4282 K_WORK_QUEUE_NO_YIELD_BIT = 8,
4283 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
4284
4288 /* Transient work flags */
4289
4295 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
4296
4301 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
4302
4308 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
4309
4315 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
4316
4321 K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT),
4322};
4323
4325struct k_work {
4326 /* All fields are protected by the work module spinlock. No fields
4327 * are to be accessed except through kernel API.
4328 */
4329
4330 /* Node to link into k_work_q pending list. */
4332
4333 /* The function to be invoked by the work queue thread. */
4335
4336 /* The queue on which the work item was last submitted. */
4338
4339 /* State of the work item.
4340 *
4341 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
4342 *
4343 * It can be RUNNING and CANCELING simultaneously.
4344 */
4346};
4347
4348#define Z_WORK_INITIALIZER(work_handler) { \
4349 .handler = (work_handler), \
4350}
4351
4354 /* The work item. */
4355 struct k_work work;
4356
4357 /* Timeout used to submit work after a delay. */
4358 struct _timeout timeout;
4359
4360 /* The queue to which the work should be submitted. */
4362};
4363
4364#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
4365 .work = { \
4366 .handler = (work_handler), \
4367 .flags = K_WORK_DELAYABLE, \
4368 }, \
4369}
4370
4387#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
4388 struct k_work_delayable work \
4389 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
4390
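/*
 * Usage sketch (illustrative, not part of kernel.h): a delayable work item
 * rescheduled on every trigger, acting as a 1 s inactivity watchdog. The
 * handler and names are example values.
 */
static void inactivity_handler(struct k_work *work)
{
        struct k_work_delayable *dwork = k_work_delayable_from_work(work);

        ARG_UNUSED(dwork);
        /* no trigger has been seen for one second */
}

K_WORK_DELAYABLE_DEFINE(inactivity_work, inactivity_handler);

static void on_trigger(void)
{
        /* (re)start the countdown, replacing any pending expiry */
        k_work_reschedule(&inactivity_work, K_SECONDS(1));
}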
4394
4395/* Record used to wait for work to flush.
4396 *
4397 * The work item is inserted into the queue that will process (or is
4398 * processing) the item, and will be processed as soon as the item
4399 * completes. When the flusher is processed the semaphore will be
4400 * signaled, releasing the thread waiting for the flush.
4401 */
4402struct z_work_flusher {
4403 struct k_work work;
4404 struct k_sem sem;
4405};
4406
4407/* Record used to wait for work to complete a cancellation.
4408 *
4409 * The work item is inserted into a global queue of pending cancels.
4410 * When a cancelling work item goes idle, any matching waiters are
4411 * removed from pending_cancels and woken.
4412 */
4413struct z_work_canceller {
4414 sys_snode_t node;
4415 struct k_work *work;
4416 struct k_sem sem;
4417};
4418
4422
4437 union {
4438 struct z_work_flusher flusher;
4439 struct z_work_canceller canceller;
4440 };
4441};
4442
4454 const char *name;
4455
4469
4474
4484};
4485
4487struct k_work_q {
4488 /* The thread that animates the work. */
4490
4491 /* The thread ID that animates the work. This may be an external thread
4492 * if k_work_queue_run() is used.
4493 */
4495
4496 /* All the following fields must be accessed only while the
4497 * work module spinlock is held.
4498 */
4499
4500 /* List of k_work items to be worked. */
4502
4503 /* Wait queue for idle work thread. */
4504 _wait_q_t notifyq;
4505
4506 /* Wait queue for threads waiting for the queue to drain. */
4507 _wait_q_t drainq;
4508
4509 /* Flags describing queue state. */
4511
4512#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
4513 struct _timeout work_timeout_record;
4514 struct k_work *work;
4515 k_timeout_t work_timeout;
4516#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
4517};
4518
4519/* Provide the implementation for inline functions declared above */
4520
4521static inline bool k_work_is_pending(const struct k_work *work)
4522{
4523 return k_work_busy_get(work) != 0;
4524}
4525
4526static inline struct k_work_delayable *
4531
4533 const struct k_work_delayable *dwork)
4534{
4535 return k_work_delayable_busy_get(dwork) != 0;
4536}
4537
4539 const struct k_work_delayable *dwork)
4540{
4541 return z_timeout_expires(&dwork->timeout);
4542}
4543
4545 const struct k_work_delayable *dwork)
4546{
4547 return z_timeout_remaining(&dwork->timeout);
4548}
4549
4551{
4552 return queue->thread_id;
4553}
4554
4556
4557struct k_work_user;
4558
4563
4573typedef void (*k_work_user_handler_t)(struct k_work_user *work);
4574
4578
4579struct k_work_user_q {
4580 struct k_queue queue;
4581 struct k_thread thread;
4582};
4583
4584enum {
4585 K_WORK_USER_STATE_PENDING, /* Work item pending state */
4586};
4587
4588struct k_work_user {
4589 void *_reserved; /* Used by k_queue implementation. */
4590 k_work_user_handler_t handler;
4592};
4593
4597
4598#if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4599#define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4600#else
4601#define Z_WORK_USER_INITIALIZER(work_handler) \
4602 { \
4603 ._reserved = NULL, \
4604 .handler = (work_handler), \
4605 .flags = 0 \
4606 }
4607#endif
4608
4620#define K_WORK_USER_DEFINE(work, work_handler) \
4621 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4622
4632static inline void k_work_user_init(struct k_work_user *work,
4633 k_work_user_handler_t handler)
4634{
4635 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4636}
4637
4654static inline bool k_work_user_is_pending(struct k_work_user *work)
4655{
4656 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4657}
4658
4677static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4678 struct k_work_user *work)
4679{
4680 int ret = -EBUSY;
4681
4682 if (!atomic_test_and_set_bit(&work->flags,
4683 K_WORK_USER_STATE_PENDING)) {
4684 ret = k_queue_alloc_append(&work_q->queue, work);
4685
4686 /* Couldn't insert into the queue. Clear the pending bit
4687 * so the work item can be submitted again
4688 */
4689 if (ret != 0) {
4690 atomic_clear_bit(&work->flags,
4691 K_WORK_USER_STATE_PENDING);
4692 }
4693 }
4694
4695 return ret;
4696}
4697
4717void k_work_user_queue_start(struct k_work_user_q *work_q,
4718 k_thread_stack_t *stack,
4719 size_t stack_size, int prio,
4720 const char *name);
4721
4732static inline k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
4733{
4734 return &work_q->thread;
4735}
4736
4738
4742
4743struct k_work_poll {
4744 struct k_work work;
4745 struct k_work_q *workq;
4746 struct z_poller poller;
4747 struct k_poll_event *events;
4748 int num_events;
4749 k_work_handler_t real_handler;
4750 struct _timeout timeout;
4751 int poll_result;
4752};
4753
4757
4762
4774#define K_WORK_DEFINE(work, work_handler) \
4775 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4776
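/*
 * Usage sketch (illustrative, not part of kernel.h): deferring processing
 * from an interrupt handler to the system work queue. Names are example
 * values.
 */
static void debounce_handler(struct k_work *work)
{
        ARG_UNUSED(work);
        /* runs in the context of the system work queue thread */
}

K_WORK_DEFINE(debounce_work, debounce_handler);

static void button_isr(void)
{
        /* queues the item unless it is already queued; safe from ISRs */
        k_work_submit(&debounce_work);
}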
4786void k_work_poll_init(struct k_work_poll *work,
4787 k_work_handler_t handler);
4788
4824 struct k_work_poll *work,
4825 struct k_poll_event *events,
4826 int num_events,
4827 k_timeout_t timeout);
4828
4860int k_work_poll_submit(struct k_work_poll *work,
4861 struct k_poll_event *events,
4862 int num_events,
4863 k_timeout_t timeout);
4864
4879int k_work_poll_cancel(struct k_work_poll *work);
4880
4882
4888
4892struct k_msgq {
4894 _wait_q_t wait_q;
4898 size_t msg_size;
4911
4912 Z_DECL_POLL_EVENT
4913
4916
4918
4919#ifdef CONFIG_OBJ_CORE_MSGQ
4920 struct k_obj_core obj_core;
4921#endif
4922};
4923
4926
4927
4928#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
4929 { \
4930 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4931 .lock = {}, \
4932 .msg_size = q_msg_size, \
4933 .max_msgs = q_max_msgs, \
4934 .buffer_start = q_buffer, \
4935 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
4936 .read_ptr = q_buffer, \
4937 .write_ptr = q_buffer, \
4938 .used_msgs = 0, \
4939 Z_POLL_EVENT_OBJ_INIT(obj) \
4940 .flags = 0, \
4941 }
4942
4946
4947
4948#define K_MSGQ_FLAG_ALLOC BIT(0)
4949
4961
4962
4981#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4982 static char __noinit __aligned(q_align) \
4983 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
4984 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
4985 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
4986 (q_msg_size), (q_max_msgs))
4987
5002void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
5003 uint32_t max_msgs);
5004
5024__syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
5025 uint32_t max_msgs);
5026
5037int k_msgq_cleanup(struct k_msgq *msgq);
5038
5059__syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
5060
5085__syscall int k_msgq_put_front(struct k_msgq *msgq, const void *data);
5086
5107__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
5108
5123__syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
5124
5141__syscall int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx);
5142
5152__syscall void k_msgq_purge(struct k_msgq *msgq);
5153
5164__syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
5165
5174__syscall void k_msgq_get_attrs(struct k_msgq *msgq,
5175 struct k_msgq_attrs *attrs);
5176
5177
5178static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
5179{
5180 return msgq->max_msgs - msgq->used_msgs;
5181}
5182
5192__syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
5193
5194static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
5195{
5196 return msgq->used_msgs;
5197}
5198
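/*
 * Usage sketch (illustrative, not part of kernel.h): a message queue holding
 * up to ten fixed-size samples. The struct, depth and alignment are example
 * values.
 */
struct sample {
        int16_t x;
        int16_t y;
};

K_MSGQ_DEFINE(sample_q, sizeof(struct sample), 10, 4);

static void sample_put(const struct sample *s)
{
        /* drop the sample rather than block if the queue is full */
        (void)k_msgq_put(&sample_q, s, K_NO_WAIT);
}

static void sample_get(struct sample *s)
{
        (void)k_msgq_get(&sample_q, s, K_FOREVER);
}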
5200
5206
5213 size_t size;
5217 void *tx_data;
5223 k_tid_t _syncing_thread;
5224#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
5226 struct k_sem *_async_sem;
5227#endif
5228};
5229
5233struct k_mbox {
5235 _wait_q_t tx_msg_queue;
5237 _wait_q_t rx_msg_queue;
5239
5241
5242#ifdef CONFIG_OBJ_CORE_MAILBOX
5243 struct k_obj_core obj_core;
5244#endif
5245};
5246
5249
5250#define Z_MBOX_INITIALIZER(obj) \
5251 { \
5252 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
5253 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
5254 }
5255
5259
5269#define K_MBOX_DEFINE(name) \
5270 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
5271 Z_MBOX_INITIALIZER(name) \
5272
5273
5280void k_mbox_init(struct k_mbox *mbox);
5281
5301int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
5302 k_timeout_t timeout);
5303
5317void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
5318 struct k_sem *sem);
5319
5337int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
5338 void *buffer, k_timeout_t timeout);
5339
5353void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
5354
5356
5362
5372__syscall void k_pipe_init(struct k_pipe *pipe, uint8_t *buffer, size_t buffer_size);
5373
5378
5379struct k_pipe {
5380 size_t waiting;
5383 _wait_q_t data;
5384 _wait_q_t space;
5386
5387 Z_DECL_POLL_EVENT
5388#ifdef CONFIG_OBJ_CORE_PIPE
5389 struct k_obj_core obj_core;
5390#endif
5392};
5393
5397#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
5398{ \
5399 .waiting = 0, \
5400 .buf = RING_BUF_INIT(pipe_buffer, pipe_buffer_size), \
5401 .data = Z_WAIT_Q_INIT(&obj.data), \
5402 .space = Z_WAIT_Q_INIT(&obj.space), \
5403 .flags = PIPE_FLAG_OPEN, \
5404 Z_POLL_EVENT_OBJ_INIT(obj) \
5405}
5409
5423#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
5424 static unsigned char __noinit __aligned(pipe_align) \
5425 _k_pipe_buf_##name[pipe_buffer_size]; \
5426 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
5427 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
5428
5429
5446__syscall int k_pipe_write(struct k_pipe *pipe, const uint8_t *data, size_t len,
5447 k_timeout_t timeout);
5448
5464__syscall int k_pipe_read(struct k_pipe *pipe, uint8_t *data, size_t len,
5465 k_timeout_t timeout);
5466
5476__syscall void k_pipe_reset(struct k_pipe *pipe);
5477
5486__syscall void k_pipe_close(struct k_pipe *pipe);
5488
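/*
 * Usage sketch (illustrative, not part of kernel.h): a 64-byte pipe carrying
 * a byte stream between two threads. Sizes, names and timeouts are example
 * values.
 */
K_PIPE_DEFINE(log_pipe, 64, 4);

static void log_write(const uint8_t *buf, size_t len)
{
        /* returns the number of bytes written, or a negative error code */
        (void)k_pipe_write(&log_pipe, buf, len, K_NO_WAIT);
}

static int log_read(uint8_t *buf, size_t len)
{
        /* waits up to 100 ms for at least one byte */
        return k_pipe_read(&log_pipe, buf, len, K_MSEC(100));
}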
5492struct k_mem_slab_info {
5493 uint32_t num_blocks;
5494 size_t block_size;
5495 uint32_t num_used;
5496#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5497 uint32_t max_used;
5498#endif
5499};
5500
5501struct k_mem_slab {
5502 _wait_q_t wait_q;
5503 struct k_spinlock lock;
5504 char *buffer;
5505 char *free_list;
5506 struct k_mem_slab_info info;
5507
5509
5510#ifdef CONFIG_OBJ_CORE_MEM_SLAB
5511 struct k_obj_core obj_core;
5512#endif
5513};
5514
5515#define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
5516 _slab_num_blocks) \
5517 { \
5518 .wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q), \
5519 .lock = {}, \
5520 .buffer = _slab_buffer, \
5521 .free_list = NULL, \
5522 .info = {_slab_num_blocks, _slab_block_size, 0} \
5523 }
5524
5525
5529
5535
5561#define K_MEM_SLAB_DEFINE_IN_SECT(name, in_section, slab_block_size, slab_num_blocks, slab_align) \
5562 BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0, \
5563 "slab_block_size must be a multiple of slab_align"); \
5564 BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0), \
5565 "slab_align must be a power of 2"); \
5566 char in_section __aligned(WB_UP( \
5567 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5568 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER( \
5569 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
5570
5594#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
5595 K_MEM_SLAB_DEFINE_IN_SECT(name, __noinit_named(k_mem_slab_buf_##name), slab_block_size, \
5596 slab_num_blocks, slab_align)
5597
5614#define K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, in_section, slab_block_size, slab_num_blocks, \
5615 slab_align) \
5616 BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0, \
5617 "slab_block_size must be a multiple of slab_align"); \
5618 BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0), \
5619 "slab_align must be a power of 2"); \
5620 static char in_section __aligned(WB_UP( \
5621 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5622 static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER( \
5623 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
5624
5639#define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5640 K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, __noinit_named(k_mem_slab_buf_##name), \
5641 slab_block_size, slab_num_blocks, slab_align)
5642
5664int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
5665 size_t block_size, uint32_t num_blocks);
5666
5689int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
5690 k_timeout_t timeout);
5691
5703void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
5704
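/*
 * Usage sketch (illustrative, not part of kernel.h): a slab of eight 64-byte
 * blocks allocated and freed in constant time. Sizes and names are example
 * values.
 */
K_MEM_SLAB_DEFINE(frame_slab, 64, 8, 4);

static void *frame_get(void)
{
        void *block;

        /* K_NO_WAIT: fail immediately instead of blocking when empty */
        if (k_mem_slab_alloc(&frame_slab, &block, K_NO_WAIT) != 0) {
                return NULL;
        }
        return block;
}

static void frame_put(void *block)
{
        k_mem_slab_free(&frame_slab, block);
}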
5717static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
5718{
5719 return slab->info.num_used;
5720}
5721
5734static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
5735{
5736#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5737 return slab->info.max_used;
5738#else
5739 ARG_UNUSED(slab);
5740 return 0;
5741#endif
5742}
5743
5756static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
5757{
5758 return slab->info.num_blocks - slab->info.num_used;
5759}
5760
5774
5775int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats);
5776
5790int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab);
5791
5793
5798
5799/* kernel synchronized heap struct */
5800
5801struct k_heap {
5803 _wait_q_t wait_q;
5805};
5806
5820void k_heap_init(struct k_heap *h, void *mem,
5821 size_t bytes) __attribute_nonnull(1);
5822
5843void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
5844 k_timeout_t timeout) __attribute_nonnull(1);
5845
5867void *k_heap_alloc(struct k_heap *h, size_t bytes,
5868 k_timeout_t timeout) __attribute_nonnull(1);
5869
5892void *k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
5893 __attribute_nonnull(1);
5894
5918void *k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
5919 __attribute_nonnull(1);
5920
5931void k_heap_free(struct k_heap *h, void *mem) __attribute_nonnull(1);
5932
5933/* Hand-calculated minimum heap sizes needed to return a successful
5934 * 1-byte allocation. See details in lib/os/heap.[ch]
5935 */
5936#define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 56 : 44)
5937
5954#define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
5955 char in_section \
5956 __aligned(8) /* CHUNK_UNIT */ \
5957 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
5958 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5959 .heap = { \
5960 .init_mem = kheap_##name, \
5961 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5962 }, \
5963 }
5964
5979#define K_HEAP_DEFINE(name, bytes) \
5980 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
5981 __noinit_named(kheap_buf_##name))
5982
5997#define K_HEAP_DEFINE_NOCACHE(name, bytes) \
5998 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
5999
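/*
 * Usage sketch (illustrative, not part of kernel.h): a dedicated 2 KiB heap
 * with a bounded wait on allocation. The size, names and 10 ms timeout are
 * example values.
 */
K_HEAP_DEFINE(app_heap, 2048);

static void *app_buf_alloc(size_t len)
{
        /* wait up to 10 ms for another thread to free enough memory */
        return k_heap_alloc(&app_heap, len, K_MSEC(10));
}

static void app_buf_free(void *buf)
{
        k_heap_free(&app_heap, buf);
}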
6009int k_heap_array_get(struct k_heap **heap);
6010
6014
6021
6040void *k_aligned_alloc(size_t align, size_t size);
6041
6053void *k_malloc(size_t size);
6054
6065void k_free(void *ptr);
6066
6078void *k_calloc(size_t nmemb, size_t size);
6079
6097void *k_realloc(void *ptr, size_t size);
6098
6100
6101/* polling API - PRIVATE */
6102
6103#ifdef CONFIG_POLL
6104#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
6105#else
6106#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
6107#endif
6108
6109/* private - types bit positions */
6110enum _poll_types_bits {
6111 /* can be used to ignore an event */
6112 _POLL_TYPE_IGNORE,
6113
6114 /* to be signaled by k_poll_signal_raise() */
6115 _POLL_TYPE_SIGNAL,
6116
6117 /* semaphore availability */
6118 _POLL_TYPE_SEM_AVAILABLE,
6119
6120 /* queue/FIFO/LIFO data availability */
6121 _POLL_TYPE_DATA_AVAILABLE,
6122
6123 /* msgq data availability */
6124 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
6125
6126 /* pipe data availability */
6127 _POLL_TYPE_PIPE_DATA_AVAILABLE,
6128
6129 _POLL_NUM_TYPES
6130};
6131
6132#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
6133
6134/* private - states bit positions */
6135enum _poll_states_bits {
6136 /* default state when creating event */
6137 _POLL_STATE_NOT_READY,
6138
6139 /* signaled by k_poll_signal_raise() */
6140 _POLL_STATE_SIGNALED,
6141
6142 /* semaphore is available */
6143 _POLL_STATE_SEM_AVAILABLE,
6144
6145 /* data is available to read on queue/FIFO/LIFO */
6146 _POLL_STATE_DATA_AVAILABLE,
6147
6148 /* queue/FIFO/LIFO wait was cancelled */
6149 _POLL_STATE_CANCELLED,
6150
6151 /* data is available to read on a message queue */
6152 _POLL_STATE_MSGQ_DATA_AVAILABLE,
6153
6154 /* data is available to read from a pipe */
6155 _POLL_STATE_PIPE_DATA_AVAILABLE,
6156
6157 _POLL_NUM_STATES
6158};
6159
6160#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
6161
6162#define _POLL_EVENT_NUM_UNUSED_BITS \
6163 (32 - (0 \
6164 + 8 /* tag */ \
6165 + _POLL_NUM_TYPES \
6166 + _POLL_NUM_STATES \
6167 + 1 /* modes */ \
6168 ))
6169
6170/* end of polling API - PRIVATE */
6171
6172
6180
6181/* Public polling API */
6182
6183/* public - values for k_poll_event.type bitfield */
6184#define K_POLL_TYPE_IGNORE 0
6185#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
6186#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
6187#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
6188#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
6189#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
6190#define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
6191
6192/* public - polling modes */
6194 /* polling thread does not take ownership of objects when available */
6196
6198};
6199
6200/* public - values for k_poll_event.state bitfield */
6201#define K_POLL_STATE_NOT_READY 0
6202#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
6203#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
6204#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
6205#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
6206#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
6207#define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
6208#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
6209
6210/* public - poll signal object */
6214
6219 unsigned int signaled;
6220
6223};
6224
6225#define K_POLL_SIGNAL_INITIALIZER(obj) \
6226 { \
6227 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
6228 .signaled = 0, \
6229 .result = 0, \
6230 }
6231
6237 sys_dnode_t _node;
6238
6240 struct z_poller *poller;
6241
6244
6246 uint32_t type:_POLL_NUM_TYPES;
6247
6249 uint32_t state:_POLL_NUM_STATES;
6250
6253
6255 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
6256
6258 union {
6259 /* The typed_* fields below are used by K_POLL_EVENT_*INITIALIZER() macros to ensure
6260 * type safety of polled objects.
6261 */
6269 };
6270};
6271
6272#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
6273 { \
6274 .poller = NULL, \
6275 .type = _event_type, \
6276 .state = K_POLL_STATE_NOT_READY, \
6277 .mode = _event_mode, \
6278 .unused = 0, \
6279 { \
6280 .typed_##_event_type = _event_obj, \
6281 }, \
6282 }
6283
6284#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
6285 event_tag) \
6286 { \
6287 .tag = event_tag, \
6288 .type = _event_type, \
6289 .state = K_POLL_STATE_NOT_READY, \
6290 .mode = _event_mode, \
6291 .unused = 0, \
6292 { \
6293 .typed_##_event_type = _event_obj, \
6294 }, \
6295 }
6296
6311
6312void k_poll_event_init(struct k_poll_event *event, uint32_t type,
6313 int mode, void *obj);
6314
6357
6358__syscall int k_poll(struct k_poll_event *events, int num_events,
6359 k_timeout_t timeout);
6360
6368
6369__syscall void k_poll_signal_init(struct k_poll_signal *sig);
6370
6376__syscall void k_poll_signal_reset(struct k_poll_signal *sig);
6377
6388__syscall void k_poll_signal_check(struct k_poll_signal *sig,
6389 unsigned int *signaled, int *result);
6390
6414
6415__syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
6416
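/*
 * Usage sketch (illustrative, not part of kernel.h): waiting on a semaphore
 * and a poll signal with one k_poll() call. Names and the 1 s timeout are
 * example values.
 */
K_SEM_DEFINE(poll_sem, 0, 1);
static struct k_poll_signal poll_sig = K_POLL_SIGNAL_INITIALIZER(poll_sig);

static void poll_both(void)
{
        struct k_poll_event events[2] = {
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
                                         K_POLL_MODE_NOTIFY_ONLY, &poll_sem),
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
                                         K_POLL_MODE_NOTIFY_ONLY, &poll_sig),
        };

        if (k_poll(events, 2, K_SECONDS(1)) == 0) {
                if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
                        k_sem_take(&poll_sem, K_NO_WAIT);
                }
                /* reset states before reusing the event array */
                events[0].state = K_POLL_STATE_NOT_READY;
                events[1].state = K_POLL_STATE_NOT_READY;
        }
}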
6418
6437static inline void k_cpu_idle(void)
6438{
6439 arch_cpu_idle();
6440}
6441
6456static inline void k_cpu_atomic_idle(unsigned int key)
6457{
6459}
6460
6464
6469#ifdef ARCH_EXCEPT
6470/* This architecture has direct support for triggering a CPU exception */
6471#define z_except_reason(reason) ARCH_EXCEPT(reason)
6472#else
6473
6474#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
6475#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
6476#else
6477#define __EXCEPT_LOC()
6478#endif
6479
6480/* NOTE: This is the implementation for arches that do not implement
6481 * ARCH_EXCEPT() to generate a real CPU exception.
6482 *
6483 * We won't have a real exception frame to determine the PC value when
6484 * the oops occurred, so print file and line number before we jump into
6485 * the fatal error handler.
6486 */
6487#define z_except_reason(reason) do { \
6488 __EXCEPT_LOC(); \
6489 z_fatal_error(reason, NULL); \
6490 } while (false)
6491
6492#endif /* ARCH_EXCEPT */
6496
6508#define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
6509
6518#define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
6519
6523
6524/*
6525 * private APIs that are utilized by one or more public APIs
6526 */
6527
6531void z_timer_expiration_handler(struct _timeout *timeout);
6535
6536#ifdef CONFIG_PRINTK
6544__syscall void k_str_out(char *c, size_t n);
6545#endif
6546
6552
6573__syscall int k_float_disable(struct k_thread *thread);
6574
6613__syscall int k_float_enable(struct k_thread *thread, unsigned int options);
6614
6618
6628
6636
6645
6656
6667
6676
6685
6686#ifdef __cplusplus
6687}
6688#endif
6689
6690#include <zephyr/tracing/tracing.h>
6691#include <zephyr/syscalls/kernel.h>
6692
6693#endif /* !_ASMLANGUAGE */
6694
6695#endif /* ZEPHYR_INCLUDE_KERNEL_H_ */
static uint32_t arch_k_cycle_get_32(void)
Definition misc.h:26
static uint64_t arch_k_cycle_get_64(void)
Definition misc.h:33
void(* k_thread_entry_t)(void *p1, void *p2, void *p3)
Thread entry point function type.
Definition arch_interface.h:48
struct z_thread_stack_element k_thread_stack_t
Typedef of struct z_thread_stack_element.
Definition arch_interface.h:46
long atomic_t
Definition atomic_types.h:15
System error numbers.
void arch_cpu_atomic_idle(unsigned int key)
Atomically re-enable interrupts and enter low power mode.
void arch_cpu_idle(void)
Power save idle routine.
static _Bool atomic_test_and_set_bit(atomic_t *target, int bit)
Atomically set a bit and test it.
Definition atomic.h:170
static _Bool atomic_test_bit(const atomic_t *target, int bit)
Atomically get and test a bit.
Definition atomic.h:127
static void atomic_clear_bit(atomic_t *target, int bit)
Atomically clear a bit.
Definition atomic.h:191
static uint32_t k_cycle_get_32(void)
Read the hardware clock.
Definition kernel.h:2043
#define K_NO_WAIT
Generate null timeout delay.
Definition kernel.h:1444
int64_t k_uptime_ticks(void)
Get system uptime, in system ticks.
static uint32_t k_uptime_get_32(void)
Get system uptime (32-bit version).
Definition kernel.h:1995
uint32_t k_ticks_t
Tick precision used in timeout APIs.
Definition clock.h:48
static int64_t k_uptime_delta(int64_t *reftime)
Get elapsed time.
Definition kernel.h:2024
static uint32_t k_uptime_seconds(void)
Get system uptime in seconds.
Definition kernel.h:2008
static uint64_t k_cycle_get_64(void)
Read the 64-bit hardware clock.
Definition kernel.h:2058
static int64_t k_uptime_get(void)
Get system uptime.
Definition kernel.h:1971
int k_condvar_signal(struct k_condvar *condvar)
Signals one thread that is pending on the condition variable.
int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex, k_timeout_t timeout)
Waits on the condition variable releasing the mutex lock.
int k_condvar_init(struct k_condvar *condvar)
Initialize a condition variable.
int k_condvar_broadcast(struct k_condvar *condvar)
Unblock all threads that are pending on the condition variable.
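A minimal sketch of how the condition-variable calls above pair with a mutex, assuming <zephyr/kernel.h> is included, k_mutex_init() and k_condvar_init() have already been run, and the standard K_FOREVER timeout; the shared data_ready flag is illustrative only.

    static struct k_mutex data_lock;
    static struct k_condvar data_cond;
    static bool data_ready;

    void consumer(void)
    {
        k_mutex_lock(&data_lock, K_FOREVER);
        while (!data_ready) {
            /* Releases data_lock while blocked, re-acquires it before returning. */
            k_condvar_wait(&data_cond, &data_lock, K_FOREVER);
        }
        /* ... consume the shared data ... */
        k_mutex_unlock(&data_lock);
    }

    void producer(void)
    {
        k_mutex_lock(&data_lock, K_FOREVER);
        data_ready = true;
        k_condvar_signal(&data_cond);   /* k_condvar_broadcast() would wake all waiters */
        k_mutex_unlock(&data_lock);
    }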
static void k_cpu_idle(void)
Make the CPU idle.
Definition kernel.h:6437
static void k_cpu_atomic_idle(unsigned int key)
Make the CPU idle in an atomic fashion.
Definition kernel.h:6456
struct _dnode sys_dnode_t
Doubly-linked list node structure.
Definition dlist.h:54
struct _dnode sys_dlist_t
Doubly-linked list structure.
Definition dlist.h:50
static void sys_dnode_init(sys_dnode_t *node)
Initialize a node to its state when not in a list.
Definition dlist.h:219
uint32_t k_event_wait(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for any of the specified events.
uint32_t k_event_set_masked(struct k_event *event, uint32_t events, uint32_t events_mask)
Set or clear the events in an event object.
uint32_t k_event_wait_all_safe(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for all of the specified events (safe version)
static uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
Test the events currently tracked in the event object.
Definition kernel.h:2674
uint32_t k_event_wait_safe(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for any of the specified events (safe version)
uint32_t k_event_set(struct k_event *event, uint32_t events)
Set the events in an event object.
uint32_t k_event_post(struct k_event *event, uint32_t events)
Post one or more events to an event object.
void k_event_init(struct k_event *event)
Initialize an event object.
uint32_t k_event_clear(struct k_event *event, uint32_t events)
Clear the events in an event object.
uint32_t k_event_wait_all(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for all of the specified events.
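A short sketch of the event-object calls above; the two event bits and the producer/waiter split are assumptions made for illustration (BIT() is the helper referenced later in this index).

    static struct k_event io_events;

    #define EV_RX BIT(0)   /* hypothetical event bits */
    #define EV_TX BIT(1)

    void io_events_setup(void)
    {
        k_event_init(&io_events);
    }

    void io_producer(void)
    {
        k_event_post(&io_events, EV_RX);            /* add EV_RX to the tracked events */
    }

    void io_waiter(void)
    {
        /* Block until at least one of the bits is posted; 'false' leaves the
         * tracked events untouched on entry.
         */
        uint32_t ev = k_event_wait(&io_events, EV_RX | EV_TX, false, K_FOREVER);

        if (ev & EV_RX) {
            /* ... handle receive ... */
        }
        k_event_clear(&io_events, ev);              /* acknowledge what was handled */
    }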
static bool sys_sflist_is_empty(const sys_sflist_t *list)
Test if the given list is empty.
Definition sflist.h:336
struct _sflist sys_sflist_t
Flagged single-linked list structure.
Definition sflist.h:54
int k_float_disable(struct k_thread *thread)
Disable preservation of floating point context information.
int k_float_enable(struct k_thread *thread, unsigned int options)
Enable preservation of floating point context information.
int k_futex_wait(struct k_futex *futex, int expected, k_timeout_t timeout)
Pend the current thread on a futex.
int k_futex_wake(struct k_futex *futex, bool wake_all)
Wake one/all threads pending on a futex.
void * k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout)
Allocate memory from a k_heap.
int k_heap_array_get(struct k_heap **heap)
Get the array of statically defined heaps.
void * k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
Allocate and initialize memory for an array of objects from a k_heap.
void k_heap_free(struct k_heap *h, void *mem)
Free memory allocated by k_heap_alloc()
void k_free(void *ptr)
Free memory allocated from heap.
void * k_realloc(void *ptr, size_t size)
Expand the size of an existing allocation.
void k_heap_init(struct k_heap *h, void *mem, size_t bytes)
Initialize a k_heap.
void * k_malloc(size_t size)
Allocate memory from the heap.
void * k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
Reallocate memory from a k_heap.
void * k_calloc(size_t nmemb, size_t size)
Allocate memory from heap, array style.
void * k_aligned_alloc(size_t align, size_t size)
Allocate memory from the heap with a specified alignment.
void * k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes, k_timeout_t timeout)
Allocate aligned memory from a k_heap.
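A small sketch of the k_heap calls above, assuming a statically reserved backing buffer; the sizes are arbitrary.

    static struct k_heap app_heap;
    static uint8_t app_heap_mem[1024];

    void heap_demo(void)
    {
        k_heap_init(&app_heap, app_heap_mem, sizeof(app_heap_mem));

        /* K_NO_WAIT: return NULL immediately if the request cannot be satisfied. */
        void *p = k_heap_alloc(&app_heap, 64, K_NO_WAIT);
        if (p != NULL) {
            /* ... use the allocation ... */
            k_heap_free(&app_heap, p);
        }
    }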
bool k_is_in_isr(void)
Determine if code is running at interrupt level.
int k_is_preempt_thread(void)
Determine if code is running in a preemptible thread.
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout)
Receive a mailbox message.
void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
Retrieve mailbox message data into a buffer.
void k_mbox_init(struct k_mbox *mbox)
Initialize a mailbox.
int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout)
Send a mailbox message in a synchronous manner.
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, struct k_sem *sem)
Send a mailbox message in an asynchronous manner.
int k_mem_slab_init(struct k_mem_slab *slab, void *buffer, size_t block_size, uint32_t num_blocks)
Initialize a memory slab.
void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
Free memory allocated from a memory slab.
int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats)
Get the memory stats for a memory slab.
int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab)
Reset the maximum memory usage for a slab.
int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
Allocate memory from a memory slab.
static uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
Get the number of used blocks in a memory slab.
Definition kernel.h:5717
static uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
Get the number of maximum used blocks so far in a memory slab.
Definition kernel.h:5734
static uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
Get the number of unused blocks in a memory slab.
Definition kernel.h:5756
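A sketch of the memory-slab calls above; the block size, block count and the __aligned() attribute on the backing buffer are illustrative assumptions.

    static struct k_mem_slab blk_slab;
    static char __aligned(4) blk_buf[32 * 8];       /* 8 blocks of 32 bytes each */

    void slab_demo(void)
    {
        void *block;

        k_mem_slab_init(&blk_slab, blk_buf, 32, 8);

        if (k_mem_slab_alloc(&blk_slab, &block, K_NO_WAIT) == 0) {
            /* ... use the fixed-size block ... */
            k_mem_slab_free(&blk_slab, block);
        }
    }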
int k_msgq_peek(struct k_msgq *msgq, void *data)
Peek/read a message from a message queue.
uint32_t k_msgq_num_used_get(struct k_msgq *msgq)
Get the number of messages in a message queue.
void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size, uint32_t max_msgs)
Initialize a message queue.
int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout)
Send a message to the end of a message queue.
int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx)
Peek/read a message from a message queue at the specified index.
uint32_t k_msgq_num_free_get(struct k_msgq *msgq)
Get the amount of free space in a message queue.
void k_msgq_get_attrs(struct k_msgq *msgq, struct k_msgq_attrs *attrs)
Get basic attributes of a message queue.
void k_msgq_purge(struct k_msgq *msgq)
Purge a message queue.
int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs)
Initialize a message queue.
int k_msgq_put_front(struct k_msgq *msgq, const void *data)
Send a message to the front of a message queue.
int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
Receive a message from a message queue.
int k_msgq_cleanup(struct k_msgq *msgq)
Release allocated buffer for a queue.
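A sketch of the message-queue calls above; the sample payload type, queue depth and buffer alignment are assumptions made for illustration.

    struct sample {                                  /* hypothetical message type */
        uint32_t seq;
        int32_t value;
    };

    static char __aligned(4) sample_buf[16 * sizeof(struct sample)];
    static struct k_msgq sample_q;

    void msgq_demo(void)
    {
        struct sample tx = { .seq = 1, .value = 42 };
        struct sample rx;

        k_msgq_init(&sample_q, sample_buf, sizeof(struct sample), 16);

        /* Non-zero return means the queue was full and we chose not to wait. */
        (void)k_msgq_put(&sample_q, &tx, K_NO_WAIT);

        /* Blocks until a message is available, then copies it into rx. */
        (void)k_msgq_get(&sample_q, &rx, K_FOREVER);
    }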
int k_mutex_unlock(struct k_mutex *mutex)
Unlock a mutex.
int k_mutex_init(struct k_mutex *mutex)
Initialize a mutex.
int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
Lock a mutex.
int k_pipe_write(struct k_pipe *pipe, const uint8_t *data, size_t len, k_timeout_t timeout)
Write data to a pipe.
void k_pipe_close(struct k_pipe *pipe)
Close a pipe.
void k_pipe_reset(struct k_pipe *pipe)
Reset a pipe. This routine resets the pipe, discarding any unread data and unblocking any threads wait...
void k_pipe_init(struct k_pipe *pipe, uint8_t *buffer, size_t buffer_size)
Initialize a pipe.
pipe_flags
Definition kernel.h:5374
int k_pipe_read(struct k_pipe *pipe, uint8_t *data, size_t len, k_timeout_t timeout)
Read data from a pipe. This routine reads up to len bytes of data from the pipe.
@ PIPE_FLAG_RESET
Definition kernel.h:5376
@ PIPE_FLAG_OPEN
Definition kernel.h:5375
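A sketch of the byte-oriented pipe calls above; the buffer size and timeouts are arbitrary, and the interpretation of the return values (number of bytes transferred on success, negative error code otherwise) reflects my reading of this API rather than text in this reference.

    static struct k_pipe byte_pipe;
    static uint8_t pipe_storage[64];

    void pipe_demo(void)
    {
        const uint8_t msg[] = "hello";
        uint8_t in[8];
        int ret;

        k_pipe_init(&byte_pipe, pipe_storage, sizeof(pipe_storage));

        ret = k_pipe_write(&byte_pipe, msg, sizeof(msg), K_NO_WAIT);
        /* ret: bytes accepted into the pipe, or a negative error code */

        ret = k_pipe_read(&byte_pipe, in, sizeof(in), K_NO_WAIT);
        /* ret: bytes copied out of the pipe, or a negative error code */
    }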
void k_poll_signal_reset(struct k_poll_signal *sig)
Reset a poll signal object's state to unsignaled.
k_poll_modes
Definition kernel.h:6193
void k_poll_signal_check(struct k_poll_signal *sig, unsigned int *signaled, int *result)
Fetch the signaled state and result value of a poll signal.
void k_poll_event_init(struct k_poll_event *event, uint32_t type, int mode, void *obj)
Initialize one struct k_poll_event instance.
int k_poll(struct k_poll_event *events, int num_events, k_timeout_t timeout)
Wait for one or many of multiple poll events to occur.
int k_poll_signal_raise(struct k_poll_signal *sig, int result)
Signal a poll signal object.
void k_poll_signal_init(struct k_poll_signal *sig)
Initialize a poll signal object.
@ K_POLL_MODE_NOTIFY_ONLY
Definition kernel.h:6195
@ K_POLL_NUM_MODES
Definition kernel.h:6197
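A sketch combining k_poll() with a poll signal as described above; K_POLL_TYPE_SIGNAL is assumed to be the matching event-type constant for signal objects.

    static struct k_poll_signal done_sig;

    void poll_waiter(void)
    {
        struct k_poll_event ev;
        unsigned int signaled;
        int result;

        k_poll_signal_init(&done_sig);
        k_poll_event_init(&ev, K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &done_sig);

        if (k_poll(&ev, 1, K_FOREVER) == 0) {
            k_poll_signal_check(&done_sig, &signaled, &result);
            if (signaled) {
                /* result holds the value passed to k_poll_signal_raise() */
                k_poll_signal_reset(&done_sig);
            }
        }
    }

    /* Elsewhere, e.g. from another thread or an ISR: */
    void notify_done(int rc)
    {
        k_poll_signal_raise(&done_sig, rc);
    }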
void k_queue_init(struct k_queue *queue)
Initialize a queue.
void * k_queue_get(struct k_queue *queue, k_timeout_t timeout)
Get an element from a queue.
void * k_queue_peek_tail(struct k_queue *queue)
Peek element at the tail of queue.
bool k_queue_unique_append(struct k_queue *queue, void *data)
Append an element to a queue only if it's not present already.
bool k_queue_remove(struct k_queue *queue, void *data)
Remove an element from a queue.
int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
Atomically add a list of elements to a queue.
int32_t k_queue_alloc_append(struct k_queue *queue, void *data)
Append an element to a queue.
void k_queue_cancel_wait(struct k_queue *queue)
Cancel waiting on a queue.
void * k_queue_peek_head(struct k_queue *queue)
Peek element at the head of queue.
void k_queue_prepend(struct k_queue *queue, void *data)
Prepend an element to a queue.
int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
Atomically append a list of elements to a queue.
void k_queue_append(struct k_queue *queue, void *data)
Append an element to the end of a queue.
int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data)
Prepend an element to a queue.
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
Inserts an element to a queue.
int k_queue_is_empty(struct k_queue *queue)
Query a queue to see if it has data available.
void k_sem_reset(struct k_sem *sem)
Resets a semaphore's count to zero.
unsigned int k_sem_count_get(struct k_sem *sem)
Get a semaphore's count.
void k_sem_give(struct k_sem *sem)
Give a semaphore.
int k_sem_take(struct k_sem *sem, k_timeout_t timeout)
Take a semaphore.
int k_sem_init(struct k_sem *sem, unsigned int initial_count, unsigned int limit)
Initialize a semaphore.
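A sketch of the semaphore calls above used as a binary "data ready" flag between an interrupt handler and a thread; the ISR and thread functions are hypothetical.

    static struct k_sem rx_sem;

    void rx_setup(void)
    {
        /* Initial count 0, maximum count 1: behaves as a binary semaphore. */
        k_sem_init(&rx_sem, 0, 1);
    }

    void rx_isr(const void *arg)
    {
        k_sem_give(&rx_sem);        /* safe from interrupt context */
    }

    void rx_thread(void *p1, void *p2, void *p3)
    {
        for (;;) {
            k_sem_take(&rx_sem, K_FOREVER);
            /* ... drain the receive buffer ... */
        }
    }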
struct _slist sys_slist_t
Single-linked list structure.
Definition slist.h:49
struct _snode sys_snode_t
Single-linked list node structure.
Definition slist.h:39
int k_stack_pop(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout)
Pop an element from a stack.
void k_stack_init(struct k_stack *stack, stack_data_t *buffer, uint32_t num_entries)
Initialize a stack.
int k_stack_cleanup(struct k_stack *stack)
Release a stack's allocated buffer.
int k_stack_push(struct k_stack *stack, stack_data_t data)
Push an element onto a stack.
int32_t k_stack_alloc_init(struct k_stack *stack, uint32_t num_entries)
Initialize a stack.
#define SYS_PORT_TRACING_TRACKING_FIELD(type)
Field added to kernel objects so they are tracked.
Definition tracing_macros.h:375
#define IS_ENABLED(config_macro)
Check for macro definition in compiler-visible expressions.
Definition util_macro.h:148
#define BIT(n)
Unsigned integer with bit position n set (signed in assembly language).
Definition util_macro.h:44
#define CONTAINER_OF(ptr, type, field)
Get a pointer to a structure containing the element.
Definition util.h:281
#define EBUSY
Mount device busy.
Definition errno.h:54
int k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
Copy the thread name into a supplied buffer.
void k_yield(void)
Yield the current thread.
const char * k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
Get thread state string.
void k_thread_resume(k_tid_t thread)
Resume a suspended thread.
void * k_thread_custom_data_get(void)
Get current thread's custom data.
void k_thread_abort(k_tid_t thread)
Abort a thread.
int k_thread_name_set(k_tid_t thread, const char *str)
Set current thread name.
void k_thread_priority_set(k_tid_t thread, int prio)
Set a thread's priority.
void k_thread_absolute_deadline_set(k_tid_t thread, int deadline)
Set absolute deadline expiration time for scheduler.
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
Enable thread to run on specified CPU.
void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
Iterate over all the threads in the system without locking.
bool k_can_yield(void)
Check whether it is possible to yield in the current context.
int k_thread_priority_get(k_tid_t thread)
Get a thread's priority.
static void k_thread_heap_assign(struct k_thread *thread, struct k_heap *heap)
Assign a resource memory pool to a thread.
Definition kernel.h:506
FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry, void *p1, void *p2, void *p3)
Drop a thread's privileges permanently to user mode.
int k_thread_join(struct k_thread *thread, k_timeout_t timeout)
Sleep until a thread exits.
k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread)
Get time remaining before a thread wakes up, in system ticks.
void k_thread_custom_data_set(void *value)
Set current thread's custom data.
int32_t k_sleep(k_timeout_t timeout)
Put the current thread to sleep.
void k_sched_lock(void)
Lock the scheduler.
static int32_t k_msleep(int32_t ms)
Put the current thread to sleep.
Definition kernel.h:600
void k_busy_wait(uint32_t usec_to_wait)
Cause the current thread to busy wait.
void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks, k_thread_timeslice_fn_t expired, void *data)
Set thread time slice.
static void k_thread_runtime_stats_longest_frame_reset(__maybe_unused struct k_thread *thread)
Resets thread longest frame usage data for specified thread.
Definition kernel.h:120
void k_thread_suspend(k_tid_t thread)
Suspend a thread.
void k_sched_unlock(void)
Unlock the scheduler.
static __attribute_const__ k_tid_t k_current_get(void)
Get thread ID of the current thread.
Definition kernel.h:724
int k_thread_cpu_mask_clear(k_tid_t thread)
Sets all CPU enable masks to zero.
void k_thread_foreach_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb, void *user_data)
Iterate over all the threads running on the specified CPU.
void k_sched_time_slice_set(int32_t slice, int prio)
Set time-slicing period and scope.
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
Prevent a thread from running on the specified CPU.
void k_wakeup(k_tid_t thread)
Wake up a sleeping thread.
int k_thread_stack_free(k_thread_stack_t *stack)
Free a dynamically allocated thread stack.
k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread)
Get time when a thread wakes up, in system ticks.
__attribute_const__ k_tid_t k_sched_current_thread_query(void)
Query thread ID of the current thread.
static void k_thread_start(k_tid_t thread)
Start an inactive thread.
Definition kernel.h:1194
k_tid_t k_thread_create(struct k_thread *new_thread, k_thread_stack_t *stack, size_t stack_size, k_thread_entry_t entry, void *p1, void *p2, void *p3, int prio, uint32_t options, k_timeout_t delay)
Create a thread.
void k_reschedule(void)
Invoke the scheduler.
void k_thread_deadline_set(k_tid_t thread, int deadline)
Set relative deadline expiration time for scheduler.
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb, void *user_data)
Iterate over all the threads running on the current CPU without locking.
const char * k_thread_name_get(k_tid_t thread)
Get thread name.
void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
Iterate over all the threads in the system.
static bool k_is_pre_kernel(void)
Test whether startup is in the before-main-task phase.
Definition kernel.h:701
int k_thread_cpu_pin(k_tid_t thread, int cpu)
Pin a thread to a CPU.
int32_t k_usleep(int32_t us)
Put the current thread to sleep with microsecond resolution.
int k_thread_cpu_mask_enable_all(k_tid_t thread)
Sets all CPU enable masks to one.
void(* k_thread_user_cb_t)(const struct k_thread *thread, void *user_data)
Definition kernel.h:127
k_thread_stack_t * k_thread_stack_alloc(size_t size, int flags)
Dynamically allocate a thread stack.
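A sketch of creating and joining a thread with the calls above; the K_THREAD_STACK_DEFINE/K_THREAD_STACK_SIZEOF helpers, the stack size and the priority value are assumptions made for illustration.

    #define WORKER_STACK_SIZE 1024
    #define WORKER_PRIORITY   5

    K_THREAD_STACK_DEFINE(worker_stack, WORKER_STACK_SIZE);
    static struct k_thread worker_data;

    static void worker_entry(void *p1, void *p2, void *p3)
    {
        /* ... do the work ... */
    }

    void spawn_worker(void)
    {
        k_tid_t tid = k_thread_create(&worker_data, worker_stack,
                                      K_THREAD_STACK_SIZEOF(worker_stack),
                                      worker_entry, NULL, NULL, NULL,
                                      WORKER_PRIORITY, 0, K_NO_WAIT);

        k_thread_name_set(tid, "worker");
        k_thread_join(&worker_data, K_FOREVER);   /* wait for worker_entry to return */
    }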
k_ticks_t k_timer_expires_ticks(const struct k_timer *timer)
Get next expiration time of a timer, in system ticks.
void(* k_timer_stop_t)(struct k_timer *timer)
Timer stop function type.
Definition kernel.h:1746
k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer)
Get time remaining before a timer next expires, in system ticks.
void * k_timer_user_data_get(const struct k_timer *timer)
Retrieve the user-specific data from a timer.
void(* k_timer_expiry_t)(struct k_timer *timer)
Timer expiry function type.
Definition kernel.h:1730
void k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn, k_timer_stop_t stop_fn)
Initialize a timer.
void k_timer_start(struct k_timer *timer, k_timeout_t duration, k_timeout_t period)
Start a timer.
static uint32_t k_timer_remaining_get(struct k_timer *timer)
Get time remaining before a timer next expires.
Definition kernel.h:1895
uint32_t k_timer_status_sync(struct k_timer *timer)
Synchronize thread to timer expiration.
void k_timer_stop(struct k_timer *timer)
Stop a timer.
uint32_t k_timer_status_get(struct k_timer *timer)
Read timer status.
void k_timer_user_data_set(struct k_timer *timer, void *user_data)
Associate user-specific data with a timer.
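A sketch of the timer calls above; the K_MSEC() duration helper and the 100 ms period are assumptions, and the expiry function runs from the system clock interrupt context.

    static struct k_timer blink_timer;

    static void blink_expiry(struct k_timer *timer)
    {
        /* Called on every expiry, in interrupt context: keep it short. */
    }

    void timer_demo(void)
    {
        k_timer_init(&blink_timer, blink_expiry, NULL);

        /* First expiry after 100 ms, then every 100 ms thereafter. */
        k_timer_start(&blink_timer, K_MSEC(100), K_MSEC(100));

        /* ... later ... */
        k_timer_stop(&blink_timer);
    }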
#define k_ticks_to_ms_floor32(t)
Convert ticks to milliseconds.
Definition time_units.h:1707
#define k_ticks_to_sec_floor32(t)
Convert ticks to seconds.
Definition time_units.h:1611
#define k_ticks_to_ms_floor64(t)
Convert ticks to milliseconds.
Definition time_units.h:1723
int k_work_poll_submit_to_queue(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout)
Submit a triggered work item.
static k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
Access the thread that animates a work queue.
Definition kernel.h:4550
static bool k_work_is_pending(const struct k_work *work)
Test whether a work item is currently pending.
Definition kernel.h:4521
int k_work_queue_drain(struct k_work_q *queue, bool plug)
Wait until the work queue has drained, optionally plugging it.
static k_ticks_t k_work_delayable_expires_get(const struct k_work_delayable *dwork)
Get the absolute tick count at which a scheduled delayable work will be submitted.
Definition kernel.h:4538
int k_work_schedule_for_queue(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay)
Submit an idle work item to a queue after a delay.
int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
Busy state flags from the delayable work item.
int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout)
Stop a work queue.
void k_work_init_delayable(struct k_work_delayable *dwork, k_work_handler_t handler)
Initialize a delayable work structure.
int k_work_poll_cancel(struct k_work_poll *work)
Cancel a triggered work item.
void k_work_user_queue_start(struct k_work_user_q *work_q, k_thread_stack_t *stack, size_t stack_size, int prio, const char *name)
Start a workqueue in user mode.
void k_work_poll_init(struct k_work_poll *work, k_work_handler_t handler)
Initialize a triggered work item.
int k_work_cancel(struct k_work *work)
Cancel a work item.
static int k_work_user_submit_to_queue(struct k_work_user_q *work_q, struct k_work_user *work)
Submit a work item to a user mode workqueue.
Definition kernel.h:4677
int k_work_submit_to_queue(struct k_work_q *queue, struct k_work *work)
Submit a work item to a queue.
static bool k_work_user_is_pending(struct k_work_user *work)
Check if a userspace work item is pending.
Definition kernel.h:4654
void(* k_work_handler_t)(struct k_work *work)
The signature for a work item handler function.
Definition kernel.h:3693
int k_work_schedule(struct k_work_delayable *dwork, k_timeout_t delay)
Submit an idle work item to the system work queue after a delay.
static bool k_work_delayable_is_pending(const struct k_work_delayable *dwork)
Test whether a delayed work item is currently pending.
Definition kernel.h:4532
bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork, struct k_work_sync *sync)
Cancel delayable work and wait.
int k_work_cancel_delayable(struct k_work_delayable *dwork)
Cancel delayable work.
static void k_work_user_init(struct k_work_user *work, k_work_user_handler_t handler)
Initialize a userspace work item.
Definition kernel.h:4632
int k_work_queue_unplug(struct k_work_q *queue)
Release a work queue to accept new submissions.
int k_work_reschedule(struct k_work_delayable *dwork, k_timeout_t delay)
Reschedule a work item to the system work queue after a delay.
void(* k_work_user_handler_t)(struct k_work_user *work)
Work item handler function type for user work queues.
Definition kernel.h:4573
bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync)
Cancel a work item and wait for it to complete.
static k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
Access the user mode thread that animates a work queue.
Definition kernel.h:4732
int k_work_busy_get(const struct k_work *work)
Busy state flags from the work item.
static struct k_work_delayable * k_work_delayable_from_work(struct k_work *work)
Get the parent delayable work structure from a work pointer.
Definition kernel.h:4527
static k_ticks_t k_work_delayable_remaining_get(const struct k_work_delayable *dwork)
Get the number of ticks until a scheduled delayable work will be submitted.
Definition kernel.h:4544
bool k_work_flush(struct k_work *work, struct k_work_sync *sync)
Wait for last-submitted instance to complete.
int k_work_reschedule_for_queue(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay)
Reschedule a work item to a queue after a delay.
void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *cfg)
Run work queue using calling thread.
int k_work_submit(struct k_work *work)
Submit a work item to the system queue.
bool k_work_flush_delayable(struct k_work_delayable *dwork, struct k_work_sync *sync)
Flush delayable work.
int k_work_poll_submit(struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout)
Submit a triggered work item to the system workqueue.
void k_work_queue_init(struct k_work_q *queue)
Initialize a work queue structure.
void k_work_queue_start(struct k_work_q *queue, k_thread_stack_t *stack, size_t stack_size, int prio, const struct k_work_queue_config *cfg)
Initialize a work queue.
void k_work_init(struct k_work *work, k_work_handler_t handler)
Initialize a (non-delayable) work structure.
@ K_WORK_CANCELING
Flag indicating a work item that is being canceled.
Definition kernel.h:4301
@ K_WORK_QUEUED
Flag indicating a work item that has been submitted to a queue but has not started running.
Definition kernel.h:4308
@ K_WORK_DELAYED
Flag indicating a delayed work item that is scheduled for submission to a queue.
Definition kernel.h:4315
@ K_WORK_RUNNING
Flag indicating a work item that is running under a work queue thread.
Definition kernel.h:4295
@ K_WORK_FLUSHING
Flag indicating a synced work item that is being flushed.
Definition kernel.h:4321
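A sketch of the work-queue calls above using the system work queue; K_MSEC() and the 500 ms delay are assumptions for illustration.

    static struct k_work report_work;
    static struct k_work_delayable retry_work;

    static void report_handler(struct k_work *work)
    {
        /* Runs in the context of the system work queue thread. */
    }

    static void retry_handler(struct k_work *work)
    {
        struct k_work_delayable *dwork = k_work_delayable_from_work(work);

        (void)dwork;                       /* e.g. reschedule via k_work_reschedule() */
    }

    void work_demo(void)
    {
        k_work_init(&report_work, report_handler);
        k_work_submit(&report_work);               /* queue for execution as soon as possible */

        k_work_init_delayable(&retry_work, retry_handler);
        k_work_schedule(&retry_work, K_MSEC(500)); /* run the handler after ~500 ms */
    }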
#define BUILD_ASSERT(EXPR, MSG...)
Definition llvm.h:51
struct k_thread * k_tid_t
Definition thread.h:368
struct k_thread_runtime_stats k_thread_runtime_stats_t
void k_sys_runtime_stats_disable(void)
Disable gathering of system runtime statistics.
int k_thread_runtime_stats_enable(k_tid_t thread)
Enable gathering of runtime statistics for specified thread.
int k_ipi_work_add(struct k_ipi_work *work, uint32_t cpu_bitmask, k_ipi_func_t func)
Add an IPI work item to the IPI work queue.
void k_sys_runtime_stats_enable(void)
Enable gathering of system runtime statistics.
int k_thread_runtime_stats_get(k_tid_t thread, k_thread_runtime_stats_t *stats)
Get the runtime statistics of a thread.
void k_ipi_work_signal(void)
Signal that there is one or more IPI work items to process.
int k_ipi_work_wait(struct k_ipi_work *work, k_timeout_t timeout)
Wait until the IPI work item has been processed by all targeted CPUs.
execution_context_types
Definition kernel.h:91
@ K_ISR
Definition kernel.h:92
@ K_COOP_THREAD
Definition kernel.h:93
@ K_PREEMPT_THREAD
Definition kernel.h:94
void(* k_ipi_func_t)(struct k_ipi_work *work)
Definition kernel.h:3577
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
Get the runtime statistics of all threads.
static void k_ipi_work_init(struct k_ipi_work *work)
Initialize the specified IPI work item.
Definition kernel.h:3604
int k_thread_runtime_stats_disable(k_tid_t thread)
Disable gathering of runtime statistics for specified thread.
int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats)
Get the runtime statistics of all threads on specified cpu.
Header files included by kernel.h.
void(* k_thread_timeslice_fn_t)(struct k_thread *thread, void *data)
Definition kernel_structs.h:314
Memory Statistics.
flags
Definition parser.h:97
state
Definition parser_state.h:29
__UINT32_TYPE__ uint32_t
Definition stdint.h:90
__INTPTR_TYPE__ intptr_t
Definition stdint.h:104
__INT32_TYPE__ int32_t
Definition stdint.h:74
__UINT64_TYPE__ uint64_t
Definition stdint.h:91
__UINT8_TYPE__ uint8_t
Definition stdint.h:88
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:105
__INT64_TYPE__ int64_t
Definition stdint.h:75
Definition kernel.h:3336
_wait_q_t wait_q
Definition kernel.h:3337
Event Structure.
Definition kernel.h:2451
Definition kernel.h:2694
futex structure
Definition kernel.h:2365
atomic_t val
Definition kernel.h:2366
Definition kernel.h:5801
struct k_spinlock lock
Definition kernel.h:5804
struct sys_heap heap
Definition kernel.h:5802
_wait_q_t wait_q
Definition kernel.h:5803
IPI work item structure.
Definition kernel.h:3585
Definition kernel.h:2935
Mailbox Message Structure.
Definition kernel.h:5211
k_tid_t tx_target_thread
target thread id
Definition kernel.h:5221
void * tx_data
sender's message data buffer
Definition kernel.h:5217
k_tid_t rx_source_thread
source thread id
Definition kernel.h:5219
uint32_t info
application-defined information value
Definition kernel.h:5215
size_t size
size of message (in bytes)
Definition kernel.h:5213
Mailbox Structure.
Definition kernel.h:5233
_wait_q_t tx_msg_queue
Transmit messages queue.
Definition kernel.h:5235
struct k_spinlock lock
Definition kernel.h:5238
_wait_q_t rx_msg_queue
Receive message queue.
Definition kernel.h:5237
Memory Domain.
Definition mem_domain.h:80
Memory Partition.
Definition mem_domain.h:55
Message Queue Attributes.
Definition kernel.h:4953
uint32_t used_msgs
Used messages.
Definition kernel.h:4959
size_t msg_size
Message Size.
Definition kernel.h:4955
uint32_t max_msgs
Maximal number of messages.
Definition kernel.h:4957
Message Queue Structure.
Definition kernel.h:4892
size_t msg_size
Message size.
Definition kernel.h:4898
char * read_ptr
Read pointer.
Definition kernel.h:4906
uint32_t used_msgs
Number of used messages.
Definition kernel.h:4910
char * buffer_end
End of message buffer.
Definition kernel.h:4904
struct k_spinlock lock
Lock.
Definition kernel.h:4896
char * write_ptr
Write pointer.
Definition kernel.h:4908
char * buffer_start
Start of message buffer.
Definition kernel.h:4902
uint8_t flags
Message queue flags.
Definition kernel.h:4915
_wait_q_t wait_q
Message queue wait queue.
Definition kernel.h:4894
uint32_t max_msgs
Maximal number of messages.
Definition kernel.h:4900
Mutex Structure.
Definition kernel.h:3224
uint32_t lock_count
Current lock count.
Definition kernel.h:3231
_wait_q_t wait_q
Mutex wait queue.
Definition kernel.h:3226
int owner_orig_prio
Original thread priority.
Definition kernel.h:3234
struct k_thread * owner
Mutex owner.
Definition kernel.h:3228
Object core structure.
Definition obj_core.h:121
Definition kernel.h:5379
uint8_t flags
Definition kernel.h:5385
struct ring_buf buf
Definition kernel.h:5381
_wait_q_t data
Definition kernel.h:5383
_wait_q_t space
Definition kernel.h:5384
struct k_spinlock lock
Definition kernel.h:5382
size_t waiting
Definition kernel.h:5380
Poll Event.
Definition kernel.h:6235
struct k_msgq * typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE
Definition kernel.h:6267
void * typed_K_POLL_TYPE_IGNORE
Definition kernel.h:6262
struct k_poll_signal * signal
Definition kernel.h:6263
struct k_pipe * pipe
Definition kernel.h:6268
uint32_t tag
optional user-specified tag, opaque, untouched by the API
Definition kernel.h:6243
struct k_fifo * fifo
Definition kernel.h:6265
struct k_msgq * msgq
Definition kernel.h:6267
struct k_queue * queue
Definition kernel.h:6266
uint32_t unused
unused bits in 32-bit word
Definition kernel.h:6255
struct k_pipe * typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE
Definition kernel.h:6268
uint32_t type
bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values)
Definition kernel.h:6246
struct k_sem * sem
Definition kernel.h:6264
struct k_queue * typed_K_POLL_TYPE_DATA_AVAILABLE
Definition kernel.h:6266
struct k_sem * typed_K_POLL_TYPE_SEM_AVAILABLE
Definition kernel.h:6264
uint32_t state
bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values)
Definition kernel.h:6249
uint32_t mode
mode of operation, from enum k_poll_modes
Definition kernel.h:6252
struct z_poller * poller
PRIVATE - DO NOT TOUCH.
Definition kernel.h:6240
struct k_poll_signal * typed_K_POLL_TYPE_SIGNAL
Definition kernel.h:6263
void * obj
Definition kernel.h:6262
struct k_fifo * typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE
Definition kernel.h:6265
Definition kernel.h:6211
sys_dlist_t poll_events
PRIVATE - DO NOT TOUCH.
Definition kernel.h:6213
int result
custom result value passed to k_poll_signal_raise() if needed
Definition kernel.h:6222
unsigned int signaled
1 if the event has been signaled, 0 otherwise.
Definition kernel.h:6219
Definition kernel.h:2073
struct k_spinlock lock
Definition kernel.h:2075
_wait_q_t wait_q
Definition kernel.h:2076
sys_sflist_t data_q
Definition kernel.h:2074
Semaphore structure.
Definition kernel.h:3429
Kernel Spin Lock.
Definition spinlock.h:45
Thread Structure.
Definition thread.h:252
struct _thread_base base
Definition thread.h:254
struct k_heap * resource_pool
resource pool
Definition thread.h:342
struct __thread_entry entry
thread entry and parameters description
Definition thread.h:281
Kernel timeout type.
Definition clock.h:65
Kernel timer structure.
Definition kernel.h:1652
A structure used to submit work after a delay.
Definition kernel.h:4353
struct _timeout timeout
Definition kernel.h:4358
struct k_work_q * queue
Definition kernel.h:4361
struct k_work work
Definition kernel.h:4355
A structure used to hold work until it can be processed.
Definition kernel.h:4487
sys_slist_t pending
Definition kernel.h:4501
_wait_q_t drainq
Definition kernel.h:4507
k_tid_t thread_id
Definition kernel.h:4494
_wait_q_t notifyq
Definition kernel.h:4504
uint32_t flags
Definition kernel.h:4510
struct k_thread thread
Definition kernel.h:4489
A structure holding optional configuration items for a work queue.
Definition kernel.h:4449
const char * name
The name to be given to the work queue thread.
Definition kernel.h:4454
uint32_t work_timeout_ms
Controls whether work queue monitors work timeouts.
Definition kernel.h:4483
bool essential
Control whether the work queue thread should be marked as essential thread.
Definition kernel.h:4473
bool no_yield
Control whether the work queue thread should yield between items.
Definition kernel.h:4468
A structure holding internal state for a pending synchronous operation on a work item or queue.
Definition kernel.h:4436
struct z_work_canceller canceller
Definition kernel.h:4439
struct z_work_flusher flusher
Definition kernel.h:4438
A structure used to submit work.
Definition kernel.h:4325
k_work_handler_t handler
Definition kernel.h:4334
uint32_t flags
Definition kernel.h:4345
struct k_work_q * queue
Definition kernel.h:4337
sys_snode_t node
Definition kernel.h:4331
A structure to represent a ring buffer.
Definition ring_buffer.h:49
Definition sys_heap.h:57
Definition mem_stats.h:24
static __pinned_func bool k_is_user_context(void)
Indicate whether the CPU is currently in user mode.
Definition syscall.h:115
Macros to abstract toolchain specific capabilities.
Main header file for tracing subsystem API.
Header file for tracing macros.