13#ifndef ZEPHYR_INCLUDE_KERNEL_H_
14#define ZEPHYR_INCLUDE_KERNEL_H_
16#if !defined(_ASMLANGUAGE)
50#if (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES) == 0
51#error Zero available thread priorities defined!
54#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
55#define K_PRIO_PREEMPT(x) (x)
57#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
58#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
59#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
60#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
61#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
64#define Z_POLL_EVENT_OBJ_INIT(obj) \
65 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
66#define Z_DECL_POLL_EVENT sys_dlist_t poll_events;
68#define Z_POLL_EVENT_OBJ_INIT(obj)
69#define Z_DECL_POLL_EVENT
122#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
123 thread->base.usage.longest = 0ULL;
173 __ASSERT(cpu == 0,
"cpu filter out of bounds");
248 __ASSERT(cpu == 0,
"cpu filter out of bounds");
273#define K_ESSENTIAL (BIT(0))
285#define K_FP_REGS (BIT(K_FP_IDX))
293#define K_USER (BIT(2))
303#define K_INHERIT_PERMS (BIT(3))
314#define K_CALLBACK_STATE (BIT(4))
326#define K_DSP_REGS (BIT(K_DSP_IDX))
337#define K_AGU_REGS (BIT(K_AGU_IDX))
348#define K_SSE_REGS (BIT(7))
352#if !defined(_ASMLANGUAGE)
448 void *p1,
void *p2,
void *p3,
489#define k_thread_access_grant(thread, ...) \
490 FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__)
512#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
533__syscall
int k_thread_stack_space_get(
const struct k_thread *thread,
537#if (K_HEAP_MEM_POOL_SIZE > 0)
550void k_thread_system_pool_assign(
struct k_thread *thread);
602 return k_sleep(Z_TIMEOUT_MS(ms));
703 extern bool z_sys_post_kernel;
714 return !z_sys_post_kernel;
728#ifdef CONFIG_CURRENT_THREAD_USE_TLS
731 extern Z_THREAD_LOCAL
k_tid_t z_tls_current;
733 return z_tls_current;
760k_ticks_t z_timeout_expires(
const struct _timeout *timeout);
761k_ticks_t z_timeout_remaining(
const struct _timeout *timeout);
763#ifdef CONFIG_SYS_CLOCK_EXISTS
774static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
777 return z_timeout_expires(&thread->
base.timeout);
789static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
792 return z_timeout_remaining(&thread->
base.timeout);
801struct _static_thread_data {
802 struct k_thread *init_thread;
804 unsigned int init_stack_size;
811 const char *init_name;
812#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
815 k_timeout_t init_delay;
819#ifdef CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME
820#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay_ms = (ms)
821#define Z_THREAD_INIT_DELAY(thread) SYS_TIMEOUT_MS((thread)->init_delay_ms)
823#define Z_THREAD_INIT_DELAY_INITIALIZER(ms) .init_delay = SYS_TIMEOUT_MS_INIT(ms)
824#define Z_THREAD_INIT_DELAY(thread) (thread)->init_delay
827#define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
829 prio, options, delay, tname) \
831 .init_thread = (thread), \
832 .init_stack = (stack), \
833 .init_stack_size = (stack_size), \
834 .init_entry = (k_thread_entry_t)entry, \
835 .init_p1 = (void *)p1, \
836 .init_p2 = (void *)p2, \
837 .init_p3 = (void *)p3, \
838 .init_prio = (prio), \
839 .init_options = (options), \
840 .init_name = STRINGIFY(tname), \
841 Z_THREAD_INIT_DELAY_INITIALIZER(delay) \
848#define Z_THREAD_COMMON_DEFINE(name, stack_size, \
850 prio, options, delay) \
851 struct k_thread _k_thread_obj_##name; \
852 STRUCT_SECTION_ITERABLE(_static_thread_data, \
853 _k_thread_data_##name) = \
854 Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
855 _k_thread_stack_##name, stack_size,\
856 entry, p1, p2, p3, prio, options, \
858 __maybe_unused const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
895#define K_THREAD_DEFINE(name, stack_size, \
897 prio, options, delay) \
898 K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
899 Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
900 prio, options, delay)
932#define K_KERNEL_THREAD_DEFINE(name, stack_size, \
934 prio, options, delay) \
935 K_KERNEL_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
936 Z_THREAD_COMMON_DEFINE(name, stack_size, entry, p1, p2, p3, \
937 prio, options, delay)
978#ifdef CONFIG_SCHED_DEADLINE
1075#ifdef CONFIG_SCHED_CPU_MASK
1444#define K_NO_WAIT Z_TIMEOUT_NO_WAIT
1458#define K_NSEC(t) Z_TIMEOUT_NS(t)
1472#define K_USEC(t) Z_TIMEOUT_US(t)
1484#define K_CYC(t) Z_TIMEOUT_CYC(t)
1496#define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1508#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
1520#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
1532#define K_MINUTES(m) K_SECONDS((m) * 60)
1544#define K_HOURS(h) K_MINUTES((h) * 60)
1554#define K_FOREVER Z_FOREVER
1556#ifdef CONFIG_TIMEOUT_64BIT
1569#define K_TIMEOUT_ABS_TICKS(t) \
1570 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)CLAMP(t, 0, (INT64_MAX - 1))))
1583#define K_TIMEOUT_ABS_SEC(t) K_TIMEOUT_ABS_TICKS(k_sec_to_ticks_ceil64(t))
1596#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1610#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1624#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1638#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1662 struct _timeout timeout;
1668 void (*expiry_fn)(
struct k_timer *timer);
1671 void (*stop_fn)(
struct k_timer *timer);
1684#ifdef CONFIG_OBJ_CORE_TIMER
1695#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
1699 .fn = z_timer_expiration_handler, \
1702 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1703 .expiry_fn = expiry, \
1759#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
1760 STRUCT_SECTION_ITERABLE(k_timer, name) = \
1761 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
1847#ifdef CONFIG_SYS_CLOCK_EXISTS
1861static inline k_ticks_t z_impl_k_timer_expires_ticks(
1864 return z_timeout_expires(&timer->timeout);
1879static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1882 return z_timeout_remaining(&timer->timeout);
1919static inline void z_impl_k_timer_user_data_set(
struct k_timer *timer,
1922 timer->user_data = user_data;
1934static inline void *z_impl_k_timer_user_data_get(
const struct k_timer *timer)
1936 return timer->user_data;
2029 delta = uptime - *reftime;
2060 if (!
IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
2061 __ASSERT(0,
"64-bit cycle counter not enabled on this platform. "
2062 "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
2087#define Z_QUEUE_INITIALIZER(obj) \
2089 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
2091 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2092 Z_POLL_EVENT_OBJ_INIT(obj) \
2313static inline int z_impl_k_queue_is_empty(
struct k_queue *queue)
2349#define K_QUEUE_DEFINE(name) \
2350 STRUCT_SECTION_ITERABLE(k_queue, name) = \
2351 Z_QUEUE_INITIALIZER(name)
2355#ifdef CONFIG_USERSPACE
2376struct z_futex_data {
2381#define Z_FUTEX_DATA_INITIALIZER(obj) \
2383 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2461#ifdef CONFIG_OBJ_CORE_EVENT
2474#define Z_EVENT_INITIALIZER(obj) \
2476 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2688#define K_EVENT_DEFINE(name) \
2689 STRUCT_SECTION_ITERABLE(k_event, name) = \
2690 Z_EVENT_INITIALIZER(name);
2696#ifdef CONFIG_OBJ_CORE_FIFO
2704#define Z_FIFO_INITIALIZER(obj) \
2706 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2726#define k_fifo_init(fifo) \
2728 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2729 k_queue_init(&(fifo)->_queue); \
2730 K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo); \
2731 K_OBJ_CORE_LINK(K_OBJ_CORE(fifo)); \
2732 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2746#define k_fifo_cancel_wait(fifo) \
2748 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2749 k_queue_cancel_wait(&(fifo)->_queue); \
2750 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2765#define k_fifo_put(fifo, data) \
2767 void *_data = data; \
2768 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, _data); \
2769 k_queue_append(&(fifo)->_queue, _data); \
2770 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, _data); \
2789#define k_fifo_alloc_put(fifo, data) \
2791 void *_data = data; \
2792 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, _data); \
2793 int fap_ret = k_queue_alloc_append(&(fifo)->_queue, _data); \
2794 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, _data, fap_ret); \
2812#define k_fifo_put_list(fifo, head, tail) \
2814 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2815 k_queue_append_list(&(fifo)->_queue, head, tail); \
2816 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2832#define k_fifo_put_slist(fifo, list) \
2834 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2835 k_queue_merge_slist(&(fifo)->_queue, list); \
2836 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2856#define k_fifo_get(fifo, timeout) \
2858 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2859 void *fg_ret = k_queue_get(&(fifo)->_queue, timeout); \
2860 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, fg_ret); \
2877#define k_fifo_is_empty(fifo) \
2878 k_queue_is_empty(&(fifo)->_queue)
2893#define k_fifo_peek_head(fifo) \
2895 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2896 void *fph_ret = k_queue_peek_head(&(fifo)->_queue); \
2897 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, fph_ret); \
2912#define k_fifo_peek_tail(fifo) \
2914 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2915 void *fpt_ret = k_queue_peek_tail(&(fifo)->_queue); \
2916 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, fpt_ret); \
2929#define K_FIFO_DEFINE(name) \
2930 STRUCT_SECTION_ITERABLE(k_fifo, name) = \
2931 Z_FIFO_INITIALIZER(name)
2937#ifdef CONFIG_OBJ_CORE_LIFO
2946#define Z_LIFO_INITIALIZER(obj) \
2948 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
2968#define k_lifo_init(lifo) \
2970 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2971 k_queue_init(&(lifo)->_queue); \
2972 K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo); \
2973 K_OBJ_CORE_LINK(K_OBJ_CORE(lifo)); \
2974 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
2989#define k_lifo_put(lifo, data) \
2991 void *_data = data; \
2992 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, _data); \
2993 k_queue_prepend(&(lifo)->_queue, _data); \
2994 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, _data); \
3013#define k_lifo_alloc_put(lifo, data) \
3015 void *_data = data; \
3016 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, _data); \
3017 int lap_ret = k_queue_alloc_prepend(&(lifo)->_queue, _data); \
3018 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, _data, lap_ret); \
3039#define k_lifo_get(lifo, timeout) \
3041 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
3042 void *lg_ret = k_queue_get(&(lifo)->_queue, timeout); \
3043 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, lg_ret); \
3056#define K_LIFO_DEFINE(name) \
3057 STRUCT_SECTION_ITERABLE(k_lifo, name) = \
3058 Z_LIFO_INITIALIZER(name)
3065#define K_STACK_FLAG_ALLOC ((uint8_t)1)
3072 stack_data_t *base, *next, *top;
3078#ifdef CONFIG_OBJ_CORE_STACK
3083#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
3085 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3086 .base = (stack_buffer), \
3087 .next = (stack_buffer), \
3088 .top = (stack_buffer) + (stack_num_entries), \
3111 stack_data_t *buffer,
uint32_t num_entries);
3192#define K_STACK_DEFINE(name, stack_num_entries) \
3193 stack_data_t __noinit \
3194 _k_stack_buf_##name[stack_num_entries]; \
3195 STRUCT_SECTION_ITERABLE(k_stack, name) = \
3196 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
3208extern struct k_work_q k_sys_work_q;
3238#ifdef CONFIG_OBJ_CORE_MUTEX
3246#define Z_MUTEX_INITIALIZER(obj) \
3248 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3251 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
3267#define K_MUTEX_DEFINE(name) \
3268 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
3269 Z_MUTEX_INITIALIZER(name)
3339#ifdef CONFIG_OBJ_CORE_CONDVAR
3344#define Z_CONDVAR_INITIALIZER(obj) \
3346 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
3410#define K_CONDVAR_DEFINE(name) \
3411 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
3412 Z_CONDVAR_INITIALIZER(name)
3441#ifdef CONFIG_OBJ_CORE_SEM
3451#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
3453 .wait_q = Z_WAIT_Q_INIT(&(obj).wait_q), \
3454 .count = (initial_count), \
3455 .limit = (count_limit), \
3456 Z_POLL_EVENT_OBJ_INIT(obj) \
3471#define K_SEM_MAX_LIMIT UINT_MAX
3489 unsigned int limit);
3548static inline unsigned int z_impl_k_sem_count_get(
struct k_sem *sem)
3564#define K_SEM_DEFINE(name, initial_count, count_limit) \
3565 STRUCT_SECTION_ITERABLE(k_sem, name) = \
3566 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
3567 BUILD_ASSERT(((count_limit) != 0) && \
3568 (((initial_count) < (count_limit)) || ((initial_count) == (count_limit))) && \
3569 ((count_limit) <= K_SEM_MAX_LIMIT));
3573#if defined(CONFIG_SCHED_IPI_SUPPORTED) || defined(__DOXYGEN__)
3607 for (
unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
4256 K_WORK_RUNNING_BIT = 0,
4257 K_WORK_CANCELING_BIT = 1,
4258 K_WORK_QUEUED_BIT = 2,
4259 K_WORK_DELAYED_BIT = 3,
4260 K_WORK_FLUSHING_BIT = 4,
4262 K_WORK_MASK =
BIT(K_WORK_DELAYED_BIT) |
BIT(K_WORK_QUEUED_BIT)
4263 |
BIT(K_WORK_RUNNING_BIT) |
BIT(K_WORK_CANCELING_BIT) |
BIT(K_WORK_FLUSHING_BIT),
4266 K_WORK_DELAYABLE_BIT = 8,
4267 K_WORK_DELAYABLE =
BIT(K_WORK_DELAYABLE_BIT),
4270 K_WORK_QUEUE_STARTED_BIT = 0,
4271 K_WORK_QUEUE_STARTED =
BIT(K_WORK_QUEUE_STARTED_BIT),
4272 K_WORK_QUEUE_BUSY_BIT = 1,
4273 K_WORK_QUEUE_BUSY =
BIT(K_WORK_QUEUE_BUSY_BIT),
4274 K_WORK_QUEUE_DRAIN_BIT = 2,
4275 K_WORK_QUEUE_DRAIN =
BIT(K_WORK_QUEUE_DRAIN_BIT),
4276 K_WORK_QUEUE_PLUGGED_BIT = 3,
4277 K_WORK_QUEUE_PLUGGED =
BIT(K_WORK_QUEUE_PLUGGED_BIT),
4278 K_WORK_QUEUE_STOP_BIT = 4,
4279 K_WORK_QUEUE_STOP =
BIT(K_WORK_QUEUE_STOP_BIT),
4282 K_WORK_QUEUE_NO_YIELD_BIT = 8,
4283 K_WORK_QUEUE_NO_YIELD =
BIT(K_WORK_QUEUE_NO_YIELD_BIT),
4348#define Z_WORK_INITIALIZER(work_handler) { \
4349 .handler = (work_handler), \
4364#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
4366 .handler = (work_handler), \
4367 .flags = K_WORK_DELAYABLE, \
4387#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
4388 struct k_work_delayable work \
4389 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
4402struct z_work_flusher {
4413struct z_work_canceller {
4415 struct k_work *work;
4512#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
4513 struct _timeout work_timeout_record;
4541 return z_timeout_expires(&dwork->
timeout);
4547 return z_timeout_remaining(&dwork->
timeout);
4579struct k_work_user_q {
4585 K_WORK_USER_STATE_PENDING,
4598#if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
4599#define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
4601#define Z_WORK_USER_INITIALIZER(work_handler) \
4603 ._reserved = NULL, \
4604 .handler = (work_handler), \
4620#define K_WORK_USER_DEFINE(work, work_handler) \
4621 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4635 *work = (
struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4678 struct k_work_user *work)
4683 K_WORK_USER_STATE_PENDING)) {
4691 K_WORK_USER_STATE_PENDING);
4719 size_t stack_size,
int prio,
4734 return &work_q->thread;
4745 struct k_work_q *workq;
4746 struct z_poller poller;
4747 struct k_poll_event *events;
4750 struct _timeout timeout;
4774#define K_WORK_DEFINE(work, work_handler) \
4775 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4824 struct k_work_poll *work,
4919#ifdef CONFIG_OBJ_CORE_MSGQ
4928#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
4930 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
4932 .msg_size = q_msg_size, \
4933 .max_msgs = q_max_msgs, \
4934 .buffer_start = q_buffer, \
4935 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
4936 .read_ptr = q_buffer, \
4937 .write_ptr = q_buffer, \
4939 Z_POLL_EVENT_OBJ_INIT(obj) \
4948#define K_MSGQ_FLAG_ALLOC BIT(0)
4981#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4982 static char __noinit __aligned(q_align) \
4983 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
4984 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
4985 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
4986 (q_msg_size), (q_max_msgs))
5178static inline uint32_t z_impl_k_msgq_num_free_get(
struct k_msgq *msgq)
5194static inline uint32_t z_impl_k_msgq_num_used_get(
struct k_msgq *msgq)
5224#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
5226 struct k_sem *_async_sem;
5242#ifdef CONFIG_OBJ_CORE_MAILBOX
5250#define Z_MBOX_INITIALIZER(obj) \
5252 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
5253 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
5269#define K_MBOX_DEFINE(name) \
5270 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
5271 Z_MBOX_INITIALIZER(name) \
5388#ifdef CONFIG_OBJ_CORE_PIPE
5397#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
5400 .buf = RING_BUF_INIT(pipe_buffer, pipe_buffer_size), \
5401 .data = Z_WAIT_Q_INIT(&obj.data), \
5402 .space = Z_WAIT_Q_INIT(&obj.space), \
5403 .flags = PIPE_FLAG_OPEN, \
5404 Z_POLL_EVENT_OBJ_INIT(obj) \
5423#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
5424 static unsigned char __noinit __aligned(pipe_align) \
5425 _k_pipe_buf_##name[pipe_buffer_size]; \
5426 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
5427 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
5492struct k_mem_slab_info {
5496#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5503 struct k_spinlock lock;
5506 struct k_mem_slab_info info;
5510#ifdef CONFIG_OBJ_CORE_MEM_SLAB
5511 struct k_obj_core obj_core;
5515#define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
5518 .wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q), \
5520 .buffer = _slab_buffer, \
5521 .free_list = NULL, \
5522 .info = {_slab_num_blocks, _slab_block_size, 0} \
5561#define K_MEM_SLAB_DEFINE_IN_SECT(name, in_section, slab_block_size, slab_num_blocks, slab_align) \
5562 BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0, \
5563 "slab_block_size must be a multiple of slab_align"); \
5564 BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0), \
5565 "slab_align must be a power of 2"); \
5566 char in_section __aligned(WB_UP( \
5567 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5568 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER( \
5569 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
5594#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
5595 K_MEM_SLAB_DEFINE_IN_SECT(name, __noinit_named(k_mem_slab_buf_##name), slab_block_size, \
5596 slab_num_blocks, slab_align)
5614#define K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, in_section, slab_block_size, slab_num_blocks, \
5616 BUILD_ASSERT(((slab_block_size) % (slab_align)) == 0, \
5617 "slab_block_size must be a multiple of slab_align"); \
5618 BUILD_ASSERT((((slab_align) & ((slab_align) - 1)) == 0), \
5619 "slab_align must be a power of 2"); \
5620 static char in_section __aligned(WB_UP( \
5621 slab_align)) _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5622 static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = Z_MEM_SLAB_INITIALIZER( \
5623 name, _k_mem_slab_buf_##name, WB_UP(slab_block_size), slab_num_blocks)
5639#define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5640 K_MEM_SLAB_DEFINE_IN_SECT_STATIC(name, __noinit_named(k_mem_slab_buf_##name), \
5641 slab_block_size, slab_num_blocks, slab_align)
5665 size_t block_size,
uint32_t num_blocks);
5719 return slab->info.num_used;
5736#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5737 return slab->info.max_used;
5758 return slab->info.num_blocks - slab->info.num_used;
5821 size_t bytes) __attribute_nonnull(1);
5893 __attribute_nonnull(1);
5919 __attribute_nonnull(1);
5936#define Z_HEAP_MIN_SIZE ((sizeof(void *) > 4) ? 56 : 44)
5954#define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
5957 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
5958 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5960 .init_mem = kheap_##name, \
5961 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5979#define K_HEAP_DEFINE(name, bytes) \
5980 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
5981 __noinit_named(kheap_buf_##name))
5997#define K_HEAP_DEFINE_NOCACHE(name, bytes) \
5998 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
6104#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
6106#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
6110enum _poll_types_bits {
6118 _POLL_TYPE_SEM_AVAILABLE,
6121 _POLL_TYPE_DATA_AVAILABLE,
6124 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
6127 _POLL_TYPE_PIPE_DATA_AVAILABLE,
6132#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
6135enum _poll_states_bits {
6137 _POLL_STATE_NOT_READY,
6140 _POLL_STATE_SIGNALED,
6143 _POLL_STATE_SEM_AVAILABLE,
6146 _POLL_STATE_DATA_AVAILABLE,
6149 _POLL_STATE_CANCELLED,
6152 _POLL_STATE_MSGQ_DATA_AVAILABLE,
6155 _POLL_STATE_PIPE_DATA_AVAILABLE,
6160#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
6162#define _POLL_EVENT_NUM_UNUSED_BITS \
6166 + _POLL_NUM_STATES \
6184#define K_POLL_TYPE_IGNORE 0
6185#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
6186#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
6187#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
6188#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
6189#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
6190#define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE)
6201#define K_POLL_STATE_NOT_READY 0
6202#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
6203#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
6204#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
6205#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
6206#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
6207#define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE)
6208#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
6225#define K_POLL_SIGNAL_INITIALIZER(obj) \
6227 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
6272#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
6275 .type = _event_type, \
6276 .state = K_POLL_STATE_NOT_READY, \
6277 .mode = _event_mode, \
6280 .typed_##_event_type = _event_obj, \
6284#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
6288 .type = _event_type, \
6289 .state = K_POLL_STATE_NOT_READY, \
6290 .mode = _event_mode, \
6293 .typed_##_event_type = _event_obj, \
6313 int mode,
void *obj);
6389 unsigned int *signaled,
int *result);
6471#define z_except_reason(reason) ARCH_EXCEPT(reason)
6474#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
6475#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
6477#define __EXCEPT_LOC()
6487#define z_except_reason(reason) do { \
6489 z_fatal_error(reason, NULL); \
6508#define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
6518#define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
6531void z_timer_expiration_handler(
struct _timeout *timeout);
6544__syscall
void k_str_out(
char *c,
size_t n);
6691#include <zephyr/syscalls/kernel.h>
static uint32_t arch_k_cycle_get_32(void)
Definition misc.h:26
static uint64_t arch_k_cycle_get_64(void)
Definition misc.h:33
void(* k_thread_entry_t)(void *p1, void *p2, void *p3)
Thread entry point function type.
Definition arch_interface.h:48
struct z_thread_stack_element k_thread_stack_t
Typedef of struct z_thread_stack_element.
Definition arch_interface.h:46
long atomic_t
Definition atomic_types.h:15
void arch_cpu_atomic_idle(unsigned int key)
Atomically re-enable interrupts and enter low power mode.
void arch_cpu_idle(void)
Power save idle routine.
static _Bool atomic_test_and_set_bit(atomic_t *target, int bit)
Atomically set a bit and test it.
Definition atomic.h:170
static _Bool atomic_test_bit(const atomic_t *target, int bit)
Atomically get and test a bit.
Definition atomic.h:127
static void atomic_clear_bit(atomic_t *target, int bit)
Atomically clear a bit.
Definition atomic.h:191
static uint32_t k_cycle_get_32(void)
Read the hardware clock.
Definition kernel.h:2043
#define K_NO_WAIT
Generate null timeout delay.
Definition kernel.h:1444
int64_t k_uptime_ticks(void)
Get system uptime, in system ticks.
static uint32_t k_uptime_get_32(void)
Get system uptime (32-bit version).
Definition kernel.h:1995
uint32_t k_ticks_t
Tick precision used in timeout APIs.
Definition clock.h:48
static int64_t k_uptime_delta(int64_t *reftime)
Get elapsed time.
Definition kernel.h:2024
static uint32_t k_uptime_seconds(void)
Get system uptime in seconds.
Definition kernel.h:2008
static uint64_t k_cycle_get_64(void)
Read the 64-bit hardware clock.
Definition kernel.h:2058
static int64_t k_uptime_get(void)
Get system uptime.
Definition kernel.h:1971
int k_condvar_signal(struct k_condvar *condvar)
Signals one thread that is pending on the condition variable.
int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex, k_timeout_t timeout)
Waits on the condition variable releasing the mutex lock.
int k_condvar_init(struct k_condvar *condvar)
Initialize a condition variable.
int k_condvar_broadcast(struct k_condvar *condvar)
Unblock all threads that are pending on the condition variable.
static void k_cpu_idle(void)
Make the CPU idle.
Definition kernel.h:6437
static void k_cpu_atomic_idle(unsigned int key)
Make the CPU idle in an atomic fashion.
Definition kernel.h:6456
struct _dnode sys_dnode_t
Doubly-linked list node structure.
Definition dlist.h:54
struct _dnode sys_dlist_t
Doubly-linked list structure.
Definition dlist.h:50
static void sys_dnode_init(sys_dnode_t *node)
initialize node to its state when not in a list
Definition dlist.h:219
uint32_t k_event_wait(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for any of the specified events.
uint32_t k_event_set_masked(struct k_event *event, uint32_t events, uint32_t events_mask)
Set or clear the events in an event object.
uint32_t k_event_wait_all_safe(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for all of the specified events (safe version)
static uint32_t k_event_test(struct k_event *event, uint32_t events_mask)
Test the events currently tracked in the event object.
Definition kernel.h:2674
uint32_t k_event_wait_safe(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for any of the specified events (safe version)
uint32_t k_event_set(struct k_event *event, uint32_t events)
Set the events in an event object.
uint32_t k_event_post(struct k_event *event, uint32_t events)
Post one or more events to an event object.
void k_event_init(struct k_event *event)
Initialize an event object.
uint32_t k_event_clear(struct k_event *event, uint32_t events)
Clear the events in an event object.
uint32_t k_event_wait_all(struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout)
Wait for all of the specified events.
static bool sys_sflist_is_empty(const sys_sflist_t *list)
Test if the given list is empty.
Definition sflist.h:336
struct _sflist sys_sflist_t
Flagged single-linked list structure.
Definition sflist.h:54
int k_float_disable(struct k_thread *thread)
Disable preservation of floating point context information.
int k_float_enable(struct k_thread *thread, unsigned int options)
Enable preservation of floating point context information.
int k_futex_wait(struct k_futex *futex, int expected, k_timeout_t timeout)
Pend the current thread on a futex.
int k_futex_wake(struct k_futex *futex, bool wake_all)
Wake one/all threads pending on a futex.
void * k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout)
Allocate memory from a k_heap.
int k_heap_array_get(struct k_heap **heap)
Get the array of statically defined heaps.
void * k_heap_calloc(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
Allocate and initialize memory for an array of objects from a k_heap.
void k_heap_free(struct k_heap *h, void *mem)
Free memory allocated by k_heap_alloc()
void k_free(void *ptr)
Free memory allocated from heap.
void * k_realloc(void *ptr, size_t size)
Expand the size of an existing allocation.
void k_heap_init(struct k_heap *h, void *mem, size_t bytes)
Initialize a k_heap.
void * k_malloc(size_t size)
Allocate memory from the heap.
void * k_heap_realloc(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
Reallocate memory from a k_heap.
void * k_calloc(size_t nmemb, size_t size)
Allocate memory from heap, array style.
void * k_aligned_alloc(size_t align, size_t size)
Allocate memory from the heap with a specified alignment.
void * k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes, k_timeout_t timeout)
Allocate aligned memory from a k_heap.
bool k_is_in_isr(void)
Determine if code is running at interrupt level.
int k_is_preempt_thread(void)
Determine if code is running in a preemptible thread.
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout)
Receive a mailbox message.
void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
Retrieve mailbox message data into a buffer.
void k_mbox_init(struct k_mbox *mbox)
Initialize a mailbox.
int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout)
Send a mailbox message in a synchronous manner.
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, struct k_sem *sem)
Send a mailbox message in an asynchronous manner.
int k_mem_slab_init(struct k_mem_slab *slab, void *buffer, size_t block_size, uint32_t num_blocks)
Initialize a memory slab.
void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
Free memory allocated from a memory slab.
int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats)
Get the memory stats for a memory slab.
int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab)
Reset the maximum memory usage for a slab.
int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
Allocate memory from a memory slab.
static uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
Get the number of used blocks in a memory slab.
Definition kernel.h:5717
static uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
Get the number of maximum used blocks so far in a memory slab.
Definition kernel.h:5734
static uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
Get the number of unused blocks in a memory slab.
Definition kernel.h:5756
int k_msgq_peek(struct k_msgq *msgq, void *data)
Peek/read a message from a message queue.
uint32_t k_msgq_num_used_get(struct k_msgq *msgq)
Get the number of messages in a message queue.
void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size, uint32_t max_msgs)
Initialize a message queue.
int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout)
Send a message to the end of a message queue.
int k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx)
Peek/read a message from a message queue at the specified index.
uint32_t k_msgq_num_free_get(struct k_msgq *msgq)
Get the amount of free space in a message queue.
void k_msgq_get_attrs(struct k_msgq *msgq, struct k_msgq_attrs *attrs)
Get basic attributes of a message queue.
void k_msgq_purge(struct k_msgq *msgq)
Purge a message queue.
int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs)
Initialize a message queue.
int k_msgq_put_front(struct k_msgq *msgq, const void *data)
Send a message to the front of a message queue.
int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
Receive a message from a message queue.
int k_msgq_cleanup(struct k_msgq *msgq)
Release allocated buffer for a queue.
int k_mutex_unlock(struct k_mutex *mutex)
Unlock a mutex.
int k_mutex_init(struct k_mutex *mutex)
Initialize a mutex.
int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
Lock a mutex.
int k_pipe_write(struct k_pipe *pipe, const uint8_t *data, size_t len, k_timeout_t timeout)
Write data to a pipe.
void k_pipe_close(struct k_pipe *pipe)
Close a pipe.
void k_pipe_reset(struct k_pipe *pipe)
Reset a pipe. This routine resets the pipe, discarding any unread data and unblocking any threads waiting on it.
void k_pipe_init(struct k_pipe *pipe, uint8_t *buffer, size_t buffer_size)
Initialize a pipe.
pipe_flags
Definition kernel.h:5374
int k_pipe_read(struct k_pipe *pipe, uint8_t *data, size_t len, k_timeout_t timeout)
Read data from a pipe. This routine reads up to len bytes of data from the pipe.
@ PIPE_FLAG_RESET
Definition kernel.h:5376
@ PIPE_FLAG_OPEN
Definition kernel.h:5375
void k_poll_signal_reset(struct k_poll_signal *sig)
Reset a poll signal object's state to unsignaled.
k_poll_modes
Definition kernel.h:6193
void k_poll_signal_check(struct k_poll_signal *sig, unsigned int *signaled, int *result)
Fetch the signaled state and result value of a poll signal.
void k_poll_event_init(struct k_poll_event *event, uint32_t type, int mode, void *obj)
Initialize one struct k_poll_event instance.
int k_poll(struct k_poll_event *events, int num_events, k_timeout_t timeout)
Wait for one or many of multiple poll events to occur.
int k_poll_signal_raise(struct k_poll_signal *sig, int result)
Signal a poll signal object.
void k_poll_signal_init(struct k_poll_signal *sig)
Initialize a poll signal object.
@ K_POLL_MODE_NOTIFY_ONLY
Definition kernel.h:6195
@ K_POLL_NUM_MODES
Definition kernel.h:6197
void k_queue_init(struct k_queue *queue)
Initialize a queue.
void * k_queue_get(struct k_queue *queue, k_timeout_t timeout)
Get an element from a queue.
void * k_queue_peek_tail(struct k_queue *queue)
Peek element at the tail of queue.
bool k_queue_unique_append(struct k_queue *queue, void *data)
Append an element to a queue only if it's not present already.
bool k_queue_remove(struct k_queue *queue, void *data)
Remove an element from a queue.
int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
Atomically add a list of elements to a queue.
int32_t k_queue_alloc_append(struct k_queue *queue, void *data)
Append an element to a queue.
void k_queue_cancel_wait(struct k_queue *queue)
Cancel waiting on a queue.
void * k_queue_peek_head(struct k_queue *queue)
Peek element at the head of queue.
void k_queue_prepend(struct k_queue *queue, void *data)
Prepend an element to a queue.
int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
Atomically append a list of elements to a queue.
void k_queue_append(struct k_queue *queue, void *data)
Append an element to the end of a queue.
int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data)
Prepend an element to a queue.
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
Inserts an element to a queue.
int k_queue_is_empty(struct k_queue *queue)
Query a queue to see if it has data available.
void k_sem_reset(struct k_sem *sem)
Resets a semaphore's count to zero.
unsigned int k_sem_count_get(struct k_sem *sem)
Get a semaphore's count.
void k_sem_give(struct k_sem *sem)
Give a semaphore.
int k_sem_take(struct k_sem *sem, k_timeout_t timeout)
Take a semaphore.
int k_sem_init(struct k_sem *sem, unsigned int initial_count, unsigned int limit)
Initialize a semaphore.
struct _slist sys_slist_t
Single-linked list structure.
Definition slist.h:49
struct _snode sys_snode_t
Single-linked list node structure.
Definition slist.h:39
int k_stack_pop(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout)
Pop an element from a stack.
void k_stack_init(struct k_stack *stack, stack_data_t *buffer, uint32_t num_entries)
Initialize a stack.
int k_stack_cleanup(struct k_stack *stack)
Release a stack's allocated buffer.
int k_stack_push(struct k_stack *stack, stack_data_t data)
Push an element onto a stack.
int32_t k_stack_alloc_init(struct k_stack *stack, uint32_t num_entries)
Initialize a stack.
#define SYS_PORT_TRACING_TRACKING_FIELD(type)
Field added to kernel objects so they are tracked.
Definition tracing_macros.h:375
#define IS_ENABLED(config_macro)
Check for macro definition in compiler-visible expressions.
Definition util_macro.h:148
#define BIT(n)
Unsigned integer with bit position n set (signed in assembly language).
Definition util_macro.h:44
#define CONTAINER_OF(ptr, type, field)
Get a pointer to a structure containing the element.
Definition util.h:281
#define EBUSY
Mount device busy.
Definition errno.h:54
int k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
Copy the thread name into a supplied buffer.
void k_yield(void)
Yield the current thread.
const char * k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
Get thread state string.
void k_thread_resume(k_tid_t thread)
Resume a suspended thread.
void * k_thread_custom_data_get(void)
Get current thread's custom data.
void k_thread_abort(k_tid_t thread)
Abort a thread.
int k_thread_name_set(k_tid_t thread, const char *str)
Set current thread name.
void k_thread_priority_set(k_tid_t thread, int prio)
Set a thread's priority.
void k_thread_absolute_deadline_set(k_tid_t thread, int deadline)
Set absolute deadline expiration time for scheduler.
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
Enable thread to run on specified CPU.
void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
Iterate over all the threads in the system without locking.
bool k_can_yield(void)
Check whether it is possible to yield in the current context.
int k_thread_priority_get(k_tid_t thread)
Get a thread's priority.
static void k_thread_heap_assign(struct k_thread *thread, struct k_heap *heap)
Assign a resource memory pool to a thread.
Definition kernel.h:506
FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry, void *p1, void *p2, void *p3)
Drop a thread's privileges permanently to user mode.
int k_thread_join(struct k_thread *thread, k_timeout_t timeout)
Sleep until a thread exits.
k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *thread)
Get time remaining before a thread wakes up, in system ticks.
void k_thread_custom_data_set(void *value)
Set current thread's custom data.
int32_t k_sleep(k_timeout_t timeout)
Put the current thread to sleep.
void k_sched_lock(void)
Lock the scheduler.
static int32_t k_msleep(int32_t ms)
Put the current thread to sleep.
Definition kernel.h:600
void k_busy_wait(uint32_t usec_to_wait)
Cause the current thread to busy wait.
void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks, k_thread_timeslice_fn_t expired, void *data)
Set thread time slice.
static void k_thread_runtime_stats_longest_frame_reset(__maybe_unused struct k_thread *thread)
Resets thread longest frame usage data for specified thread.
Definition kernel.h:120
void k_thread_suspend(k_tid_t thread)
Suspend a thread.
void k_sched_unlock(void)
Unlock the scheduler.
static __attribute_const__ k_tid_t k_current_get(void)
Get thread ID of the current thread.
Definition kernel.h:724
int k_thread_cpu_mask_clear(k_tid_t thread)
Sets all CPU enable masks to zero.
void k_thread_foreach_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb, void *user_data)
Iterate over all the threads running on the specified cpu.
void k_sched_time_slice_set(int32_t slice, int prio)
Set time-slicing period and scope.
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
Prevent thread to run on specified CPU.
void k_wakeup(k_tid_t thread)
Wake up a sleeping thread.
int k_thread_stack_free(k_thread_stack_t *stack)
Free a dynamically allocated thread stack.
k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *thread)
Get time when a thread wakes up, in system ticks.
__attribute_const__ k_tid_t k_sched_current_thread_query(void)
Query thread ID of the current thread.
static void k_thread_start(k_tid_t thread)
Start an inactive thread.
Definition kernel.h:1194
k_tid_t k_thread_create(struct k_thread *new_thread, k_thread_stack_t *stack, size_t stack_size, k_thread_entry_t entry, void *p1, void *p2, void *p3, int prio, uint32_t options, k_timeout_t delay)
Create a thread.
void k_reschedule(void)
Invoke the scheduler.
void k_thread_deadline_set(k_tid_t thread, int deadline)
Set relative deadline expiration time for scheduler.
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb, void *user_data)
Iterate over the threads running on the current cpu without locking.
const char * k_thread_name_get(k_tid_t thread)
Get thread name.
void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
Iterate over all the threads in the system.
static bool k_is_pre_kernel(void)
Test whether startup is in the before-main-task phase.
Definition kernel.h:701
int k_thread_cpu_pin(k_tid_t thread, int cpu)
Pin a thread to a CPU.
int32_t k_usleep(int32_t us)
Put the current thread to sleep with microsecond resolution.
int k_thread_cpu_mask_enable_all(k_tid_t thread)
Sets all CPU enable masks to one.
void(* k_thread_user_cb_t)(const struct k_thread *thread, void *user_data)
Definition kernel.h:127
k_thread_stack_t * k_thread_stack_alloc(size_t size, int flags)
Dynamically allocate a thread stack.
k_ticks_t k_timer_expires_ticks(const struct k_timer *timer)
Get next expiration time of a timer, in system ticks.
void(* k_timer_stop_t)(struct k_timer *timer)
Timer stop function type.
Definition kernel.h:1746
k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer)
Get time remaining before a timer next expires, in system ticks.
void * k_timer_user_data_get(const struct k_timer *timer)
Retrieve the user-specific data from a timer.
void(* k_timer_expiry_t)(struct k_timer *timer)
Timer expiry function type.
Definition kernel.h:1730
void k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn, k_timer_stop_t stop_fn)
Initialize a timer.
void k_timer_start(struct k_timer *timer, k_timeout_t duration, k_timeout_t period)
Start a timer.
static uint32_t k_timer_remaining_get(struct k_timer *timer)
Get time remaining before a timer next expires.
Definition kernel.h:1895
uint32_t k_timer_status_sync(struct k_timer *timer)
Synchronize thread to timer expiration.
void k_timer_stop(struct k_timer *timer)
Stop a timer.
uint32_t k_timer_status_get(struct k_timer *timer)
Read timer status.
void k_timer_user_data_set(struct k_timer *timer, void *user_data)
Associate user-specific data with a timer.
#define k_ticks_to_ms_floor32(t)
Convert ticks to milliseconds.
Definition time_units.h:1707
#define k_ticks_to_sec_floor32(t)
Convert ticks to seconds.
Definition time_units.h:1611
#define k_ticks_to_ms_floor64(t)
Convert ticks to milliseconds.
Definition time_units.h:1723
int k_work_poll_submit_to_queue(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout)
Submit a triggered work item.
static k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
Access the thread that animates a work queue.
Definition kernel.h:4550
static bool k_work_is_pending(const struct k_work *work)
Test whether a work item is currently pending.
Definition kernel.h:4521
int k_work_queue_drain(struct k_work_q *queue, bool plug)
Wait until the work queue has drained, optionally plugging it.
static k_ticks_t k_work_delayable_expires_get(const struct k_work_delayable *dwork)
Get the absolute tick count at which a scheduled delayable work will be submitted.
Definition kernel.h:4538
int k_work_schedule_for_queue(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay)
Submit an idle work item to a queue after a delay.
int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
Busy state flags from the delayable work item.
int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout)
Stop a work queue.
void k_work_init_delayable(struct k_work_delayable *dwork, k_work_handler_t handler)
Initialize a delayable work structure.
int k_work_poll_cancel(struct k_work_poll *work)
Cancel a triggered work item.
void k_work_user_queue_start(struct k_work_user_q *work_q, k_thread_stack_t *stack, size_t stack_size, int prio, const char *name)
Start a workqueue in user mode.
void k_work_poll_init(struct k_work_poll *work, k_work_handler_t handler)
Initialize a triggered work item.
int k_work_cancel(struct k_work *work)
Cancel a work item.
static int k_work_user_submit_to_queue(struct k_work_user_q *work_q, struct k_work_user *work)
Submit a work item to a user mode workqueue.
Definition kernel.h:4677
int k_work_submit_to_queue(struct k_work_q *queue, struct k_work *work)
Submit a work item to a queue.
static bool k_work_user_is_pending(struct k_work_user *work)
Check if a userspace work item is pending.
Definition kernel.h:4654
void(* k_work_handler_t)(struct k_work *work)
The signature for a work item handler function.
Definition kernel.h:3693
int k_work_schedule(struct k_work_delayable *dwork, k_timeout_t delay)
Submit an idle work item to the system work queue after a delay.
static bool k_work_delayable_is_pending(const struct k_work_delayable *dwork)
Test whether a delayed work item is currently pending.
Definition kernel.h:4532
bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork, struct k_work_sync *sync)
Cancel delayable work and wait.
int k_work_cancel_delayable(struct k_work_delayable *dwork)
Cancel delayable work.
static void k_work_user_init(struct k_work_user *work, k_work_user_handler_t handler)
Initialize a userspace work item.
Definition kernel.h:4632
int k_work_queue_unplug(struct k_work_q *queue)
Release a work queue to accept new submissions.
int k_work_reschedule(struct k_work_delayable *dwork, k_timeout_t delay)
Reschedule a work item to the system work queue after a delay.
void(* k_work_user_handler_t)(struct k_work_user *work)
Work item handler function type for user work queues.
Definition kernel.h:4573
bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync)
Cancel a work item and wait for it to complete.
static k_tid_t k_work_user_queue_thread_get(struct k_work_user_q *work_q)
Access the user mode thread that animates a work queue.
Definition kernel.h:4732
int k_work_busy_get(const struct k_work *work)
Busy state flags from the work item.
static struct k_work_delayable * k_work_delayable_from_work(struct k_work *work)
Get the parent delayable work structure from a work pointer.
Definition kernel.h:4527
static k_ticks_t k_work_delayable_remaining_get(const struct k_work_delayable *dwork)
Get the number of ticks until a scheduled delayable work will be submitted.
Definition kernel.h:4544
bool k_work_flush(struct k_work *work, struct k_work_sync *sync)
Wait for last-submitted instance to complete.
int k_work_reschedule_for_queue(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay)
Reschedule a work item to a queue after a delay.
void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *cfg)
Run work queue using calling thread.
int k_work_submit(struct k_work *work)
Submit a work item to the system queue.
bool k_work_flush_delayable(struct k_work_delayable *dwork, struct k_work_sync *sync)
Flush delayable work.
int k_work_poll_submit(struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout)
Submit a triggered work item to the system workqueue.
void k_work_queue_init(struct k_work_q *queue)
Initialize a work queue structure.
void k_work_queue_start(struct k_work_q *queue, k_thread_stack_t *stack, size_t stack_size, int prio, const struct k_work_queue_config *cfg)
Initialize a work queue.
void k_work_init(struct k_work *work, k_work_handler_t handler)
Initialize a (non-delayable) work structure.
@ K_WORK_CANCELING
Flag indicating a work item that is being canceled.
Definition kernel.h:4301
@ K_WORK_QUEUED
Flag indicating a work item that has been submitted to a queue but has not started running.
Definition kernel.h:4308
@ K_WORK_DELAYED
Flag indicating a delayed work item that is scheduled for submission to a queue.
Definition kernel.h:4315
@ K_WORK_RUNNING
Flag indicating a work item that is running under a work queue thread.
Definition kernel.h:4295
@ K_WORK_FLUSHING
Flag indicating a synced work item that is being flushed.
Definition kernel.h:4321
struct k_thread * k_tid_t
Definition thread.h:368
struct k_thread_runtime_stats k_thread_runtime_stats_t
void k_sys_runtime_stats_disable(void)
Disable gathering of system runtime statistics.
int k_thread_runtime_stats_enable(k_tid_t thread)
Enable gathering of runtime statistics for specified thread.
int k_ipi_work_add(struct k_ipi_work *work, uint32_t cpu_bitmask, k_ipi_func_t func)
Add an IPI work item to the IPI work queue.
void k_sys_runtime_stats_enable(void)
Enable gathering of system runtime statistics.
int k_thread_runtime_stats_get(k_tid_t thread, k_thread_runtime_stats_t *stats)
Get the runtime statistics of a thread.
void k_ipi_work_signal(void)
Signal that there is one or more IPI work items to process.
int k_ipi_work_wait(struct k_ipi_work *work, k_timeout_t timeout)
Wait until the IPI work item has been processed by all targeted CPUs.
execution_context_types
Definition kernel.h:91
@ K_ISR
Definition kernel.h:92
@ K_COOP_THREAD
Definition kernel.h:93
@ K_PREEMPT_THREAD
Definition kernel.h:94
void(* k_ipi_func_t)(struct k_ipi_work *work)
Definition kernel.h:3577
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
Get the runtime statistics of all threads.
static void k_ipi_work_init(struct k_ipi_work *work)
Initialize the specified IPI work item.
Definition kernel.h:3604
int k_thread_runtime_stats_disable(k_tid_t thread)
Disable gathering of runtime statistics for specified thread.
int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats)
Get the runtime statistics of all threads on specified cpu.
Header files included by kernel.h.
void(* k_thread_timeslice_fn_t)(struct k_thread *thread, void *data)
Definition kernel_structs.h:314
flags
Definition parser.h:97
state
Definition parser_state.h:29
__UINT32_TYPE__ uint32_t
Definition stdint.h:90
__INTPTR_TYPE__ intptr_t
Definition stdint.h:104
__INT32_TYPE__ int32_t
Definition stdint.h:74
__UINT64_TYPE__ uint64_t
Definition stdint.h:91
__UINT8_TYPE__ uint8_t
Definition stdint.h:88
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:105
__INT64_TYPE__ int64_t
Definition stdint.h:75
_wait_q_t wait_q
Definition kernel.h:3337
Event Structure.
Definition kernel.h:2451
futex structure
Definition kernel.h:2365
atomic_t val
Definition kernel.h:2366
struct k_spinlock lock
Definition kernel.h:5804
struct sys_heap heap
Definition kernel.h:5802
_wait_q_t wait_q
Definition kernel.h:5803
IPI work item structure.
Definition kernel.h:3585
Mailbox Message Structure.
Definition kernel.h:5211
k_tid_t tx_target_thread
target thread id
Definition kernel.h:5221
void * tx_data
sender's message data buffer
Definition kernel.h:5217
k_tid_t rx_source_thread
source thread id
Definition kernel.h:5219
uint32_t info
application-defined information value
Definition kernel.h:5215
size_t size
size of message (in bytes)
Definition kernel.h:5213
Mailbox Structure.
Definition kernel.h:5233
_wait_q_t tx_msg_queue
Transmit messages queue.
Definition kernel.h:5235
struct k_spinlock lock
Definition kernel.h:5238
_wait_q_t rx_msg_queue
Receive message queue.
Definition kernel.h:5237
Memory Domain.
Definition mem_domain.h:80
Memory Partition.
Definition mem_domain.h:55
Message Queue Attributes.
Definition kernel.h:4953
uint32_t used_msgs
Used messages.
Definition kernel.h:4959
size_t msg_size
Message Size.
Definition kernel.h:4955
uint32_t max_msgs
Maximal number of messages.
Definition kernel.h:4957
Message Queue Structure.
Definition kernel.h:4892
size_t msg_size
Message size.
Definition kernel.h:4898
char * read_ptr
Read pointer.
Definition kernel.h:4906
uint32_t used_msgs
Number of used messages.
Definition kernel.h:4910
char * buffer_end
End of message buffer.
Definition kernel.h:4904
struct k_spinlock lock
Lock.
Definition kernel.h:4896
char * write_ptr
Write pointer.
Definition kernel.h:4908
char * buffer_start
Start of message buffer.
Definition kernel.h:4902
uint8_t flags
Message queue.
Definition kernel.h:4915
_wait_q_t wait_q
Message queue wait queue.
Definition kernel.h:4894
uint32_t max_msgs
Maximal number of messages.
Definition kernel.h:4900
Mutex Structure.
Definition kernel.h:3224
uint32_t lock_count
Current lock count.
Definition kernel.h:3231
_wait_q_t wait_q
Mutex wait queue.
Definition kernel.h:3226
int owner_orig_prio
Original thread priority.
Definition kernel.h:3234
struct k_thread * owner
Mutex owner.
Definition kernel.h:3228
Object core structure.
Definition obj_core.h:121
uint8_t flags
Definition kernel.h:5385
struct ring_buf buf
Definition kernel.h:5381
_wait_q_t data
Definition kernel.h:5383
_wait_q_t space
Definition kernel.h:5384
struct k_spinlock lock
Definition kernel.h:5382
size_t waiting
Definition kernel.h:5380
Poll Event.
Definition kernel.h:6235
struct k_msgq * typed_K_POLL_TYPE_MSGQ_DATA_AVAILABLE
Definition kernel.h:6267
void * typed_K_POLL_TYPE_IGNORE
Definition kernel.h:6262
struct k_poll_signal * signal
Definition kernel.h:6263
struct k_pipe * pipe
Definition kernel.h:6268
uint32_t tag
optional user-specified tag, opaque, untouched by the API
Definition kernel.h:6243
struct k_fifo * fifo
Definition kernel.h:6265
struct k_msgq * msgq
Definition kernel.h:6267
struct k_queue * queue
Definition kernel.h:6266
uint32_t unused
unused bits in 32-bit word
Definition kernel.h:6255
struct k_pipe * typed_K_POLL_TYPE_PIPE_DATA_AVAILABLE
Definition kernel.h:6268
uint32_t type
bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values)
Definition kernel.h:6246
struct k_sem * sem
Definition kernel.h:6264
struct k_queue * typed_K_POLL_TYPE_DATA_AVAILABLE
Definition kernel.h:6266
struct k_sem * typed_K_POLL_TYPE_SEM_AVAILABLE
Definition kernel.h:6264
uint32_t state
bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values)
Definition kernel.h:6249
uint32_t mode
mode of operation, from enum k_poll_modes
Definition kernel.h:6252
struct z_poller * poller
PRIVATE - DO NOT TOUCH.
Definition kernel.h:6240
struct k_poll_signal * typed_K_POLL_TYPE_SIGNAL
Definition kernel.h:6263
void * obj
Definition kernel.h:6262
struct k_fifo * typed_K_POLL_TYPE_FIFO_DATA_AVAILABLE
Definition kernel.h:6265
sys_dlist_t poll_events
PRIVATE - DO NOT TOUCH.
Definition kernel.h:6213
int result
custom result value passed to k_poll_signal_raise() if needed
Definition kernel.h:6222
unsigned int signaled
1 if the event has been signaled, 0 otherwise.
Definition kernel.h:6219
struct k_spinlock lock
Definition kernel.h:2075
_wait_q_t wait_q
Definition kernel.h:2076
sys_sflist_t data_q
Definition kernel.h:2074
Semaphore structure.
Definition kernel.h:3429
Kernel Spin Lock.
Definition spinlock.h:45
Thread Structure.
Definition thread.h:252
struct _thread_base base
Definition thread.h:254
struct k_heap * resource_pool
resource pool
Definition thread.h:342
struct __thread_entry entry
thread entry and parameters description
Definition thread.h:281
Kernel timeout type.
Definition clock.h:65
Kernel timer structure.
Definition kernel.h:1652
A structure used to submit work after a delay.
Definition kernel.h:4353
struct _timeout timeout
Definition kernel.h:4358
struct k_work_q * queue
Definition kernel.h:4361
struct k_work work
Definition kernel.h:4355
A structure used to hold work until it can be processed.
Definition kernel.h:4487
sys_slist_t pending
Definition kernel.h:4501
_wait_q_t drainq
Definition kernel.h:4507
k_tid_t thread_id
Definition kernel.h:4494
_wait_q_t notifyq
Definition kernel.h:4504
uint32_t flags
Definition kernel.h:4510
struct k_thread thread
Definition kernel.h:4489
A structure holding optional configuration items for a work queue.
Definition kernel.h:4449
const char * name
The name to be given to the work queue thread.
Definition kernel.h:4454
uint32_t work_timeout_ms
Controls whether work queue monitors work timeouts.
Definition kernel.h:4483
bool essential
Control whether the work queue thread should be marked as essential thread.
Definition kernel.h:4473
bool no_yield
Control whether the work queue thread should yield between items.
Definition kernel.h:4468
A structure holding internal state for a pending synchronous operation on a work item or queue.
Definition kernel.h:4436
struct z_work_canceller canceller
Definition kernel.h:4439
struct z_work_flusher flusher
Definition kernel.h:4438
A structure used to submit work.
Definition kernel.h:4325
k_work_handler_t handler
Definition kernel.h:4334
uint32_t flags
Definition kernel.h:4345
struct k_work_q * queue
Definition kernel.h:4337
sys_snode_t node
Definition kernel.h:4331
A structure to represent a ring buffer.
Definition ring_buffer.h:49
Definition mem_stats.h:24
static __pinned_func bool k_is_user_context(void)
Indicate whether the CPU is currently in user mode.
Definition syscall.h:115
Main header file for tracing subsystem API.
Header file for tracing macros.