Zephyr API Documentation 4.3.99
A Scalable Open Source RTOS

thread.h
/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_THREAD_H_
#define ZEPHYR_INCLUDE_KERNEL_THREAD_H_

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
#include <zephyr/kernel/mm/demand_paging.h>
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */

#include <zephyr/kernel/stats.h>
#include <zephyr/arch/arch_interface.h>

#ifdef CONFIG_THREAD_MONITOR
struct __thread_entry {
        k_thread_entry_t pEntry;
        void *parameter1;
        void *parameter2;
        void *parameter3;
};
#endif /* CONFIG_THREAD_MONITOR */
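
/*
 * Illustrative sketch (not part of the original header): a function with
 * the k_thread_entry_t signature captured by __thread_entry above. The
 * names are hypothetical; p1..p3 carry arbitrary user data.
 *
 *     void blink_entry(void *p1, void *p2, void *p3)
 *     {
 *             struct blink_ctx *ctx = p1;
 *             ARG_UNUSED(p2);
 *             ARG_UNUSED(p3);
 *             for (;;) {
 *                     // toggle an LED, sleep, etc.
 *             }
 *     }
 */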

struct k_thread;

/* can be used for creating 'dummy' threads, e.g. for pending on objects */
struct _thread_base {

        /* this thread's entry in a ready/wait queue */
        union {
                sys_dnode_t qnode_dlist;
                struct rbnode qnode_rb;
        };

        /* wait queue on which the thread is pended (needed only for
         * trees, not dumb lists)
         */
        _wait_q_t *pended_on;

        /* user-facing 'thread options'; values defined in include/zephyr/kernel.h */
        uint8_t user_options;

        /* thread state */
        uint8_t thread_state;

        /*
         * scheduler lock count and thread priority
         *
         * These two fields control the preemptibility of a thread.
         *
         * When the scheduler is locked, sched_locked is decremented, which
         * means that the scheduler is locked for values from 0xff to 0x01. A
         * thread is coop if its prio is negative, thus 0x80 to 0xff when the
         * value is viewed as unsigned.
         *
         * By putting them end-to-end, this means that a thread is
         * non-preemptible if the bundled value is greater than or equal to
         * 0x0080.
         */
        union {
                struct {
#ifdef CONFIG_BIG_ENDIAN
                        uint8_t sched_locked;
                        int8_t prio;
#else /* Little Endian */
                        int8_t prio;
                        uint8_t sched_locked;
#endif /* CONFIG_BIG_ENDIAN */
                };
                uint16_t preempt;
        };
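
        /*
         * Worked example (added for clarity, not in the original source):
         * on either endianness sched_locked lands in the high byte of
         * 'preempt' and prio in the low byte, so:
         *
         *   prio =  5, sched_locked = 0x00 -> preempt = 0x0005 (preemptible)
         *   prio = -1, sched_locked = 0x00 -> preempt = 0x00ff (coop thread)
         *   prio =  5, sched_locked = 0xff -> preempt = 0xff05 (lock taken once)
         *
         * The last two values are >= 0x0080, hence non-preemptible.
         */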

#ifdef CONFIG_SCHED_DEADLINE
        int prio_deadline;
#endif /* CONFIG_SCHED_DEADLINE */

#if defined(CONFIG_SCHED_SCALABLE) || defined(CONFIG_WAITQ_SCALABLE)
        uint32_t order_key;
#endif

#ifdef CONFIG_SMP
        /* True for the per-CPU idle threads */
        uint8_t is_idle;

        /* CPU index on which thread was last run */
        uint8_t cpu;

        /* Recursive count of irq_lock() calls */
        uint8_t global_lock_count;

#endif /* CONFIG_SMP */

#ifdef CONFIG_SCHED_CPU_MASK
        /* "May run on" bits for each CPU */
#if CONFIG_MP_MAX_NUM_CPUS <= 8
        uint8_t cpu_mask;
#else
        uint16_t cpu_mask;
#endif /* CONFIG_MP_MAX_NUM_CPUS */
#endif /* CONFIG_SCHED_CPU_MASK */
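
        /*
         * Usage sketch (added for clarity): cpu_mask is normally managed
         * through the k_thread_cpu_mask_*() APIs rather than written
         * directly, e.g. to pin a not-yet-started thread tid to CPU 1:
         *
         *     k_thread_cpu_mask_clear(tid);
         *     k_thread_cpu_mask_enable(tid, 1);
         *
         * or, equivalently, k_thread_cpu_pin(tid, 1).
         */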

        /* data returned by APIs */
        void *swap_data;

#ifdef CONFIG_SYS_CLOCK_EXISTS
        /* this thread's entry in a timeout queue */
        struct _timeout timeout;
#endif /* CONFIG_SYS_CLOCK_EXISTS */

#ifdef CONFIG_TIMESLICE_PER_THREAD
        int32_t slice_ticks;
        k_thread_timeslice_fn_t slice_expired;
        void *slice_data;
#endif /* CONFIG_TIMESLICE_PER_THREAD */
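
        /*
         * Usage sketch (added for clarity): the three slice_* fields are
         * set together through k_thread_time_slice_set(), e.g.
         *
         *     k_thread_time_slice_set(&my_thread, slice_ticks,
         *                             on_slice_expired, user_data);
         *
         * where on_slice_expired is a k_thread_timeslice_fn_t callback
         * (the names here are hypothetical).
         */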

#ifdef CONFIG_SCHED_THREAD_USAGE
        struct k_cycle_stats usage; /* Track thread usage statistics */
#endif /* CONFIG_SCHED_THREAD_USAGE */
};

typedef struct _thread_base _thread_base_t;

#if defined(CONFIG_THREAD_STACK_INFO)

#if defined(CONFIG_THREAD_RUNTIME_STACK_SAFETY)
struct _thread_stack_usage {
        size_t unused_threshold; /* Threshold below which to trigger hook */
};
#endif

/* Contains the stack information of a thread */
struct _thread_stack_info {
        /* Stack start - Represents the start address of the thread-writable
         * stack area.
         */
        uintptr_t start;

        /* Thread-writable stack buffer size. Represents the size of the actual
         * buffer, starting from the 'start' member, that should be writable by
         * the thread. This comprises the thread stack area, any area reserved
         * for local thread data storage, as well as any area left out due to
         * random adjustments applied to the initial thread stack pointer during
         * thread initialization.
         */
        size_t size;

        /* Adjustment value to the size member, removing any storage
         * used for TLS or random stack base offsets. (start + size - delta)
         * is the initial stack pointer for a thread. May be 0.
         */
        size_t delta;

#if defined(CONFIG_THREAD_STACK_MEM_MAPPED)
        struct {
                /** Base address of the memory-mapped stack object */
                k_thread_stack_t *addr;

                /** Size of the memory-mapped stack object */
                size_t sz;
        } mapped;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */

#if defined(CONFIG_THREAD_RUNTIME_STACK_SAFETY)
        struct _thread_stack_usage usage;
#endif
};

typedef struct _thread_stack_info _thread_stack_info_t;
#endif /* CONFIG_THREAD_STACK_INFO */
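
/*
 * Worked example (added for clarity, values hypothetical): for a stack
 * with start = 0x20001000, size = 0x800 and delta = 0x40 (say, TLS plus
 * a random base offset), the initial stack pointer is
 *
 *     start + size - delta = 0x20001000 + 0x800 - 0x40 = 0x200017c0
 *
 * and the thread may write anywhere in [start, start + size).
 */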

#if defined(CONFIG_USERSPACE)
struct _mem_domain_info {
#ifdef CONFIG_MEM_DOMAIN_HAS_THREAD_LIST
        /** node in the memory domain's list of member threads */
        sys_dnode_t thread_mem_domain_node;
#endif /* CONFIG_MEM_DOMAIN_HAS_THREAD_LIST */
        /** memory domain of the thread */
        struct k_mem_domain *mem_domain;
};

typedef struct _mem_domain_info _mem_domain_info_t;
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
struct _thread_userspace_local_data {
#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
        int errno_var;
#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */
};
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */

/* Thread runtime statistics */
typedef struct k_thread_runtime_stats {
#ifdef CONFIG_SCHED_THREAD_USAGE
        /*
         * For CPU stats, execution_cycles is the sum of non-idle + idle cycles.
         * For thread stats, execution_cycles = total_cycles.
         */
        uint64_t execution_cycles; /* total # of cycles (cpu: non-idle + idle) */
        uint64_t total_cycles;     /* total # of non-idle cycles */
#endif /* CONFIG_SCHED_THREAD_USAGE */

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        /*
         * For threads, the following fields refer to the time spent executing
         * as bounded by when the thread was scheduled in and scheduled out.
         * For CPUs, the same fields refer to the time spent executing
         * non-idle threads as bounded by the idle thread(s).
         */

        uint64_t current_cycles; /* current # of non-idle cycles */
        uint64_t peak_cycles;    /* peak # of non-idle cycles */
        uint64_t average_cycles; /* average # of non-idle cycles */
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        /*
         * This field is always zero for individual threads. It only comes
         * into play when gathering statistics for the CPU. In that case it
         * represents the total number of cycles spent idling.
         */

        uint64_t idle_cycles;
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

#if defined(__cplusplus) && !defined(CONFIG_SCHED_THREAD_USAGE) && \
        !defined(CONFIG_SCHED_THREAD_USAGE_ANALYSIS) && !defined(CONFIG_SCHED_THREAD_USAGE_ALL)
        /* If none of the above Kconfig values are defined, this struct will have
         * size 0 in C, which is not allowed in C++ (there it would have size 1).
         * To prevent this, we add a 1-byte dummy variable when the struct would
         * otherwise be empty.
         */
        uint8_t dummy;
#endif
} k_thread_runtime_stats_t;

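/*
 * Usage sketch (added for clarity): with runtime statistics enabled,
 * these counters are typically read via k_thread_runtime_stats_get()
 * rather than accessed directly, e.g.
 *
 *     k_thread_runtime_stats_t stats;
 *
 *     if (k_thread_runtime_stats_get(tid, &stats) == 0) {
 *             // stats.execution_cycles holds the thread's total cycles
 *     }
 */
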
struct z_poller {
        bool is_polling;
        uint8_t mode;
};

/** Thread Structure */
struct k_thread {

        struct _thread_base base;

        /** defined by the architecture, but all archs need these */
        struct _callee_saved callee_saved;

        /** static thread init data */
        void *init_data;

        /** threads waiting in k_thread_join() */
        _wait_q_t join_queue;

#if defined(CONFIG_POLL)
        struct z_poller poller;
#endif /* CONFIG_POLL */

#if defined(CONFIG_EVENTS)
        struct k_thread *next_event_link;

        uint32_t events; /* dual purpose - wait on and then received */
        uint32_t event_options;

        /** true if the thread should not be woken when its event wait times out */
        bool no_wake_on_timeout;
#endif /* CONFIG_EVENTS */

#if defined(CONFIG_THREAD_MONITOR)
        /** thread entry and parameters description */
        struct __thread_entry entry;

        /** next item in list of all threads */
        struct k_thread *next_thread;
#endif /* CONFIG_THREAD_MONITOR */

#if defined(CONFIG_THREAD_NAME)
        /** Thread name */
        char name[CONFIG_THREAD_MAX_NAME_LEN];
#endif /* CONFIG_THREAD_NAME */

#ifdef CONFIG_THREAD_CUSTOM_DATA
        /** crude thread-local storage */
        void *custom_data;
#endif /* CONFIG_THREAD_CUSTOM_DATA */
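
        /*
         * Usage sketch (added for clarity): custom_data is accessed from
         * the current thread through the custom-data APIs, e.g.
         *
         *     k_thread_custom_data_set(&my_state);
         *     ...
         *     struct my_state *s = k_thread_custom_data_get();
         *
         * (my_state is a hypothetical application structure.)
         */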

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
        struct _thread_userspace_local_data *userspace_local_data;
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */

#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
#ifndef CONFIG_USERSPACE
        /** per-thread errno variable */
        int errno_var;
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */

#if defined(CONFIG_THREAD_STACK_INFO)
        /** Stack Info */
        struct _thread_stack_info stack_info;
#endif /* CONFIG_THREAD_STACK_INFO */

#if defined(CONFIG_USERSPACE)
        /** memory domain info of the thread */
        struct _mem_domain_info mem_domain_info;

        /** Base address of thread stack */
        k_thread_stack_t *stack_obj;

        /** current syscall frame pointer */
        void *syscall_frame;
#endif /* CONFIG_USERSPACE */

#if defined(CONFIG_USE_SWITCH)
        /* When using __switch() a few previously arch-specific items
         * become part of the core OS
         */

        /** z_swap() return value */
        int swap_retval;

        /** Context handle returned via arch_switch() */
        void *switch_handle;
#endif /* CONFIG_USE_SWITCH */

        /** resource pool */
        struct k_heap *resource_pool;

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
        /* Pointer to arch-specific TLS area */
        uintptr_t tls;
#endif /* CONFIG_THREAD_LOCAL_STORAGE */

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
        /** Paging statistics */
        struct k_mem_paging_stats_t paging_stats;
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */

#ifdef CONFIG_OBJ_CORE_THREAD
        struct k_obj_core obj_core;
#endif /* CONFIG_OBJ_CORE_THREAD */

#ifdef CONFIG_SMP
        /** threads waiting in k_thread_suspend() */
        _wait_q_t halt_queue;
#endif /* CONFIG_SMP */

        /** arch-specifics: must always be at the end */
        struct _thread_arch arch;
};

typedef struct k_thread _thread_t;
typedef struct k_thread *k_tid_t;

#endif /* ZEPHYR_INCLUDE_KERNEL_THREAD_H_ */
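
Usage example (not part of the header): a minimal sketch of how the public
pieces above fit together. A k_thread object and a k_thread_stack_t stack
are handed to k_thread_create(), the entry function matches the
k_thread_entry_t signature, and the returned k_tid_t is passed to
k_thread_join(), which pends the caller on the thread's join_queue. The
names worker, worker_stack and WORKER_PRIO are hypothetical.

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

#define WORKER_STACK_SIZE 1024
#define WORKER_PRIO       5

K_THREAD_STACK_DEFINE(worker_stack, WORKER_STACK_SIZE);
static struct k_thread worker;

/* Entry point with the k_thread_entry_t signature */
static void worker_entry(void *p1, void *p2, void *p3)
{
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);
        printk("worker running\n");
}

int main(void)
{
        k_tid_t tid = k_thread_create(&worker, worker_stack,
                                      K_THREAD_STACK_SIZEOF(worker_stack),
                                      worker_entry, NULL, NULL, NULL,
                                      WORKER_PRIO, 0, K_NO_WAIT);

        k_thread_name_set(tid, "worker"); /* requires CONFIG_THREAD_NAME */
        k_thread_join(tid, K_FOREVER);    /* pends on the thread's join_queue */

        return 0;
}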