Zephyr API Documentation 3.6.99
A Scalable Open Source RTOS
thread.h
1/*
2 * Copyright (c) 2016, Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7#ifndef ZEPHYR_INCLUDE_KERNEL_THREAD_H_
8#define ZEPHYR_INCLUDE_KERNEL_THREAD_H_
9
10#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
11#include <zephyr/kernel/mm/demand_paging.h>
12#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
13
14#include <zephyr/kernel/stats.h>
15#include <zephyr/arch/arch_interface.h>
16
34#ifdef CONFIG_THREAD_MONITOR
35struct __thread_entry {
36 k_thread_entry_t pEntry;
37 void *parameter1;
38 void *parameter2;
39 void *parameter3;
40};
41#endif /* CONFIG_THREAD_MONITOR */
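
Any function with the k_thread_entry_t signature (declared in arch_interface.h) can serve as the entry point recorded above. A minimal, purely illustrative sketch, not part of this header:

/* Hypothetical entry function: the kernel invokes it with the three opaque
 * parameters passed at thread creation; under CONFIG_THREAD_MONITOR its
 * address and parameters are kept in struct __thread_entry.
 */
static void sample_entry(void *p1, void *p2, void *p3)
{
	int *counter = p1;      /* application-defined meaning */

	(void)p2;
	(void)p3;
	(*counter)++;
}
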
42
43struct k_thread;
44
45/*
46 * This _pipe_desc structure is used by the pipes kernel module when
47 * CONFIG_PIPES has been selected.
48 */
49
50struct _pipe_desc {
51 sys_dnode_t node;
52 unsigned char *buffer; /* Position in src/dest buffer */
53 size_t bytes_to_xfer; /* # bytes left to transfer */
54 struct k_thread *thread; /* Back pointer to pended thread */
55};
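
A hedged sketch of how such a descriptor could be advanced as data is copied (illustrative only; the real logic lives in the kernel pipes implementation, and pipe_desc_copy is a hypothetical helper name):

#include <string.h>   /* memcpy */

static size_t pipe_desc_copy(struct _pipe_desc *desc,
			     const unsigned char *src, size_t len)
{
	size_t n = (len < desc->bytes_to_xfer) ? len : desc->bytes_to_xfer;

	memcpy(desc->buffer, src, n);
	desc->buffer += n;          /* new position in the destination buffer */
	desc->bytes_to_xfer -= n;   /* bytes still left to transfer */

	/* once bytes_to_xfer reaches 0, desc->thread can be unpended */
	return n;
}
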
56
57/* can be used for creating 'dummy' threads, e.g. for pending on objects */
58struct _thread_base {
59
60 /* this thread's entry in a ready/wait queue */
61 union {
62 sys_dnode_t qnode_dlist;
63 struct rbnode qnode_rb;
64 };
65
66 /* wait queue on which the thread is pended (needed only for
67 * trees, not dumb lists)
68 */
69 _wait_q_t *pended_on;
70
71 /* user facing 'thread options'; values defined in include/kernel.h */
72 uint8_t user_options;
73
74 /* thread state */
75 uint8_t thread_state;
76
77 /*
78 * scheduler lock count and thread priority
79 *
80 * These two fields control the preemptibility of a thread.
81 *
82 * When the scheduler is locked, sched_locked is decremented, which
83 * means that the scheduler is locked for values from 0xff to 0x01. A
 84 * thread is cooperative if its prio is negative, i.e. 0x80 to 0xff
 85 * when the value is viewed as unsigned.
 86 *
 87 * By putting them end-to-end, a thread is non-preemptible if the
 88 * bundled 16-bit value is greater than or equal to 0x0080 (see the
 89 * sketch after this struct).
90 */
91 union {
92 struct {
93#ifdef CONFIG_BIG_ENDIAN
94 uint8_t sched_locked;
95 int8_t prio;
96#else /* Little Endian */
97 int8_t prio;
98 uint8_t sched_locked;
99#endif /* CONFIG_BIG_ENDIAN */
100 };
101 uint16_t preempt;
102 };
103
104#ifdef CONFIG_SCHED_DEADLINE
105 int prio_deadline;
106#endif /* CONFIG_SCHED_DEADLINE */
107
108 uint32_t order_key;
109
110#ifdef CONFIG_SMP
111 /* True for the per-CPU idle threads */
112 uint8_t is_idle;
113
114 /* CPU index on which thread was last run */
115 uint8_t cpu;
116
117 /* Recursive count of irq_lock() calls */
118 uint8_t global_lock_count;
119
120#endif /* CONFIG_SMP */
121
122#ifdef CONFIG_SCHED_CPU_MASK
123 /* "May run on" bits for each CPU */
124#if CONFIG_MP_MAX_NUM_CPUS <= 8
125 uint8_t cpu_mask;
126#else
127 uint16_t cpu_mask;
128#endif /* CONFIG_MP_MAX_NUM_CPUS */
129#endif /* CONFIG_SCHED_CPU_MASK */
130
131 /* data returned by APIs */
132 void *swap_data;
133
134#ifdef CONFIG_SYS_CLOCK_EXISTS
135 /* this thread's entry in a timeout queue */
136 struct _timeout timeout;
137#endif /* CONFIG_SYS_CLOCK_EXISTS */
138
139#ifdef CONFIG_TIMESLICE_PER_THREAD
140 int32_t slice_ticks;
141 k_thread_timeslice_fn_t slice_expired;
142 void *slice_data;
143#endif /* CONFIG_TIMESLICE_PER_THREAD */
144
145#ifdef CONFIG_SCHED_THREAD_USAGE
146 struct k_cycle_stats usage; /* Track thread usage statistics */
147#endif /* CONFIG_SCHED_THREAD_USAGE */
148};
149
150typedef struct _thread_base _thread_base_t;
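
The comment inside struct _thread_base above explains why prio and sched_locked are packed into one 16-bit preempt value; the declaration order is swapped per endianness so that, read natively, preempt always equals sched_locked * 256 + (uint8_t)prio. The sketch below is not kernel code, it only spells out the resulting test:

#include <stdbool.h>

/* Illustrative only: preempt >= 0x0080 exactly when prio is negative
 * (cooperative thread) or sched_locked is non-zero (scheduler lock held),
 * i.e. when the thread must not be preempted.
 */
static bool thread_is_non_preemptible(const struct _thread_base *base)
{
	return base->preempt >= 0x0080U;
}
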
151
152#if defined(CONFIG_THREAD_STACK_INFO)
153/* Contains the stack information of a thread */
154struct _thread_stack_info {
155 /* Stack start - Represents the start address of the thread-writable
156 * stack area.
157 */
158 uintptr_t start;
159
160 /* Thread writable stack buffer size. Represents the size of the actual
161 * buffer, starting from the 'start' member, that should be writable by
 162 * the thread. This comprises the thread stack area, any area reserved
 163 * for local thread data storage, as well as any area left out due to
164 * random adjustments applied to the initial thread stack pointer during
165 * thread initialization.
166 */
167 size_t size;
168
169 /* Adjustment value to the size member, removing any storage
170 * used for TLS or random stack base offsets. (start + size - delta)
171 * is the initial stack pointer for a thread. May be 0.
172 */
173 size_t delta;
174
175#if defined(CONFIG_THREAD_STACK_MEM_MAPPED)
176 struct {
178 k_thread_stack_t *addr;
179
181 size_t sz;
182 } mapped;
183#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
184};
185
186typedef struct _thread_stack_info _thread_stack_info_t;
187#endif /* CONFIG_THREAD_STACK_INFO */
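
As the comments in _thread_stack_info state, (start + size - delta) is a thread's initial stack pointer. A small illustrative sketch of that relation and of a bounds check against the writable area (assumes CONFIG_THREAD_STACK_INFO; these helpers are not kernel APIs):

#include <stdbool.h>
#include <stdint.h>

static uintptr_t initial_stack_pointer(const struct _thread_stack_info *info)
{
	/* delta removes TLS storage and any random stack-base offset */
	return info->start + info->size - info->delta;
}

static bool in_writable_stack(const struct _thread_stack_info *info,
			      uintptr_t addr)
{
	return (addr >= info->start) && ((addr - info->start) < info->size);
}
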
188
189#if defined(CONFIG_USERSPACE)
190struct _mem_domain_info {
192 sys_dnode_t mem_domain_q_node;
194 struct k_mem_domain *mem_domain;
195};
196
197#endif /* CONFIG_USERSPACE */
198
199#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
200struct _thread_userspace_local_data {
201#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
202 int errno_var;
203#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */
204};
205#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
206
207typedef struct k_thread_runtime_stats {
208#ifdef CONFIG_SCHED_THREAD_USAGE
209 /*
210 * For CPU stats, execution_cycles is the sum of non-idle + idle cycles.
211 * For thread stats, execution_cycles = total_cycles.
212 */
213 uint64_t execution_cycles; /* total # of cycles (cpu: non-idle + idle) */
214 uint64_t total_cycles; /* total # of non-idle cycles */
215#endif /* CONFIG_SCHED_THREAD_USAGE */
216
217#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
218 /*
219 * For threads, the following fields refer to the time spent executing
220 * as bounded by when the thread was scheduled in and scheduled out.
221 * For CPUs, the same fields refer to the time spent executing
222 * non-idle threads as bounded by the idle thread(s).
223 */
224
225 uint64_t current_cycles; /* current # of non-idle cycles */
226 uint64_t peak_cycles; /* peak # of non-idle cycles */
227 uint64_t average_cycles; /* average # of non-idle cycles */
228#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
229
230#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
231 /*
232 * This field is always zero for individual threads. It only comes
233 * into play when gathering statistics for the CPU. In that case it
234 * represents the total number of cycles spent idling.
235 */
236
237 uint64_t idle_cycles;
238#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
239
240#if defined(__cplusplus) && !defined(CONFIG_SCHED_THREAD_USAGE) && \
241 !defined(CONFIG_SCHED_THREAD_USAGE_ANALYSIS) && !defined(CONFIG_SCHED_THREAD_USAGE_ALL)
 242 /* If none of the above Kconfig options are enabled, this struct would have
 243 * size 0 in C, which is not allowed in C++ (where it has size 1). To prevent
 244 * this, add a 1-byte dummy member when the struct would otherwise be empty.
245 */
246 uint8_t dummy;
247#endif
248} k_thread_runtime_stats_t;
249
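
A sketch of how these counters might be consumed from application code, assuming the runtime statistics API declared in kernel.h (k_thread_runtime_stats_all_get()) and both CONFIG_SCHED_THREAD_USAGE and CONFIG_SCHED_THREAD_USAGE_ALL enabled so that execution_cycles and idle_cycles are populated:

#include <zephyr/kernel.h>

/* Rough CPU load in percent: non-idle cycles over all cycles. Sketch only. */
static uint32_t cpu_load_percent(void)
{
	k_thread_runtime_stats_t stats;

	if (k_thread_runtime_stats_all_get(&stats) != 0 ||
	    stats.execution_cycles == 0U) {
		return 0U;
	}

	return (uint32_t)(((stats.execution_cycles - stats.idle_cycles) * 100U) /
			  stats.execution_cycles);
}
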
250struct z_poller {
251 bool is_polling;
252 uint8_t mode;
253};
254
259struct k_thread {
260
261 struct _thread_base base;
262
 263 /** defined by the architecture, but all archs need these */
 264 struct _callee_saved callee_saved;
265
 266 /** static thread init data */
 267 void *init_data;
 268
 269 /** threads waiting in k_thread_join() */
270 _wait_q_t join_queue;
271
272#if defined(CONFIG_POLL)
273 struct z_poller poller;
274#endif /* CONFIG_POLL */
275
276#if defined(CONFIG_EVENTS)
277 struct k_thread *next_event_link;
278
279 uint32_t events;
280 uint32_t event_options;
281
283 bool no_wake_on_timeout;
284#endif /* CONFIG_EVENTS */
285
286#if defined(CONFIG_THREAD_MONITOR)
 287 /** thread entry and parameters description */
 288 struct __thread_entry entry;
289
 290 /** next item in list of all threads */
 291 struct k_thread *next_thread;
292#endif /* CONFIG_THREAD_MONITOR */
293
294#if defined(CONFIG_THREAD_NAME)
296 char name[CONFIG_THREAD_MAX_NAME_LEN];
297#endif /* CONFIG_THREAD_NAME */
298
299#ifdef CONFIG_THREAD_CUSTOM_DATA
 300 /** crude thread-local storage */
 301 void *custom_data;
302#endif /* CONFIG_THREAD_CUSTOM_DATA */
303
304#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
305 struct _thread_userspace_local_data *userspace_local_data;
306#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
307
308#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
309#ifndef CONFIG_USERSPACE
311 int errno_var;
312#endif /* CONFIG_USERSPACE */
313#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */
314
315#if defined(CONFIG_THREAD_STACK_INFO)
 316 /** Stack Info */
 317 struct _thread_stack_info stack_info;
318#endif /* CONFIG_THREAD_STACK_INFO */
319
320#if defined(CONFIG_USERSPACE)
 321 /** memory domain info of the thread */
 322 struct _mem_domain_info mem_domain_info;
323
 329 /** Base address of thread stack */
 330 k_thread_stack_t *stack_obj;
 331
 332 /** current syscall frame pointer */
 333 void *syscall_frame;
334#endif /* CONFIG_USERSPACE */
335
336
337#if defined(CONFIG_USE_SWITCH)
338 /* When using __switch() a few previously arch-specific items
339 * become part of the core OS
340 */
341
 342 /** z_swap() return value */
 343 int swap_retval;
 344
 345 /** Context handle returned via arch_switch() */
 346 void *switch_handle;
347#endif /* CONFIG_USE_SWITCH */
 348 /** resource pool */
 349 struct k_heap *resource_pool;
350
351#if defined(CONFIG_THREAD_LOCAL_STORAGE)
352 /* Pointer to arch-specific TLS area */
353 uintptr_t tls;
354#endif /* CONFIG_THREAD_LOCAL_STORAGE */
355
356#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
358 struct k_mem_paging_stats_t paging_stats;
359#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
360
361#ifdef CONFIG_PIPES
363 struct _pipe_desc pipe_desc;
364#endif /* CONFIG_PIPES */
365
366#ifdef CONFIG_OBJ_CORE_THREAD
367 struct k_obj_core obj_core;
368#endif /* CONFIG_OBJ_CORE_THREAD */
369
370#ifdef CONFIG_SMP
 371 /** threads waiting in k_thread_suspend() */
 372 _wait_q_t halt_queue;
373#endif /* CONFIG_SMP */
374
 375 /** arch-specifics: must always be at the end */
 376 struct _thread_arch arch;
377};
378
379typedef struct k_thread _thread_t;
380typedef struct k_thread *k_tid_t;
381
382#endif /* ZEPHYR_INCLUDE_KERNEL_THREAD_H_ */
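
For context, a minimal application-side sketch (not part of this header) showing how struct k_thread, k_thread_stack_t and k_tid_t fit together through the standard k_thread_create() API; the names, stack size and priority below are arbitrary:

#include <zephyr/kernel.h>

#define MY_STACK_SIZE 1024          /* hypothetical size and priority */
#define MY_PRIORITY   5

K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
static struct k_thread my_thread;   /* storage for the thread object */

static void my_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		k_sleep(K_MSEC(100));
	}
}

void start_my_thread(void)
{
	k_tid_t tid = k_thread_create(&my_thread, my_stack,
				      K_THREAD_STACK_SIZEOF(my_stack),
				      my_entry, NULL, NULL, NULL,
				      MY_PRIORITY, 0, K_NO_WAIT);

	k_thread_name_set(tid, "my_thread");   /* takes effect with CONFIG_THREAD_NAME */
}
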