Line data Source code
1 0 : /*
2 : * Copyright (c) 2016, Wind River Systems, Inc.
3 : *
4 : * SPDX-License-Identifier: Apache-2.0
5 : */
6 :
7 : #ifndef ZEPHYR_INCLUDE_KERNEL_THREAD_H_
8 : #define ZEPHYR_INCLUDE_KERNEL_THREAD_H_
9 :
10 : #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
11 : #include <zephyr/kernel/mm/demand_paging.h>
12 : #endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
13 :
14 : #include <zephyr/kernel/stats.h>
15 : #include <zephyr/arch/arch_interface.h>
16 :
17 : /**
18 : * @typedef k_thread_entry_t
19 : * @brief Thread entry point function type.
20 : *
21 : * A thread's entry point function is invoked when the thread starts executing.
22 : * Up to 3 argument values can be passed to the function.
23 : *
24 : * The thread terminates execution permanently if the entry point function
25 : * returns. The thread is responsible for releasing any shared resources
26 : * it may own (such as mutexes and dynamically allocated memory), prior to
27 : * returning.
28 : *
29 : * @param p1 First argument.
30 : * @param p2 Second argument.
31 : * @param p3 Third argument.
32 : */
33 :
34 : #ifdef CONFIG_THREAD_MONITOR
/* Record of a thread's entry point and its three arguments. Only kept
 * when thread monitoring is enabled, so debug/monitor tooling can see
 * how a thread was started.
 */
35 : struct __thread_entry {
36 : k_thread_entry_t pEntry; /* thread entry point routine */
37 : void *parameter1; /* first argument passed to pEntry */
38 : void *parameter2; /* second argument passed to pEntry */
39 : void *parameter3; /* third argument passed to pEntry */
40 : };
41 : #endif /* CONFIG_THREAD_MONITOR */
42 :
43 : struct k_thread;
44 :
45 : /* can be used for creating 'dummy' threads, e.g. for pending on objects */
46 : struct _thread_base {
47 :
48 : /* this thread's entry in a ready/wait queue */
49 : union {
50 : sys_dnode_t qnode_dlist;
51 : struct rbnode qnode_rb;
52 : };
53 :
54 : /* wait queue on which the thread is pended (needed only for
55 : * trees, not dumb lists)
56 : */
57 : _wait_q_t *pended_on;
58 :
59 : /* user facing 'thread options'; values defined in include/kernel.h */
60 : uint8_t user_options;
61 :
62 : /* thread state */
63 : uint8_t thread_state;
64 :
65 : /*
66 : * scheduler lock count and thread priority
67 : *
68 : * These two fields control the preemptibility of a thread.
69 : *
70 : * When the scheduler is locked, sched_locked is decremented, which
71 : * means that the scheduler is locked for values from 0xff to 0x01. A
72 : * thread is coop if its prio is negative, thus 0x80 to 0xff when
73 : * looked at the value as unsigned.
74 : *
75 : * By putting them end-to-end, this means that a thread is
76 : * non-preemptible if the bundled value is greater than or equal to
77 : * 0x0080.
78 : */
79 : union {
80 : struct {
81 : #ifdef CONFIG_BIG_ENDIAN
82 : uint8_t sched_locked;
83 : int8_t prio;
84 : #else /* Little Endian */
85 : int8_t prio;
86 : uint8_t sched_locked;
87 : #endif /* CONFIG_BIG_ENDIAN */
88 : };
89 : uint16_t preempt; /* combined view of prio + sched_locked; see comment above */
90 : };
91 :
92 : #ifdef CONFIG_SCHED_DEADLINE
93 : int prio_deadline; /* deadline value for CONFIG_SCHED_DEADLINE ordering -- NOTE(review): exact comparison semantics live in the scheduler, confirm there */
94 : #endif /* CONFIG_SCHED_DEADLINE */
95 :
96 : #if defined(CONFIG_SCHED_SCALABLE) || defined(CONFIG_WAITQ_SCALABLE)
97 : uint32_t order_key; /* ordering key used by the scalable (rbtree-based) ready/wait queues */
98 : #endif
99 :
100 : #ifdef CONFIG_SMP
101 : /* True for the per-CPU idle threads */
102 : uint8_t is_idle;
103 :
104 : /* CPU index on which thread was last run */
105 : uint8_t cpu;
106 :
107 : /* Recursive count of irq_lock() calls */
108 : uint8_t global_lock_count;
109 :
110 : #endif /* CONFIG_SMP */
111 :
112 : #ifdef CONFIG_SCHED_CPU_MASK
113 : /* "May run on" bits for each CPU */
114 : #if CONFIG_MP_MAX_NUM_CPUS <= 8
115 : uint8_t cpu_mask;
116 : #else
117 : uint16_t cpu_mask;
118 : #endif /* CONFIG_MP_MAX_NUM_CPUS */
119 : #endif /* CONFIG_SCHED_CPU_MASK */
120 :
121 : /* data returned by APIs */
122 : void *swap_data;
123 :
124 : #ifdef CONFIG_SYS_CLOCK_EXISTS
125 : /* this thread's entry in a timeout queue */
126 : struct _timeout timeout;
127 : #endif /* CONFIG_SYS_CLOCK_EXISTS */
128 :
129 : #ifdef CONFIG_TIMESLICE_PER_THREAD
130 : int32_t slice_ticks; /* per-thread time slice length, in ticks */
131 : k_thread_timeslice_fn_t slice_expired; /* callback run when the slice expires */
132 : void *slice_data; /* opaque argument -- presumably handed to slice_expired; confirm in timeslicing code */
133 : #endif /* CONFIG_TIMESLICE_PER_THREAD */
134 :
135 : #ifdef CONFIG_SCHED_THREAD_USAGE
136 : struct k_cycle_stats usage; /* Track thread usage statistics */
137 : #endif /* CONFIG_SCHED_THREAD_USAGE */
138 : };
139 :
140 : typedef struct _thread_base _thread_base_t;
141 :
142 : #if defined(CONFIG_THREAD_STACK_INFO)
143 : /* Contains the stack information of a thread */
144 : struct _thread_stack_info {
145 : /* Stack start - Represents the start address of the thread-writable
146 : * stack area.
147 : */
148 : uintptr_t start;
149 :
150 : /* Thread writable stack buffer size. Represents the size of the actual
151 : * buffer, starting from the 'start' member, that should be writable by
152 : * the thread. This comprises of the thread stack area, any area reserved
153 : * for local thread data storage, as well as any area left-out due to
154 : * random adjustments applied to the initial thread stack pointer during
155 : * thread initialization.
156 : */
157 : size_t size;
158 :
159 : /* Adjustment value to the size member, removing any storage
160 : * used for TLS or random stack base offsets. (start + size - delta)
161 : * is the initial stack pointer for a thread. May be 0.
162 : */
163 : size_t delta;
164 :
165 : #if defined(CONFIG_THREAD_STACK_MEM_MAPPED)
166 : struct {
167 : /** Base address of the memory mapped thread stack */
168 : k_thread_stack_t *addr;
169 :
170 : /** Size of whole mapped stack object */
171 : size_t sz;
172 : } mapped;
173 : #endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
174 : };
175 :
176 : typedef struct _thread_stack_info _thread_stack_info_t; /* convenience alias */
177 : #endif /* CONFIG_THREAD_STACK_INFO */
178 :
179 : #if defined(CONFIG_USERSPACE)
/* Links a thread to the memory domain it currently belongs to. */
180 : struct _mem_domain_info {
181 : /** memory domain queue node */
182 : sys_dnode_t mem_domain_q_node;
183 : /** memory domain of the thread */
184 : struct k_mem_domain *mem_domain;
185 : };
186 :
187 : typedef struct _mem_domain_info _mem_domain_info_t; /* convenience alias */
188 : #endif /* CONFIG_USERSPACE */
189 :
190 : #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
/* Per-thread data kept separate so it can remain accessible from user
 * mode -- NOTE(review): actual placement/permissions are handled by the
 * userspace machinery elsewhere; confirm there.
 */
191 : struct _thread_userspace_local_data {
192 : #if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
193 : int errno_var; /* per-thread errno storage */
194 : #endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */
195 : };
196 : #endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
197 :
/* Runtime execution statistics for a single thread or a whole CPU.
 * Which fields are present depends on the CONFIG_SCHED_THREAD_USAGE*
 * options; field meanings differ slightly between the thread and CPU
 * cases, as documented per-field below.
 */
198 0 : typedef struct k_thread_runtime_stats {
199 : #ifdef CONFIG_SCHED_THREAD_USAGE
200 : /*
201 : * For CPU stats, execution_cycles is the sum of non-idle + idle cycles.
202 : * For thread stats, execution_cycles = total_cycles.
203 : */
204 : uint64_t execution_cycles; /* total # of cycles (cpu: non-idle + idle) */
205 : uint64_t total_cycles; /* total # of non-idle cycles */
206 : #endif /* CONFIG_SCHED_THREAD_USAGE */
207 :
208 : #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
209 : /*
210 : * For threads, the following fields refer to the time spent executing
211 : * as bounded by when the thread was scheduled in and scheduled out.
212 : * For CPUs, the same fields refer to the time spent executing
213 : * non-idle threads as bounded by the idle thread(s).
214 : */
215 :
216 : uint64_t current_cycles; /* current # of non-idle cycles */
217 : uint64_t peak_cycles; /* peak # of non-idle cycles */
218 : uint64_t average_cycles; /* average # of non-idle cycles */
219 : #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
220 :
221 : #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
222 : /*
223 : * This field is always zero for individual threads. It only comes
224 : * into play when gathering statistics for the CPU. In that case it
225 : * represents the total number of cycles spent idling.
226 : */
227 :
228 : uint64_t idle_cycles;
229 : #endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
230 :
231 : #if defined(__cplusplus) && !defined(CONFIG_SCHED_THREAD_USAGE) && \
232 : !defined(CONFIG_SCHED_THREAD_USAGE_ANALYSIS) && !defined(CONFIG_SCHED_THREAD_USAGE_ALL)
233 : /* If none of the above Kconfig values are defined, this struct will have a size 0 in C
234 : * which is not allowed in C++ (it'll have a size 1). To prevent this, we add a 1 byte dummy
235 : * variable when the struct would otherwise be empty.
236 : */
237 : uint8_t dummy;
238 : #endif
239 0 : } k_thread_runtime_stats_t;
240 :
/* Per-thread polling state embedded in struct k_thread when
 * CONFIG_POLL is enabled.
 */
241 : struct z_poller {
242 : bool is_polling; /* presumably true while the owning thread is polling -- confirm in the k_poll implementation */
243 : uint8_t mode; /* polling mode; semantics defined by the k_poll implementation */
244 : };
245 :
246 : /**
247 : * @ingroup thread_apis
248 : * Thread Structure
249 : */
250 1 : struct k_thread {
251 :
/** common scheduler bookkeeping; shared layout with 'dummy' threads */
252 0 : struct _thread_base base;
253 :
254 : /** defined by the architecture, but all archs need these */
255 1 : struct _callee_saved callee_saved;
256 :
257 : /** static thread init data */
258 1 : void *init_data;
259 :
260 : /** threads waiting in k_thread_join() */
261 1 : _wait_q_t join_queue;
262 :
263 : #if defined(CONFIG_POLL)
/** per-thread k_poll state */
264 : struct z_poller poller;
265 : #endif /* CONFIG_POLL */
266 :
267 : #if defined(CONFIG_EVENTS)
/** link to the next thread waiting on the same event object --
 * NOTE(review): list semantics live in the events implementation
 */
268 : struct k_thread *next_event_link;
269 :
270 : uint32_t events; /* dual purpose - wait on and then received */
271 : uint32_t event_options; /* options supplied with the event wait */
272 :
273 : /** true if timeout should not wake the thread */
274 : bool no_wake_on_timeout;
275 : #endif /* CONFIG_EVENTS */
276 :
277 : #if defined(CONFIG_THREAD_MONITOR)
278 : /** thread entry and parameters description */
279 1 : struct __thread_entry entry;
280 :
281 : /** next item in list of all threads */
282 1 : struct k_thread *next_thread;
283 : #endif /* CONFIG_THREAD_MONITOR */
284 :
285 : #if defined(CONFIG_THREAD_NAME)
286 : /** Thread name */
287 : char name[CONFIG_THREAD_MAX_NAME_LEN];
288 : #endif /* CONFIG_THREAD_NAME */
289 :
290 : #ifdef CONFIG_THREAD_CUSTOM_DATA
291 : /** crude thread-local storage */
292 1 : void *custom_data;
293 : #endif /* CONFIG_THREAD_CUSTOM_DATA */
294 :
295 : #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
/** per-thread data accessible from user mode (e.g. errno_var) */
296 : struct _thread_userspace_local_data *userspace_local_data;
297 : #endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
298 :
299 : #if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
300 : #ifndef CONFIG_USERSPACE
301 : /** per-thread errno variable */
302 : int errno_var;
303 : #endif /* CONFIG_USERSPACE */
304 : #endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */
305 :
306 : #if defined(CONFIG_THREAD_STACK_INFO)
307 : /** Stack Info */
308 1 : struct _thread_stack_info stack_info;
309 : #endif /* CONFIG_THREAD_STACK_INFO */
310 :
311 : #if defined(CONFIG_USERSPACE)
312 : /** memory domain info of the thread */
313 1 : struct _mem_domain_info mem_domain_info;
314 :
315 : /**
316 : * Base address of thread stack.
317 : *
318 : * If memory mapped stack (CONFIG_THREAD_STACK_MEM_MAPPED)
319 : * is enabled, this is the physical address of the stack.
320 : */
321 1 : k_thread_stack_t *stack_obj;
322 :
323 : /** current syscall frame pointer */
324 1 : void *syscall_frame;
325 : #endif /* CONFIG_USERSPACE */
326 :
327 :
328 : #if defined(CONFIG_USE_SWITCH)
329 : /* When using __switch() a few previously arch-specific items
330 : * become part of the core OS
331 : */
332 :
333 : /** z_swap() return value */
334 1 : int swap_retval;
335 :
336 : /** Context handle returned via arch_switch() */
337 1 : void *switch_handle;
338 : #endif /* CONFIG_USE_SWITCH */
339 : /** resource pool */
340 1 : struct k_heap *resource_pool;
341 :
342 : #if defined(CONFIG_THREAD_LOCAL_STORAGE)
343 : /* Pointer to arch-specific TLS area */
344 : uintptr_t tls;
345 : #endif /* CONFIG_THREAD_LOCAL_STORAGE */
346 :
347 : #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
348 : /** Paging statistics */
349 : struct k_mem_paging_stats_t paging_stats;
350 : #endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
351 :
352 : #ifdef CONFIG_OBJ_CORE_THREAD
/** kernel object core bookkeeping for this thread */
353 : struct k_obj_core obj_core;
354 : #endif /* CONFIG_OBJ_CORE_THREAD */
355 :
356 : #ifdef CONFIG_SMP
357 : /** threads waiting in k_thread_suspend() */
358 1 : _wait_q_t halt_queue;
359 : #endif /* CONFIG_SMP */
360 :
361 : /** arch-specifics: must always be at the end */
362 1 : struct _thread_arch arch;
363 : };
364 :
365 : typedef struct k_thread _thread_t; /* internal alias for struct k_thread */
366 0 : typedef struct k_thread *k_tid_t; /* thread identifier: pointer to the thread object */
367 :
368 : #endif /* ZEPHYR_INCLUDE_KERNEL_THREAD_H_ */
|