LCOV - code coverage report
Current view: top level - zephyr - kernel_structs.h Coverage Total Hit
Test: new.info Lines: 0.0 % 5 0
Test Date: 2025-09-05 16:43:28

            Line data    Source code
       1            0 : /*
       2              :  * Copyright (c) 2016 Wind River Systems, Inc.
       3              :  *
       4              :  * SPDX-License-Identifier: Apache-2.0
       5              :  */
       6              : 
       7              : /*
       8              :  * The purpose of this file is to provide essential/minimal kernel structure
       9              :  * definitions, so that they can be used without including kernel.h.
      10              :  *
      11              :  * The following rules must be observed:
       12              :  *  1. kernel_structs.h shall not depend on kernel.h, either directly or
       13              :  *    indirectly (i.e. it shall not include any header files that include
       14              :  *    kernel.h in their dependency chain).
      15              :  *  2. kernel.h shall imply kernel_structs.h, such that it shall not be
      16              :  *    necessary to include kernel_structs.h explicitly when kernel.h is
      17              :  *    included.
      18              :  */
      19              : 
      20              : #ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
      21              : #define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
      22              : 
      23              : #if !defined(_ASMLANGUAGE)
      24              : #include <zephyr/sys/atomic.h>
      25              : #include <zephyr/types.h>
      26              : #include <zephyr/sys/dlist.h>
      27              : #include <zephyr/sys/util.h>
      28              : #include <zephyr/sys/sys_heap.h>
      29              : #include <zephyr/arch/structs.h>
      30              : #include <zephyr/kernel/stats.h>
      31              : #include <zephyr/kernel/obj_core.h>
      32              : #include <zephyr/sys/rb.h>
      33              : #endif
      34              : 
/* Total number of distinct thread priority levels: all preemptible plus all
 * cooperative priorities, plus one extra level (presumably the lowest/idle
 * priority closing the inclusive range — verify against kernel.h).
 */
#define K_NUM_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + CONFIG_NUM_COOP_PRIORITIES + 1)
/* Number of unsigned longs needed for a bitmap holding one bit per
 * priority level (used by struct _priq_mq below).
 */
#define PRIQ_BITMAP_SIZE  (DIV_ROUND_UP(K_NUM_THREAD_PRIO, BITS_PER_LONG))
      37              : 
      38              : #ifdef __cplusplus
      39              : extern "C" {
      40              : #endif
      41              : 
/*
 * Bitmask definitions for the struct k_thread.thread_state field.
 *
 * Must be before kernel_arch_data.h because it might need them to be already
 * defined.
 */

/* states: common uses low bits, arch-specific use high bits */

/* Not a real thread (e.g. a placeholder used during early boot/swap) */
#define _THREAD_DUMMY (BIT(0))

/* Thread is waiting on an object (sitting in a _wait_q_t) */
#define _THREAD_PENDING (BIT(1))

/* Thread is sleeping */
#define _THREAD_SLEEPING (BIT(2))

/* Thread has terminated */
#define _THREAD_DEAD (BIT(3))

/* Thread is suspended */
#define _THREAD_SUSPENDED (BIT(4))

/* Thread is in the process of aborting */
#define _THREAD_ABORTING (BIT(5))

/* Thread is in the process of suspending */
#define _THREAD_SUSPENDING (BIT(6))

/* Thread is present in the ready queue */
#define _THREAD_QUEUED (BIT(7))

/* end - states */

#ifdef CONFIG_STACK_SENTINEL
/* Magic value in lowest bytes of the stack; presumably checked periodically
 * to detect stack overflow — see CONFIG_STACK_SENTINEL documentation.
 */
#define STACK_SENTINEL 0xF0F0F0F0
#endif

/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080U

/* highest value of _thread_base.preempt at which a thread is preemptible
 * (i.e. 0x007FU, one below the non-preemptible threshold)
 */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)
      87              : 
      88              : #if !defined(_ASMLANGUAGE)
      89              : 
      90              : /* Two abstractions are defined here for "thread priority queues".
      91              :  *
      92              :  * One is a "dumb" list implementation appropriate for systems with
      93              :  * small numbers of threads and sensitive to code size.  It is stored
      94              :  * in sorted order, taking an O(N) cost every time a thread is added
      95              :  * to the list.  This corresponds to the way the original _wait_q_t
      96              :  * abstraction worked and is very fast as long as the number of
      97              :  * threads is small.
      98              :  *
      99              :  * The other is a balanced tree "fast" implementation with rather
     100              :  * larger code size (due to the data structure itself, the code here
     101              :  * is just stubs) and higher constant-factor performance overhead, but
      102              :  * much better O(logN) scaling in the presence of large numbers of
     103              :  * threads.
     104              :  *
     105              :  * Each can be used for either the wait_q or system ready queue,
     106              :  * configurable at build time.
     107              :  */
     108              : 
/* "Scalable" priority queue: a balanced (red/black) tree of threads,
 * ordered via z_priq_rb_lessthan() (declared below for the wait_q case).
 */
struct _priq_rb {
        /* rbtree of queued threads */
        struct rbtree tree;
        /* NOTE(review): presumably a monotonically increasing key used to
         * break ties among equal-priority threads (preserving FIFO order) —
         * confirm in kernel/priority_queues.c.
         */
        int next_order_key;
};
     113              : 
     114              : 
/* Traditional/textbook "multi-queue" structure.  Separate lists for a
 * small number (max 32 here) of fixed priorities.  This corresponds
 * to the original Zephyr scheduler.  RAM requirements are
 * comparatively high, but performance is very fast.  Won't work with
 * features like deadline scheduling which need large priority spaces
 * to represent their requirements.
 */
struct _priq_mq {
        /* one FIFO list of ready threads per priority level */
        sys_dlist_t queues[K_NUM_THREAD_PRIO];
        /* one bit per priority level; presumably set while the matching
         * queue is non-empty, enabling fast highest-priority lookup —
         * confirm in the scheduler implementation.
         */
        unsigned long bitmask[PRIQ_BITMAP_SIZE];
#ifndef CONFIG_SMP
        /* NOTE(review): looks like a cache of the highest-priority
         * non-empty queue index (uniprocessor only) — verify.
         */
        unsigned int cached_queue_index;
#endif
};
     129              : 
/* System ready queue: holds runnable threads in one of three
 * build-time-selected representations (see the CONFIG_SCHED_* choices).
 */
struct _ready_q {
#ifndef CONFIG_SMP
        /* always contains next thread to run: cannot be NULL */
        struct k_thread *cache;
#endif

#if defined(CONFIG_SCHED_SIMPLE)
        /* sorted doubly-linked list; O(N) insert, minimal code size */
        sys_dlist_t runq;
#elif defined(CONFIG_SCHED_SCALABLE)
        /* balanced tree; O(logN) operations for many threads */
        struct _priq_rb runq;
#elif defined(CONFIG_SCHED_MULTIQ)
        /* per-priority lists; fastest, but bounded priority space */
        struct _priq_mq runq;
#endif
};

typedef struct _ready_q _ready_q_t;
     146              : 
/* Per-CPU kernel bookkeeping.  One instance per CPU lives in
 * _kernel.cpus[]; accessed via the _current_cpu macro below.
 * NOTE(review): field order may be relied upon by arch assembly offsets —
 * do not reorder without checking the offsets headers.
 */
struct _cpu {
        /* nested interrupt count */
        uint32_t nested;

        /* interrupt stack pointer base */
        char *irq_stack;

        /* currently scheduled thread */
        struct k_thread *current;

        /* one assigned idle thread per CPU */
        struct k_thread *idle_thread;

#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
        /* per-CPU ready queue (threads pinned to a single CPU) */
        struct _ready_q ready_q;
#endif

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
        (CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
        /* Coop thread preempted by current metairq, or NULL */
        struct k_thread *metairq_preempted;
#endif

        /* CPU index (see also the CPU_ID macro below) */
        uint8_t id;

#if defined(CONFIG_FPU_SHARING)
        /* NOTE(review): presumably the owner/context of the shared FPU
         * state on this CPU — confirm against the arch FPU-sharing code.
         */
        void *fp_ctx;
#endif

#ifdef CONFIG_SMP
        /* True when _current is allowed to context switch */
        uint8_t swap_ok;
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
        /*
         * [usage0] is used as a timestamp to mark the beginning of an
         * execution window. [0] is a special value indicating that it
         * has been stopped (but not disabled).
         */

        uint32_t usage0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        /* points at this CPU's slot in _kernel.usage[] — TODO confirm */
        struct k_cycle_stats *usage;
#endif
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
        /* object-core record exposing this CPU to the obj_core framework */
        struct k_obj_core  obj_core;
#endif

#ifdef CONFIG_SCHED_IPI_SUPPORTED
        /* queue of work items to process on IPI — verify against ipi code */
        sys_dlist_t ipi_workq;
#endif

        /* Per CPU architecture specifics */
        struct _cpu_arch arch;
};

typedef struct _cpu _cpu_t;
     208              : 
/* Global kernel state: the single struct backing the _kernel variable
 * declared below.
 */
struct z_kernel {
        /* per-CPU state, indexed by CPU id */
        struct _cpu cpus[CONFIG_MP_MAX_NUM_CPUS];

#ifdef CONFIG_PM
        int32_t idle; /* Number of ticks for kernel idling */
#endif

        /*
         * ready queue: can be big, keep after small fields, since some
         * assemblies (e.g. ARC) are limited in the encoding of the offset
         */
#ifndef CONFIG_SCHED_CPU_MASK_PIN_ONLY
        struct _ready_q ready_q;
#endif

#if defined(CONFIG_THREAD_MONITOR)
        struct k_thread *threads; /* singly linked list of ALL threads */
#endif
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        /* per-CPU execution-cycle statistics */
        struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
        /* object-core record exposing the kernel to the obj_core framework */
        struct k_obj_core  obj_core;
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
        /* Identify CPUs to send IPIs to at the next scheduling point */
        atomic_t pending_ipi;
#endif
};

typedef struct z_kernel _kernel_t;

/* the one global kernel state instance (defined in kernel code) */
extern struct z_kernel _kernel;

/* count of CPUs currently active — TODO confirm exact semantics at the
 * definition site (e.g. whether it excludes CPUs halted for power mgmt).
 */
extern atomic_t _cpus_active;
     246              : 
#ifdef CONFIG_SMP

/* True if the current context can be preempted and migrated to
 * another SMP CPU.
 */
bool z_smp_cpu_mobile(void);
/* _current_cpu: this CPU's struct _cpu.  Asserting !z_smp_cpu_mobile()
 * guards against the pointer going stale via migration to another CPU.
 */
#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
                        arch_curr_cpu(); })

/* _current: the thread running on this CPU, via a migration-safe helper */
__attribute_const__ struct k_thread *z_smp_current_get(void);
#define _current z_smp_current_get()

#else
/* Uniprocessor: only one _cpu record exists, so direct access is safe */
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.cpus[0].current
#endif

/* Numeric id of the executing CPU; constant-folds to 0 when only one CPU
 * is configured, avoiding the _current_cpu dereference entirely.
 */
#define CPU_ID ((CONFIG_MP_MAX_NUM_CPUS == 1) ? 0 : _current_cpu->id)

/* This is always invoked from a context where preemption is disabled */
#define z_current_thread_set(thread) ({ _current_cpu->current = (thread); })

/* Architectures may provide a faster thread-pointer mechanism; when they
 * do, re-route _current/z_current_thread_set through it (the set path
 * still updates _current_cpu->current so both views stay coherent).
 */
#ifdef CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL
#undef _current
#define _current arch_current_thread()
#undef z_current_thread_set
#define z_current_thread_set(thread) \
        arch_current_thread_set(({ _current_cpu->current = (thread); }))
#endif
     276              : 
/* kernel wait queue record: the set of threads pended on one kernel
 * object.  Representation follows the same simple-list vs. balanced-tree
 * trade-off described for the ready queue above.
 */
#ifdef CONFIG_WAITQ_SCALABLE

typedef struct {
        /* rb-tree of pended threads, ordered by z_priq_rb_lessthan() */
        struct _priq_rb waitq;
} _wait_q_t;

/* defined in kernel/priority_queues.c */
bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

/* static initializer: triple braces reach rbtree's lessthan_fn member */
#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

typedef struct {
        /* doubly-linked list of pended threads */
        sys_dlist_t waitq;
} _wait_q_t;

#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif /* CONFIG_WAITQ_SCALABLE */
     298              : 
/* kernel timeout record: one entry in the kernel's timeout list */
struct _timeout;
/* callback invoked when a timeout expires */
typedef void (*_timeout_func_t)(struct _timeout *t);

struct _timeout {
        /* linkage in the kernel timeout list */
        sys_dnode_t node;
        /* expiry callback */
        _timeout_func_t fn;
#ifdef CONFIG_TIMEOUT_64BIT
        /* Can't use k_ticks_t for header dependency reasons */
        int64_t dticks;
#else
        /* ticks until expiry; presumably relative to the previous list
         * entry (delta list) — confirm in kernel/timeout.c
         */
        int32_t dticks;
#endif
};

/* callback type used by the time-slice API (thread whose slice expired
 * plus an opaque user data pointer)
 */
typedef void (*k_thread_timeslice_fn_t)(struct k_thread *thread, void *data);
     315              : 
     316              : #ifdef __cplusplus
     317              : }
     318              : #endif
     319              : 
     320              : #endif /* _ASMLANGUAGE */
     321              : 
     322              : #endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */
        

Generated by: LCOV version 2.0-1