LCOV - code coverage report
Current view: top level - zephyr - kernel_structs.h Hit Total Coverage
Test: new.info Lines: 0 4 0.0 %
Date: 2024-12-22 00:14:23

          Line data    Source code
       1           0 : /*
       2             :  * Copyright (c) 2016 Wind River Systems, Inc.
       3             :  *
       4             :  * SPDX-License-Identifier: Apache-2.0
       5             :  */
       6             : 
       7             : /*
       8             :  * The purpose of this file is to provide essential/minimal kernel structure
       9             :  * definitions, so that they can be used without including kernel.h.
      10             :  *
      11             :  * The following rules must be observed:
      12             :  *  1. kernel_structs.h shall not depend on kernel.h both directly and
      13             :  *    indirectly (i.e. it shall not include any header files that include
      14             :  *    kernel.h in their dependency chain).
      15             :  *  2. kernel.h shall imply kernel_structs.h, such that it shall not be
      16             :  *    necessary to include kernel_structs.h explicitly when kernel.h is
      17             :  *    included.
      18             :  */
      19             : 
      20             : #ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
      21             : #define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
      22             : 
      23             : #if !defined(_ASMLANGUAGE)
      24             : #include <zephyr/sys/atomic.h>
      25             : #include <zephyr/types.h>
      26             : #include <zephyr/sys/dlist.h>
      27             : #include <zephyr/sys/util.h>
      28             : #include <zephyr/sys/sys_heap.h>
      29             : #include <zephyr/arch/structs.h>
      30             : #include <zephyr/kernel/stats.h>
      31             : #include <zephyr/kernel/obj_core.h>
      32             : #include <zephyr/sys/rb.h>
      33             : #endif
      34             : 
/* Total number of distinct thread priority levels: all preemptible and
 * cooperative priorities, plus one extra level -- presumably the idle
 * thread's priority; TODO confirm against the scheduler's priority map.
 */
#define K_NUM_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + CONFIG_NUM_COOP_PRIORITIES + 1)
/* Number of unsigned longs needed for a bitmap with one bit per
 * priority level (used by struct _priq_mq below).
 */
#define PRIQ_BITMAP_SIZE  (DIV_ROUND_UP(K_NUM_THREAD_PRIO, BITS_PER_LONG))
      37             : 
      38             : #ifdef __cplusplus
      39             : extern "C" {
      40             : #endif
      41             : 
/*
 * Bitmask definitions for the struct k_thread.thread_state field.
 *
 * Must be before kernel_arch_data.h because it might need them to be already
 * defined.
 */

/* states: common uses low bits, arch-specific use high bits */

/* Not a real thread */
#define _THREAD_DUMMY (BIT(0))

/* Thread is waiting on an object */
#define _THREAD_PENDING (BIT(1))

/* Thread is sleeping */
#define _THREAD_SLEEPING (BIT(2))

/* Thread has terminated */
#define _THREAD_DEAD (BIT(3))

/* Thread is suspended */
#define _THREAD_SUSPENDED (BIT(4))

/* Thread is in the process of aborting */
#define _THREAD_ABORTING (BIT(5))

/* Thread is in the process of suspending */
#define _THREAD_SUSPENDING (BIT(6))

/* Thread is present in the ready queue (distinct from _THREAD_PENDING,
 * which means the thread is on a kernel object's wait queue)
 */
#define _THREAD_QUEUED (BIT(7))

/* end - states */
      76             : 
#ifdef CONFIG_STACK_SENTINEL
/* Magic value in lowest bytes of the stack.  NOTE(review): the code
 * that writes and re-checks this value to detect overflow lives
 * elsewhere in the kernel; verify against the CONFIG_STACK_SENTINEL
 * handling before relying on the exact check points.
 */
#define STACK_SENTINEL 0xF0F0F0F0
#endif

/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080U

/* highest value of _thread_base.preempt at which a thread is preemptible */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)
      87             : 
      88             : #if !defined(_ASMLANGUAGE)
      89             : 
      90             : /* Two abstractions are defined here for "thread priority queues".
      91             :  *
      92             :  * One is a "dumb" list implementation appropriate for systems with
      93             :  * small numbers of threads and sensitive to code size.  It is stored
      94             :  * in sorted order, taking an O(N) cost every time a thread is added
      95             :  * to the list.  This corresponds to the way the original _wait_q_t
      96             :  * abstraction worked and is very fast as long as the number of
      97             :  * threads is small.
      98             :  *
      99             :  * The other is a balanced tree "fast" implementation with rather
     100             :  * larger code size (due to the data structure itself, the code here
     101             :  * is just stubs) and higher constant-factor performance overhead, but
     102             :  * much better O(logN) scaling in the presence of large number of
     103             :  * threads.
     104             :  *
     105             :  * Each can be used for either the wait_q or system ready queue,
     106             :  * configurable at build time.
     107             :  */
     108             : 
/* Balanced-tree ("scalable") priority queue implementation; see the
 * overview comment above.  Selected via CONFIG_SCHED_SCALABLE (ready
 * queue) or CONFIG_WAITQ_SCALABLE (wait queues).
 */
struct _priq_rb {
        /* red/black tree of queued threads (see zephyr/sys/rb.h) */
        struct rbtree tree;
        /* presumably a monotonic insertion counter used to order
         * threads of equal priority FIFO -- TODO confirm against
         * kernel/priority_queues.c
         */
        int next_order_key;
};
     113             : 
     114             : 
/* Traditional/textbook "multi-queue" structure.  Separate lists for a
 * small number (max 32 here) of fixed priorities.  This corresponds
 * to the original Zephyr scheduler.  RAM requirements are
 * comparatively high, but performance is very fast.  Won't work with
 * features like deadline scheduling which need large priority spaces
 * to represent their requirements.
 */
struct _priq_mq {
        /* one list of queued threads per priority level */
        sys_dlist_t queues[K_NUM_THREAD_PRIO];
        /* one bit per priority level -- presumably set when the
         * corresponding queue is non-empty, enabling fast lookup of
         * the best priority; verify against the scheduler code
         */
        unsigned long bitmask[PRIQ_BITMAP_SIZE];
};
     126             : 
/* Scheduler ready queue.  The backing store is chosen at build time
 * from the priority-queue implementations above via the mutually
 * exclusive CONFIG_SCHED_* options.
 */
struct _ready_q {
#ifndef CONFIG_SMP
        /* always contains next thread to run: cannot be NULL */
        struct k_thread *cache;
#endif

#if defined(CONFIG_SCHED_DUMB)
        /* simple sorted list: smallest code, O(N) insertion */
        sys_dlist_t runq;
#elif defined(CONFIG_SCHED_SCALABLE)
        /* balanced tree: O(log N) with many threads */
        struct _priq_rb runq;
#elif defined(CONFIG_SCHED_MULTIQ)
        /* per-priority lists: fastest, highest RAM cost */
        struct _priq_mq runq;
#endif
};

typedef struct _ready_q _ready_q_t;
     143             : 
/* Per-CPU kernel state.  One instance per CPU is stored in
 * z_kernel.cpus[]; the current CPU's record is reached through the
 * _current_cpu macro defined later in this file.
 */
struct _cpu {
        /* nested interrupt count */
        uint32_t nested;

        /* interrupt stack pointer base */
        char *irq_stack;

        /* currently scheduled thread */
        struct k_thread *current;

        /* one assigned idle thread per CPU */
        struct k_thread *idle_thread;

#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
        /* per-CPU ready queue; when this option is off, a single
         * global ready queue lives in struct z_kernel instead
         */
        struct _ready_q ready_q;
#endif

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
        (CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
        /* Coop thread preempted by current metairq, or NULL */
        struct k_thread *metairq_preempted;
#endif

        /* numeric id of this CPU -- presumably its index within
         * z_kernel.cpus[]; TODO confirm at the initialization site
         */
        uint8_t id;

#if defined(CONFIG_FPU_SHARING)
        /* floating-point context pointer; contents are arch-specific
         * and opaque at this level
         */
        void *fp_ctx;
#endif

#ifdef CONFIG_SMP
        /* True when arch_current_thread() is allowed to context switch */
        uint8_t swap_ok;
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
        /*
         * [usage0] is used as a timestamp to mark the beginning of an
         * execution window. [0] is a special value indicating that it
         * has been stopped (but not disabled).
         */

        uint32_t usage0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        /* cycle statistics accumulator (see zephyr/kernel/stats.h) */
        struct k_cycle_stats *usage;
#endif
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
        /* object-core linkage (see zephyr/kernel/obj_core.h) */
        struct k_obj_core  obj_core;
#endif

        /* Per CPU architecture specifics */
        struct _cpu_arch arch;
};

typedef struct _cpu _cpu_t;
     201             : 
/* Global kernel state.  A single instance, _kernel, is declared just
 * below this definition.
 */
struct z_kernel {
        /* per-CPU records; uniprocessor builds use cpus[0] only */
        struct _cpu cpus[CONFIG_MP_MAX_NUM_CPUS];

#ifdef CONFIG_PM
        int32_t idle; /* Number of ticks for kernel idling */
#endif

        /*
         * ready queue: can be big, keep after small fields, since some
         * assembly (e.g. ARC) are limited in the encoding of the offset
         */
#ifndef CONFIG_SCHED_CPU_MASK_PIN_ONLY
        struct _ready_q ready_q;
#endif

#ifdef CONFIG_FPU_SHARING
        /*
         * A 'current_sse' field does not exist in addition to the 'current_fp'
         * field since it's not possible to divide the IA-32 non-integer
         * registers into 2 distinct blocks owned by differing threads.  In
         * other words, given that the 'fxnsave/fxrstor' instructions
         * save/restore both the X87 FPU and XMM registers, it's not possible
         * for a thread to only "own" the XMM registers.
         */

        /* thread that owns the FP regs */
        struct k_thread *current_fp;
#endif

#if defined(CONFIG_THREAD_MONITOR)
        struct k_thread *threads; /* singly linked list of ALL threads */
#endif
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        /* per-CPU thread usage statistics */
        struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
        /* object-core linkage (see zephyr/kernel/obj_core.h) */
        struct k_obj_core  obj_core;
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
        /* Identify CPUs to send IPIs to at the next scheduling point */
        atomic_t pending_ipi;
#endif
};

typedef struct z_kernel _kernel_t;

/* The single global kernel state instance (defined in the kernel core) */
extern struct z_kernel _kernel;

/* NOTE(review): appears to track the number of active CPUs; maintained
 * by SMP/PM code elsewhere -- verify before relying on semantics
 */
extern atomic_t _cpus_active;
     253             : 
#ifdef CONFIG_SMP

/* True if the current context can be preempted and migrated to
 * another SMP CPU.
 */
bool z_smp_cpu_mobile(void);

/* Evaluates to the current CPU's struct _cpu record.  The assertion
 * guards against evaluating it in a context that could migrate to
 * another CPU, where the result would be immediately stale.
 */
#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
                        arch_curr_cpu(); })

#else
/* Uniprocessor: the only CPU record is cpus[0] */
#define _current_cpu (&_kernel.cpus[0])
#endif /* CONFIG_SMP */

/* Deprecated alias for the running thread; use arch_current_thread() */
#define _current arch_current_thread() __DEPRECATED_MACRO
     269             : 
/* kernel wait queue record: the set of threads pending on a kernel
 * object.  Backed by the scalable tree or a simple list, selected at
 * build time.
 */
#ifdef CONFIG_WAITQ_SCALABLE

/* tree-backed wait queue: O(log N) with many waiters */
typedef struct {
        struct _priq_rb waitq;
} _wait_q_t;

/* defined in kernel/priority_queues.c */
bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

/* Static initializer: empty tree with the thread comparator installed */
#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

/* list-backed wait queue: smallest code, O(N) insertion */
typedef struct {
        sys_dlist_t waitq;
} _wait_q_t;

/* Static initializer: empty doubly-linked list */
#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif /* CONFIG_WAITQ_SCALABLE */
     291             : 
/* kernel timeout record */
struct _timeout;

/* Callback invoked when a timeout expires */
typedef void (*_timeout_func_t)(struct _timeout *t);

/* Node in the kernel's timeout list.  NOTE(review): dticks appears to
 * hold a tick count whose interpretation (absolute vs. delta-encoded
 * relative to the previous list entry) is managed by the timeout
 * subsystem elsewhere -- verify against kernel/timeout.c.
 */
struct _timeout {
        sys_dnode_t node;
        _timeout_func_t fn;
#ifdef CONFIG_TIMEOUT_64BIT
        /* Can't use k_ticks_t for header dependency reasons */
        int64_t dticks;
#else
        int32_t dticks;
#endif
};

/* Callback type invoked on thread time-slice expiration */
typedef void (*k_thread_timeslice_fn_t)(struct k_thread *thread, void *data);
     308             : 
     309             : #ifdef __cplusplus
     310             : }
     311             : #endif
     312             : 
     313             : #endif /* _ASMLANGUAGE */
     314             : 
     315             : #endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */

Generated by: LCOV version 1.14