/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief RISCV specific kernel interface header
 *
 * This header contains the RISCV specific kernel interface.  It is
 * included by the kernel interface architecture-abstraction header
 * (include/zephyr/arch/cpu.h).
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_

#include <zephyr/arch/riscv/thread.h>
#include <zephyr/arch/exception.h>
#include <zephyr/arch/riscv/irq.h>
#include <zephyr/arch/riscv/sys_io.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/arch/common/ffs.h>
#if defined(CONFIG_USERSPACE)
#include <zephyr/arch/riscv/syscall.h>
#endif /* CONFIG_USERSPACE */
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/devicetree.h>
#include <zephyr/arch/riscv/csr.h>
/* Stacks: on the RISC-V architecture the stack pointer must be 16-byte aligned */
#define ARCH_STACK_PTR_ALIGN  16

#define Z_RISCV_STACK_PMP_ALIGN \
        MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)

#ifdef CONFIG_PMP_STACK_GUARD
/*
 * The StackGuard is an area at the bottom of the kernel-mode stack made to
 * fault when accessed. It does _not_ fault when in exception mode, as we rely
 * on that area to save the exception stack frame and to process said fault.
 * Therefore the guard area must be large enough to hold the esf, plus some
 * configurable stack wiggle room to execute the fault handling code off of,
 * as well as some guard size to cover possible sudden stack pointer
 * displacement before the fault.
 */
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define Z_RISCV_STACK_GUARD_SIZE \
        Z_POW2_CEIL(MAX(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
                        Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_KERNEL_STACK_OBJ_ALIGN     Z_RISCV_STACK_GUARD_SIZE
#else
#define Z_RISCV_STACK_GUARD_SIZE \
        ROUND_UP(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
                 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_KERNEL_STACK_OBJ_ALIGN     Z_RISCV_STACK_PMP_ALIGN
#endif
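
/*
 * Worked example with purely illustrative numbers (not taken from any real
 * configuration): suppose sizeof(struct arch_esf) is 124 bytes,
 * CONFIG_PMP_STACK_GUARD_MIN_SIZE is 1024 and Z_RISCV_STACK_PMP_ALIGN is 16.
 *
 *   Without power-of-two alignment:
 *     ROUND_UP(124 + 1024, 16) = ROUND_UP(1148, 16) = 1152 bytes
 *
 *   With CONFIG_PMP_POWER_OF_TWO_ALIGNMENT:
 *     Z_POW2_CEIL(1148) = 2048 bytes
 *
 * i.e. the power-of-two variant can nearly double the guard overhead, in
 * exchange for a guard region expressible as a single PMP NAPOT entry.
 */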

/* Kernel-only stacks have the following layout if a stack guard is enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Kernel     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_KERNEL_STACK_RESERVED      Z_RISCV_STACK_GUARD_SIZE
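
/*
 * A minimal usage sketch: a kernel-only stack declared through the generic
 * kernel API transparently gains the guard area shown above, because
 * K_KERNEL_STACK_DEFINE() reserves ARCH_KERNEL_STACK_RESERVED on top of the
 * requested size. The 1024-byte size and all names below are arbitrary
 * illustration, not part of this header:
 *
 *   K_KERNEL_STACK_DEFINE(my_kernel_stack, 1024);
 *
 *   void spawn(struct k_thread *t, k_thread_entry_t entry)
 *   {
 *           k_thread_create(t, my_kernel_stack,
 *                           K_KERNEL_STACK_SIZEOF(my_kernel_stack),
 *                           entry, NULL, NULL, NULL,
 *                           K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
 *   }
 */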

#else /* !CONFIG_PMP_STACK_GUARD */
#define Z_RISCV_STACK_GUARD_SIZE 0
#endif

#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
/* The privilege elevation stack is located in another area of memory
 * generated at build time by gen_kobject_list.py
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.arch.priv_stack_start +
 *                   CONFIG_PRIVILEGED_STACK_SIZE +
 *                   Z_RISCV_STACK_GUARD_SIZE
 *
 * The main stack will be initially (or potentially only) used by kernel
 * mode, so we need to make room for a possible stack guard area when enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +............| <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 *
 * When transitioning to user space, the guard area will be removed from
 * the main stack. Any thread running in user mode will have full access
 * to the region denoted by thread.stack_info. Make it PMP-NAPOT compatible.
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
        Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
                        Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
                ARCH_THREAD_STACK_SIZE_ADJUST(size)
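
/*
 * Worked example with hypothetical numbers: a thread stack requested with
 * size 2000, CONFIG_PRIVILEGED_STACK_SIZE of 1024 and Z_RISCV_STACK_PMP_ALIGN
 * of 16 yields
 *
 *   ARCH_THREAD_STACK_SIZE_ADJUST(2000)
 *     = Z_POW2_CEIL(MAX(MAX(2000, 1024), 16))
 *     = Z_POW2_CEIL(2000)
 *     = 2048
 *
 * and the stack object is aligned to that same 2048 bytes, so the user-mode
 * region can be described by a single PMP NAPOT entry.
 */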

#else /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */

/* The stack object will contain the PMP guard, the privilege stack, and then
 * the usermode stack buffer in that order:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED \
        ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
                 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
        ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size)       Z_RISCV_STACK_PMP_ALIGN
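
/*
 * Worked example with hypothetical numbers: with Z_RISCV_STACK_GUARD_SIZE
 * of 1152, CONFIG_PRIVILEGED_STACK_SIZE of 1024 and Z_RISCV_STACK_PMP_ALIGN
 * of 16:
 *
 *   ARCH_THREAD_STACK_RESERVED = ROUND_UP(1152 + 1024, 16) = 2176
 *   ARCH_THREAD_STACK_SIZE_ADJUST(2000) = ROUND_UP(2000, 16) = 2000
 *
 * so a 2000-byte request costs 2176 + 2000 = 4176 bytes of stack object,
 * but needs only 16-byte alignment rather than a power-of-two size.
 */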
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */

#ifdef CONFIG_64BIT
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif

/* Common mstatus bits. All supported cores today have the same
 * layouts.
 */

#define MSTATUS_IEN     (1UL << 3)
#define MSTATUS_MPP_M   (3UL << 11)
#define MSTATUS_MPIE_EN (1UL << 7)

#define MSTATUS_FS_OFF   (0UL << 13)
#define MSTATUS_FS_INIT  (1UL << 13)
#define MSTATUS_FS_CLEAN (2UL << 13)
#define MSTATUS_FS_DIRTY (3UL << 13)
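
/*
 * A sketch of how the FS values can be used: the two FS bits of mstatus
 * track the FPU context state, so dirty-context detection can be written
 * as below. csr_read() comes from <zephyr/arch/riscv/csr.h>, included
 * above; the helper name is illustrative only.
 *
 *   static inline bool fpu_ctx_dirty(void)
 *   {
 *           // FS_DIRTY sets both bits, so masking with it and comparing
 *           // for equality tests for the dirty state specifically.
 *           return (csr_read(mstatus) & MSTATUS_FS_DIRTY) == MSTATUS_FS_DIRTY;
 *   }
 */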

/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
 * platforms:
 * - Preserve machine privileges in MPP. If you see any documentation
 *   telling you that MPP is read-only on this SoC, don't believe its
 *   lies.
 * - Enable interrupts when exiting from exception into a new thread
 *   by setting MPIE now, so it will be copied into IE on mret.
 */
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)

#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
#endif

/* Kernel macros for memory attribution
 * (access permissions and cache-ability).
 *
 * The macros are to be stored in k_mem_partition_attr_t
 * objects. The format of a k_mem_partition_attr_t object
 * is a uint8_t composed of configuration register flags
 * located in arch/riscv/include/core_pmp.h
 */

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
        {PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
        {PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
        {0})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
        {PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
        {0})
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
        {0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
        {PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
        {PMP_R | PMP_X})
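
/*
 * Usage sketch with hypothetical symbols (app_part, shared_buf, region_start,
 * region_size): these attributes are consumed by the generic k_mem_partition
 * API. A partition giving user threads read-write access could look like:
 *
 *   K_APPMEM_PARTITION_DEFINE(app_part);
 *   K_APP_DMEM(app_part) uint8_t shared_buf[256];
 *
 * or, for an explicitly placed region:
 *
 *   K_MEM_PARTITION_DEFINE(fixed_part, region_start, region_size,
 *                          K_MEM_PARTITION_P_RW_U_RW);
 */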

/* Typedef for the k_mem_partition attribute */
typedef struct {
        uint8_t pmp_attr;
} k_mem_partition_attr_t;

struct arch_mem_domain {
        unsigned int pmp_update_nr;
};

extern void z_irq_spurious(const void *unused);

/*
 * Use the atomic instruction csrrc to lock out global interrupts.
 * csrrc: atomically read a CSR and clear the given bits in it.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
        return z_soc_irq_lock();
#else
        unsigned int key;

        __asm__ volatile ("csrrc %0, mstatus, %1"
                          : "=r" (key)
                          : "rK" (MSTATUS_IEN)
                          : "memory");

        return key;
#endif
}

/*
 * Use the atomic instruction csrs to unlock global interrupts.
 * csrs: atomically set the given bits in a CSR.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
        z_soc_irq_unlock(key);
#else
        __asm__ volatile ("csrs mstatus, %0"
                          :
                          : "r" (key & MSTATUS_IEN)
                          : "memory");
#endif
}

static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
        return z_soc_irq_unlocked(key);
#else
        return (key & MSTATUS_IEN) != 0;
#endif
}
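
/*
 * Canonical usage sketch: the key returned by arch_irq_lock() captures the
 * previous MSTATUS_IEN state, so nested lock/unlock pairs restore exactly
 * what they found:
 *
 *   unsigned int key = arch_irq_lock();
 *   // ... critical section, interrupts masked ...
 *   arch_irq_unlock(key);  // re-enables only if they were enabled before
 */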

static ALWAYS_INLINE void arch_nop(void)
{
        __asm__ volatile("nop");
}

extern uint32_t sys_clock_cycle_get_32(void);

static inline uint32_t arch_k_cycle_get_32(void)
{
        return sys_clock_cycle_get_32();
}

extern uint64_t sys_clock_cycle_get_64(void);

static inline uint64_t arch_k_cycle_get_64(void)
{
        return sys_clock_cycle_get_64();
}
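
/*
 * Usage sketch: unsigned wraparound arithmetic makes simple cycle deltas
 * safe even if the 32-bit counter overflows between the two samples
 * (do_work() is a hypothetical workload):
 *
 *   uint32_t start = arch_k_cycle_get_32();
 *   do_work();
 *   uint32_t cycles = arch_k_cycle_get_32() - start;
 */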

#include <zephyr/arch/riscv/error.h>

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#if defined(CONFIG_RISCV_PRIVILEGED)
#include <zephyr/arch/riscv/riscv-privileged/asm_inline.h>
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_ */