/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief RISC-V specific kernel interface header
 *
 * This header contains the RISC-V specific kernel interface. It is
 * included by the generic kernel interface header (arch/cpu.h).
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_

#include <zephyr/arch/riscv/thread.h>
#include <zephyr/arch/riscv/exception.h>
#include <zephyr/arch/riscv/irq.h>
#include <zephyr/arch/riscv/sys_io.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/arch/common/ffs.h>
#if defined(CONFIG_USERSPACE)
#include <zephyr/arch/riscv/syscall.h>
#endif /* CONFIG_USERSPACE */
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/devicetree.h>
#include <zephyr/arch/riscv/csr.h>

/* Stacks: the RISC-V ABI requires the stack pointer to be 16-byte aligned */
#define ARCH_STACK_PTR_ALIGN  16

#define Z_RISCV_STACK_PMP_ALIGN \
        MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)
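
/*
 * Worked example (granularity value assumed for illustration): with
 * CONFIG_PMP_GRANULARITY=64, Z_RISCV_STACK_PMP_ALIGN evaluates to
 * MAX(64, 16) == 64, so PMP-guarded stack regions must be 64-byte
 * aligned even though the ABI alone only needs 16.
 */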

#ifdef CONFIG_PMP_STACK_GUARD
/*
 * The StackGuard is an area at the bottom of the kernel-mode stack made to
 * fault when accessed. It does _not_ fault while in exception mode, as we
 * rely on that area to save the exception stack frame and to process said
 * fault. Therefore the guard area must be large enough to hold the esf,
 * plus some configurable stack wiggle room for the fault handling code to
 * execute on, as well as some guard size to cover possible sudden stack
 * pointer displacement before the fault.
 */
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define Z_RISCV_STACK_GUARD_SIZE \
        Z_POW2_CEIL(MAX(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
                        Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_KERNEL_STACK_OBJ_ALIGN     Z_RISCV_STACK_GUARD_SIZE
#else
#define Z_RISCV_STACK_GUARD_SIZE \
        ROUND_UP(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
                 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_KERNEL_STACK_OBJ_ALIGN     Z_RISCV_STACK_PMP_ALIGN
#endif
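
/*
 * Worked example (all numbers assumed for illustration): if
 * sizeof(struct arch_esf) == 124, CONFIG_PMP_STACK_GUARD_MIN_SIZE == 1024
 * and Z_RISCV_STACK_PMP_ALIGN == 16, the guard needs 1148 bytes.
 * The ROUND_UP() variant yields 1152 (next multiple of 16), while the
 * power-of-two variant yields Z_POW2_CEIL(1148) == 2048, trading memory
 * for the cheaper NAPOT PMP entry encoding.
 */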

/* Kernel-only stacks have the following layout if a stack guard is enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Kernel     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_KERNEL_STACK_RESERVED      Z_RISCV_STACK_GUARD_SIZE

#else /* !CONFIG_PMP_STACK_GUARD */
#define Z_RISCV_STACK_GUARD_SIZE 0
#endif

#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
/* The privilege elevation stack is located in another area of memory
 * generated at build time by gen_kobject_list.py
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.arch.priv_stack_start +
 *                   CONFIG_PRIVILEGED_STACK_SIZE +
 *                   Z_RISCV_STACK_GUARD_SIZE
 *
 * The main stack will initially (or potentially only) be used by kernel
 * mode, so we need to make room for a possible stack guard area when
 * enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +............| <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 *
 * When transitioning to user space, the guard area is removed from
 * the main stack. Any thread running in user mode will have full access
 * to the region denoted by thread.stack_info. Make it PMP-NAPOT compatible.
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
        Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
                        Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
                ARCH_THREAD_STACK_SIZE_ADJUST(size)
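
/*
 * Worked example (sizes assumed for illustration): a thread created with
 * a 1500-byte stack and CONFIG_PRIVILEGED_STACK_SIZE == 1024 gets
 * ARCH_THREAD_STACK_SIZE_ADJUST(1500) == Z_POW2_CEIL(1500) == 2048, so
 * the whole buffer can be covered by a single NAPOT PMP entry.
 */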

#else /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */

/* The stack object will contain the PMP guard, the privilege stack, and then
 * the usermode stack buffer in that order:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED \
        ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
                 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
        ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size)       Z_RISCV_STACK_PMP_ALIGN
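
/*
 * Worked example (sizes assumed for illustration): with a 1500-byte
 * stack request, Z_RISCV_STACK_PMP_ALIGN == 16 and
 * CONFIG_PRIVILEGED_STACK_SIZE == 1024, the thread buffer becomes
 * ROUND_UP(1500, 16) == 1504 bytes and the reserved area
 * ROUND_UP(guard + 1024, 16). This wastes far less memory than the
 * power-of-two scheme, at the cost of needing TOR-style PMP entries
 * instead of a single NAPOT one.
 */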
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */

#ifdef CONFIG_64BIT
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif
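
/*
 * RV_REGSIZE/RV_REGSHIFT let shared code compute register sizes and
 * offsets without knowing XLEN. A minimal sketch (REG_OFFSET() is
 * hypothetical, shown for illustration only):
 *
 *   #define REG_OFFSET(n) ((n) << RV_REGSHIFT)  // byte offset of reg n
 *   // e.g. on a 64-bit build, register 5 sits at byte 5 << 3 == 40
 */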

/* Common mstatus bits. All supported cores today have the same
 * layout.
 */

#define MSTATUS_IEN     (1UL << 3)
#define MSTATUS_MPP_M   (3UL << 11)
#define MSTATUS_MPIE_EN (1UL << 7)

#define MSTATUS_FS_OFF   (0UL << 13)
#define MSTATUS_FS_INIT  (1UL << 13)
#define MSTATUS_FS_CLEAN (2UL << 13)
#define MSTATUS_FS_DIRTY (3UL << 13)

/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
 * platforms:
 * - Preserve machine privileges in MPP. If you see any documentation
 *   telling you that MPP is read-only on this SoC, don't believe its
 *   lies.
 * - Enable interrupts when exiting from exception into a new thread
 *   by setting MPIE now, so it will be copied into IE on mret.
 */
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
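
/*
 * These map to mstatus.MIE (bit 3), mstatus.MPIE (bit 7), mstatus.MPP
 * (bits 12:11) and mstatus.FS (bits 14:13) from the RISC-V privileged
 * spec. A minimal sketch of how a context setup path might seed a new
 * thread's saved mstatus (illustrative, not the actual swap code):
 *
 *   esf->mstatus = MSTATUS_DEF_RESTORE;  // stay in M-mode, MPIE set
 *   // on mret, MPIE is copied into MIE, re-enabling interrupts
 */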

#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
#endif
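
/*
 * STRINGIFY() turns the vector symbol into text, so e.g.
 * ARCH_IRQ_VECTOR_JUMP_CODE(_isr_wrapper) expands to the assembly
 * fragment "j _isr_wrapper" (symbol name chosen for illustration),
 * which the vector table generator emits as a direct jump instruction.
 */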

/* Kernel macros for memory attribution
 * (access permissions and cache-ability).
 *
 * The macros are to be stored in k_mem_partition_attr_t
 * objects. The format of a k_mem_partition_attr_t object
 * is a uint8_t composed of PMP configuration register flags
 * defined in arch/riscv/include/core_pmp.h
 */

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
        {PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
        {PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
        {0})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
        {PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
        {0})
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
        {0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
        {PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
        {PMP_R | PMP_X})
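
/*
 * Typical usage from userspace application code (a minimal sketch; the
 * partition and buffer names are hypothetical, and the buffer must
 * satisfy the PMP granularity/alignment rules of the target):
 *
 *   uint8_t __aligned(64) shared_buf[256];
 *   K_MEM_PARTITION_DEFINE(shared_part, shared_buf, sizeof(shared_buf),
 *                          K_MEM_PARTITION_P_RW_U_RW);
 */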

/* Typedef for the k_mem_partition attribute */
typedef struct {
        uint8_t pmp_attr;
} k_mem_partition_attr_t;

struct arch_mem_domain {
        unsigned int pmp_update_nr;
};

extern void z_irq_spurious(const void *unused);

/*
 * Use the atomic instruction csrrc to lock out global interrupts.
 * csrrc atomically reads a CSR and clears the requested bits in it.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
        return z_soc_irq_lock();
#else
        unsigned int key;

        __asm__ volatile ("csrrc %0, mstatus, %1"
                          : "=r" (key)
                          : "rK" (MSTATUS_IEN)
                          : "memory");

        return key;
#endif
}

/*
 * Use the atomic instruction csrs to unlock global interrupts.
 * csrs atomically sets the requested bits in a CSR.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
        z_soc_irq_unlock(key);
#else
        __asm__ volatile ("csrs mstatus, %0"
                          :
                          : "r" (key & MSTATUS_IEN)
                          : "memory");
#endif
}

static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
        return z_soc_irq_unlocked(key);
#else
        return (key & MSTATUS_IEN) != 0;
#endif
}
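
/*
 * The returned key holds the previous MSTATUS_IEN state, so lock/unlock
 * pairs nest naturally. A minimal sketch (portable code should prefer
 * the generic irq_lock()/irq_unlock() wrappers over these arch_ ops):
 *
 *   unsigned int key = arch_irq_lock();
 *   // ... critical section, interrupts masked ...
 *   arch_irq_unlock(key);  // re-enables IEN only if it was set before
 */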

static ALWAYS_INLINE void arch_nop(void)
{
        __asm__ volatile("nop");
}

extern uint32_t sys_clock_cycle_get_32(void);

static inline uint32_t arch_k_cycle_get_32(void)
{
        return sys_clock_cycle_get_32();
}

extern uint64_t sys_clock_cycle_get_64(void);

static inline uint64_t arch_k_cycle_get_64(void)
{
        return sys_clock_cycle_get_64();
}
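
/*
 * A minimal timing sketch built on these accessors (do_work() is a
 * placeholder; k_cyc_to_ns_floor64() is the generic kernel conversion
 * helper, shown for illustration):
 *
 *   uint32_t start = arch_k_cycle_get_32();
 *   do_work();
 *   uint32_t cycles = arch_k_cycle_get_32() - start; // wrap-safe (unsigned)
 *   uint64_t ns = k_cyc_to_ns_floor64(cycles);
 */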

#include <zephyr/arch/riscv/error.h>

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#if defined(CONFIG_RISCV_PRIVILEGED)
#include <zephyr/arch/riscv/riscv-privileged/asm_inline.h>
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_ */
