#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#if defined(CONFIG_USERSPACE)
#include <zephyr/arch/riscv/syscall.h>
#endif
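
/*
 * The standard RISC-V calling convention keeps the stack pointer 16-byte
 * aligned at call boundaries, hence the value below.
 */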
#define ARCH_STACK_PTR_ALIGN	16
#define Z_RISCV_STACK_PMP_ALIGN \
	MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)
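/*
 * Illustrative expansion (Kconfig values assumed, not taken from this tree):
 * with CONFIG_PMP_GRANULARITY=64 this resolves to MAX(64, 16) == 64, so every
 * PMP-protected stack area starts on a 64-byte boundary; with a 4-byte
 * granularity it falls back to the 16-byte stack pointer alignment.
 */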
#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define Z_RISCV_STACK_GUARD_SIZE \
	Z_POW2_CEIL(MAX(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
			Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_KERNEL_STACK_OBJ_ALIGN	Z_RISCV_STACK_GUARD_SIZE
#else
#define Z_RISCV_STACK_GUARD_SIZE \
	ROUND_UP(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
		 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_KERNEL_STACK_OBJ_ALIGN	Z_RISCV_STACK_PMP_ALIGN
#endif
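/*
 * Background note: with CONFIG_PMP_POWER_OF_TWO_ALIGNMENT the guard is
 * expected to be covered by a single NAPOT-style PMP entry, which must be a
 * power-of-two in size and aligned to that size; without it, the guard only
 * needs PMP-granularity alignment and can be described by a start/end
 * (TOR-style) pair.
 */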
#define ARCH_KERNEL_STACK_RESERVED	Z_RISCV_STACK_GUARD_SIZE

#else /* !CONFIG_PMP_STACK_GUARD */

#define Z_RISCV_STACK_GUARD_SIZE	0

#endif /* CONFIG_PMP_STACK_GUARD */
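/*
 * ARCH_KERNEL_STACK_RESERVED is space set aside at the base of every kernel
 * stack object that is not usable as stack: when the PMP stack guard is
 * enabled it holds the guard region that faults on overflow, otherwise it is
 * zero and the whole object is usable.
 */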
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define ARCH_THREAD_STACK_RESERVED	Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
			Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
	ARCH_THREAD_STACK_SIZE_ADJUST(size)
#else /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
#define ARCH_THREAD_STACK_RESERVED \
	ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
		 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	Z_RISCV_STACK_PMP_ALIGN
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
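/*
 * Worked examples with assumed values (CONFIG_PRIVILEGED_STACK_SIZE=1024,
 * Z_RISCV_STACK_PMP_ALIGN=64, requested size 3000):
 *
 * - power-of-two alignment: Z_POW2_CEIL(MAX(MAX(3000, 1024), 64)) == 4096,
 *   and the stack object itself is aligned to 4096 so one naturally aligned
 *   PMP region can cover it;
 * - otherwise: the usable size rounds up to ROUND_UP(3000, 64) == 3008 and
 *   only PMP-granularity alignment of the object is required.
 */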
#define MSTATUS_IEN		(1UL << 3)
#define MSTATUS_MPP_M		(3UL << 11)
#define MSTATUS_MPIE_EN		(1UL << 7)

#define MSTATUS_FS_OFF		(0UL << 13)
#define MSTATUS_FS_INIT		(1UL << 13)
#define MSTATUS_FS_CLEAN	(2UL << 13)
#define MSTATUS_FS_DIRTY	(3UL << 13)
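/*
 * mstatus.FS is the two-bit floating-point context status field at bits
 * [14:13]: Off disables FPU access, Initial/Clean mean the FP state does not
 * need saving, and Dirty means the FP registers have been modified since the
 * last context save.
 */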
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
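/*
 * On mret, the hart drops to the privilege level held in MPP and copies MPIE
 * into MIE, so restoring this value resumes execution in machine mode with
 * interrupts enabled.
 */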
#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
#endif
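/*
 * Example expansion (hypothetical handler name): ARCH_IRQ_VECTOR_JUMP_CODE(my_isr)
 * yields the assembly string "j my_isr", so the vector table slot holds a jump
 * instruction instead of a handler address.
 */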
#define K_MEM_PARTITION_P_RW_U_RW	((k_mem_partition_attr_t) \
	{PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO	((k_mem_partition_attr_t) \
	{PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA	((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_RO_U_RO	((k_mem_partition_attr_t) \
	{PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA	((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_NA_U_NA	((k_mem_partition_attr_t) \
	{0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX	((k_mem_partition_attr_t) \
	{PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX	((k_mem_partition_attr_t) \
	{PMP_R | PMP_X})
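/*
 * Usage sketch (hypothetical buffer and partition, not from this file): the
 * attribute selects the PMP permissions a user thread gets for a memory
 * domain partition, e.g.
 *
 *   struct k_mem_partition part = {
 *           .start = (uintptr_t)app_buf,
 *           .size = sizeof(app_buf),
 *           .attr = K_MEM_PARTITION_P_RW_U_RW,
 *   };
 *
 * Machine-mode (kernel) accesses are not normally constrained by these
 * flags, which is why the user-mode permission drives the encoding.
 */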
extern void z_irq_spurious(const void *unused);
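/*
 * z_irq_spurious() is the default entry placed in the software-managed ISR
 * table for interrupt lines with no handler connected; if it ever runs it
 * reports a fatal error.
 */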
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_lock();
#else
	unsigned int key;

	__asm__ volatile ("csrrc %0, mstatus, %1"
			  : "=r" (key) : "rK" (MSTATUS_IEN) : "memory");
	return key;
#endif
}
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	z_soc_irq_unlock(key);
#else
	__asm__ volatile ("csrs mstatus, %0"
			  : : "r" (key & MSTATUS_IEN) : "memory");
#endif
}

/* Returns true if interrupts were enabled when the key was captured. */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_unlocked(key);
#else
	return (key & MSTATUS_IEN) != 0;
#endif
}

static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile ("nop");
}
#if defined(CONFIG_SOC_FAMILY_RISCV_PRIVILEGED)