/*
 * Include guard for the RISC-V architecture header.
 * NOTE(review): every line in this extract carries a fused leading number
 * (e.g. "17", "18") that looks like an original-file line number left over
 * from extraction — confirm and strip before compiling.
 */
17#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
18#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
/* Userspace-related definitions follow (matching #endif not visible in this extract). */
26#if defined(CONFIG_USERSPACE)
/* Stack pointers must stay 16-byte aligned per the RISC-V psABI. */
35#define ARCH_STACK_PTR_ALIGN 16
/*
 * Alignment required for PMP-protected stacks: the larger of the PMP
 * granularity and the base stack-pointer alignment.
 */
37#define Z_RISCV_STACK_PMP_ALIGN \
38 MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)
/*
 * PMP stack-guard sizing. The guard region must hold at least one exception
 * stack frame (struct arch_esf) plus the configured minimum guard size.
 */
40#ifdef CONFIG_PMP_STACK_GUARD
/* NAPOT-style PMP entries need a power-of-two sized and aligned region. */
50#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
51#define Z_RISCV_STACK_GUARD_SIZE \
52 Z_POW2_CEIL(MAX(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
53 Z_RISCV_STACK_PMP_ALIGN))
/* Kernel stack objects align to the guard size so the guard is NAPOT-mappable. */
54#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE
/* NOTE(review): an #else (TOR-addressing branch) appears elided here in this extract — confirm. */
56#define Z_RISCV_STACK_GUARD_SIZE \
57 ROUND_UP(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
58 Z_RISCV_STACK_PMP_ALIGN)
59#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN
/* Reserve the guard region at the base of every kernel stack object. */
74#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
/* NOTE(review): an #else (!CONFIG_PMP_STACK_GUARD) appears elided here — no guard reserved. */
77#define Z_RISCV_STACK_GUARD_SIZE 0
/*
 * User thread stack sizing. With power-of-two alignment the whole stack
 * object is rounded up to a NAPOT-compatible size; otherwise sizes only
 * round up to the PMP granularity (TOR addressing).
 */
80#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
/* Guard lives inside the reserved area at the base of the stack object. */
117#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
118#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
119 Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
120 Z_RISCV_STACK_PMP_ALIGN))
/* Object alignment equals the (power-of-two) adjusted size for NAPOT mapping. */
121#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
122 ARCH_THREAD_STACK_SIZE_ADJUST(size)
/* NOTE(review): an #else branch appears elided here in this extract — confirm.
 * In this branch the reserved area also holds the privileged stack.
 */
141#define ARCH_THREAD_STACK_RESERVED \
142 ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
143 Z_RISCV_STACK_PMP_ALIGN)
144#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
145 ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
146#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_STACK_PMP_ALIGN
/* mstatus CSR fields (RISC-V privileged spec). MIE: global M-mode interrupt enable, bit 3. */
161#define MSTATUS_IEN (1UL << 3)
/* MPP (previous privilege mode), bits 12:11 — PRV_* encodings defined elsewhere. */
163#define MSTATUS_MPP_U (PRV_U << 11)
165#define MSTATUS_MPP_S (PRV_S << 11)
167#define MSTATUS_MPP_M (PRV_M << 11)
/* MPIE: previous interrupt-enable state, restored into MIE on mret. */
168#define MSTATUS_MPIE_EN (1UL << 7)
/* FS field, bits 14:13 — floating-point unit state (off/initial/clean/dirty). */
170#define MSTATUS_FS_OFF (0UL << 13)
171#define MSTATUS_FS_INIT (1UL << 13)
172#define MSTATUS_FS_CLEAN (2UL << 13)
173#define MSTATUS_FS_DIRTY (3UL << 13)
/*
 * Default status-register value restored on trap return:
 * previous-privilege and previous-interrupt-enable bits for the mode
 * the kernel runs in (S-mode vs M-mode).
 */
190#ifdef CONFIG_RISCV_S_MODE
191#define RV_STATUS_DEF_RESTORE (SSTATUS_SPP | SSTATUS_SPIE)
/* NOTE(review): an #else appears elided here in this extract (M-mode branch) — confirm. */
193#define RV_STATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
/* Previous-privilege field mask and its user-mode encoding for the active mode. */
199#ifdef CONFIG_RISCV_S_MODE
201#define RV_STATUS_PP SSTATUS_SPP
/* In sstatus, SPP is a single bit: 0 means the trap came from U-mode. */
203#define RV_STATUS_PP_U 0
/* NOTE(review): an #else appears elided here in this extract (M-mode branch) — confirm. */
206#define RV_STATUS_PP MSTATUS_MPP
208#define RV_STATUS_PP_U PRV_U
/*
 * When the IRQ vector table holds code rather than addresses, each entry is
 * an unconditional jump to the handler (matching #endif not visible here).
 */
219#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
220#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
/*
 * Memory-partition attribute macros mapping kernel/user access policies to
 * PMP permission bits (PMP_R/PMP_W/PMP_X).
 * NOTE(review): the continuation line of each backslash-continued definition
 * below appears elided in this extract (only the one for P_RWX_U_RWX
 * survives) — restore the attribute initializers before use.
 */
233#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
235#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
237#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
239#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
241#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
243#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
247#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
248 {PMP_R | PMP_W | PMP_X})
249#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
/* Handler installed for unconnected interrupt lines; the argument is unused. */
261extern void z_irq_spurious(
const void *unused);
/*
 * Select the status CSR and its interrupt-enable bits for the privilege
 * mode the kernel runs in: sstatus/SIE/SPIE for S-mode, mstatus/MIE/MPIE
 * for M-mode.
 */
264#ifdef CONFIG_RISCV_S_MODE
266#define RV_STATUS_CSR "sstatus"
268#define RV_STATUS_IE SSTATUS_SIE
270#define RV_STATUS_PIE SSTATUS_SPIE
/* NOTE(review): an #else appears elided here in this extract (M-mode branch) — confirm. */
273#define RV_STATUS_CSR "mstatus"
275#define RV_STATUS_IE MSTATUS_IEN
277#define RV_STATUS_PIE MSTATUS_MPIE_EN
/*
 * Atomically set the bits of @p val in the active status CSR via csrs.
 * The "rK" constraint lets small immediates use the csrsi encoding;
 * "memory" clobber orders the CSR write against surrounding accesses.
 * NOTE(review): the function's braces appear elided by extraction, and the
 * statement is wrapped across lines — reflow before compiling.
 */
296static ALWAYS_INLINE void z_riscv_status_set(
unsigned long val)
298 __asm__
volatile (
"csrs " RV_STATUS_CSR ", %0" : :
"rK" (val) :
"memory");
/*
 * Atomically clear the bits of @p val in the active status CSR via csrc.
 * Mirror of z_riscv_status_set; same constraints and clobbers.
 * NOTE(review): braces/line breaks appear mangled by extraction — reflow
 * before compiling.
 */
304static ALWAYS_INLINE void z_riscv_status_clear(
unsigned long val)
306 __asm__
volatile (
"csrc " RV_STATUS_CSR ", %0" : :
"rK" (val) :
"memory");
/*
 * Fragments of the arch_irq_lock / arch_irq_unlock / arch_irq_unlocked /
 * arch_cpu_irqs_are_enabled / arch_nop implementations. Each defers to
 * SoC-specific hooks when CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS is set.
 * NOTE(review): the enclosing function signatures, #else branches and #endif
 * lines appear elided by extraction — this span is not standalone code.
 */
315#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
316 return z_soc_irq_lock();
335#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
336 z_soc_irq_unlock(key);
347#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
348 return z_soc_irq_unlocked(key);
/* arch_cpu_irqs_are_enabled: probe by lock/unlock round-trip on custom-ops SoCs… */
357#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
361 unsigned int key = z_soc_irq_lock();
362 bool enabled = z_soc_irq_unlocked(key);
364 z_soc_irq_unlock(key);
/* …otherwise read the interrupt-enable bit straight from the status CSR. */
367 unsigned int status = z_riscv_status_read();
/* arch_nop: single nop instruction. */
375 __asm__
volatile(
"nop");
400#if defined(CONFIG_RISCV_PRIVILEGED)
RISC-V public interrupt handling.
RISC-V specific syscall header.
Per-arch thread definition.
RISC-V public error handling.
Public interface for configuring interrupts.
static ALWAYS_INLINE void arch_nop(void)
Definition arch.h:61
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
Definition arch.h:66
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
Definition arch.h:72
uint64_t sys_clock_cycle_get_64(void)
uint32_t sys_clock_cycle_get_32(void)
static uint32_t arch_k_cycle_get_32(void)
Definition arch.h:44
static ALWAYS_INLINE bool arch_cpu_irqs_are_enabled(void)
Implementation of arch_cpu_irqs_are_enabled.
Definition arch.h:78
static uint64_t arch_k_cycle_get_64(void)
Definition arch.h:51
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
Definition arch.h:61
#define RV_STATUS_IE
Interrupt-enable bit in the status CSR (M-mode: MIE).
Definition arch.h:275
#define RV_STATUS_CSR
Name of the interrupt-status CSR as a string literal (M-mode).
Definition arch.h:273
__UINT32_TYPE__ uint32_t
Definition stdint.h:90
__UINT64_TYPE__ uint64_t
Definition stdint.h:91
__UINT8_TYPE__ uint8_t
Definition stdint.h:88
unsigned int pmp_update_nr
Definition arch.h:258
Definition arm_mpu_v7m.h:145
uint8_t pmp_attr
Definition arch.h:254
Software-managed ISR table.