Zephyr API Documentation 4.4.99
A Scalable Open Source RTOS
Loading...
Searching...
No Matches
arch.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
3 * Contributors: 2018 Antmicro <www.antmicro.com>
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
16
17#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
18#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
19
26#if defined(CONFIG_USERSPACE)
28#endif /* CONFIG_USERSPACE */
29#include <zephyr/irq.h>
30#include <zephyr/sw_isr_table.h>
31#include <zephyr/devicetree.h>
33
34/* stacks, for RISCV architecture stack should be 16byte-aligned */
35#define ARCH_STACK_PTR_ALIGN 16
36
/* Alignment for PMP-protected stack areas: at least the configured PMP
 * granularity, and never below the base stack pointer alignment.
 */
37#define Z_RISCV_STACK_PMP_ALIGN \
38 MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)
39
40#ifdef CONFIG_PMP_STACK_GUARD
41/*
42 * The StackGuard is an area at the bottom of the kernel-mode stack made to
43 * fault when accessed. It is _not_ faulting when in exception mode as we rely
44 * on that area to save the exception stack frame and to process said fault.
45 * Therefore the guard area must be large enough to hold the esf, plus some
46 * configurable stack wiggle room to execute the fault handling code off of,
47 * as well as some guard size to cover possible sudden stack pointer
48 * displacement before the fault.
49 */
50#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
/* Power-of-two sizing: the guard size doubles as the kernel stack object
 * alignment so the guard can be covered by a single aligned PMP region.
 */
51#define Z_RISCV_STACK_GUARD_SIZE \
52 Z_POW2_CEIL(MAX(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
53 Z_RISCV_STACK_PMP_ALIGN))
54#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE
55#else
/* No power-of-two constraint: only round the guard up to the PMP alignment. */
56#define Z_RISCV_STACK_GUARD_SIZE \
57 ROUND_UP(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
58 Z_RISCV_STACK_PMP_ALIGN)
59#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN
60#endif
61
62/* Kernel-only stacks have the following layout if a stack guard is enabled:
63 *
64 * +------------+ <- thread.stack_obj
65 * | Guard | } Z_RISCV_STACK_GUARD_SIZE
66 * +------------+ <- thread.stack_info.start
67 * | Kernel |
68 * | stack |
69 * | |
70 * +............|
71 * | TLS | } thread.stack_info.delta
72 * +------------+ <- thread.stack_info.start + thread.stack_info.size
73 */
74#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
75
76#else /* !CONFIG_PMP_STACK_GUARD */
/* No stack guard configured: reserve no guard space. */
77#define Z_RISCV_STACK_GUARD_SIZE 0
78#endif
79
80#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
81/* The privilege elevation stack is located in another area of memory
82 * generated at build time by gen_kobject_list.py
83 *
84 * +------------+ <- thread.arch.priv_stack_start
85 * | Guard | } Z_RISCV_STACK_GUARD_SIZE
86 * +------------+
87 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
88 * +------------+ <- thread.arch.priv_stack_start +
89 * CONFIG_PRIVILEGED_STACK_SIZE +
90 * Z_RISCV_STACK_GUARD_SIZE
91 *
92 * The main stack will be initially (or potentially only) used by kernel
93 * mode so we need to make room for a possible stack guard area when enabled:
94 *
95 * +------------+ <- thread.stack_obj
96 * | Guard | } Z_RISCV_STACK_GUARD_SIZE
97 * +............| <- thread.stack_info.start
98 * | Thread |
99 * | stack |
100 * | |
101 * +............|
102 * | TLS | } thread.stack_info.delta
103 * +------------+ <- thread.stack_info.start + thread.stack_info.size
104 *
105 * When transitioning to user space, the guard area will be removed from
106 * the main stack. Any thread running in user mode will have full access
107 * to the region denoted by thread.stack_info. Make it PMP-NAPOT compatible.
108 *
109 * +------------+ <- thread.stack_obj = thread.stack_info.start
110 * | Thread |
111 * | stack |
112 * | |
113 * +............|
114 * | TLS | } thread.stack_info.delta
115 * +------------+ <- thread.stack_info.start + thread.stack_info.size
116 */
/* Reserve room for the guard only; the whole stack object is sized and
 * aligned as a power of two so a single NAPOT PMP entry can cover it.
 */
117#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
118#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
119 Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
120 Z_RISCV_STACK_PMP_ALIGN))
121#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
122 ARCH_THREAD_STACK_SIZE_ADJUST(size)
123
124#else /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
125
126/* The stack object will contain the PMP guard, the privilege stack, and then
127 * the usermode stack buffer in that order:
128 *
129 * +------------+ <- thread.stack_obj
130 * | Guard | } Z_RISCV_STACK_GUARD_SIZE
131 * +------------+
132 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
133 * +------------+ <- thread.stack_info.start
134 * | Thread |
135 * | stack |
136 * | |
137 * +............|
138 * | TLS | } thread.stack_info.delta
139 * +------------+ <- thread.stack_info.start + thread.stack_info.size
140 */
/* Guard and privileged stack live inside the stack object; everything is
 * rounded to the PMP alignment instead of forcing power-of-two sizes.
 */
141#define ARCH_THREAD_STACK_RESERVED \
142 ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
143 Z_RISCV_STACK_PMP_ALIGN)
144#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
145 ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
146#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_STACK_PMP_ALIGN
147#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
148
149#ifdef CONFIG_64BIT
/* Machine register width in bytes and its log2 (RV64). */
150#define RV_REGSIZE 8
151#define RV_REGSHIFT 3
152#else
/* RV32: 4-byte registers. */
153#define RV_REGSIZE 4
154#define RV_REGSHIFT 2
155#endif
156
157/* Common mstatus bits. All supported cores today have the same
158 * layouts.
159 */
160
/* Global interrupt-enable bit (bit 3). */
161#define MSTATUS_IEN (1UL << 3)
/* Previous-privilege (MPP) field encodings at bits 11-12 for U/S/M. */
163#define MSTATUS_MPP_U (PRV_U << 11)
165#define MSTATUS_MPP_S (PRV_S << 11)
167#define MSTATUS_MPP_M (PRV_M << 11)
/* Previous interrupt-enable bit (bit 7). */
168#define MSTATUS_MPIE_EN (1UL << 7)
169
/* FS (floating-point state) field values, bits 13-14. */
170#define MSTATUS_FS_OFF (0UL << 13)
171#define MSTATUS_FS_INIT (1UL << 13)
172#define MSTATUS_FS_CLEAN (2UL << 13)
173#define MSTATUS_FS_DIRTY (3UL << 13)
174
/* Default previous-privilege/previous-IE bits for the active privilege
 * model. NOTE(review): presumably the value written into the status CSR
 * before an exception return — confirm against the asm that consumes it.
 */
190#ifdef CONFIG_RISCV_S_MODE
191#define RV_STATUS_DEF_RESTORE (SSTATUS_SPP | SSTATUS_SPIE)
192#else
193#define RV_STATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
194#endif
195
196/* Previous-privilege field and its user-mode value, abstracted across M/S mode.
197 * Use as: (esf->mstatus & RV_STATUS_PP) == RV_STATUS_PP_U
198 */
199#ifdef CONFIG_RISCV_S_MODE
201#define RV_STATUS_PP SSTATUS_SPP
203#define RV_STATUS_PP_U 0
204#else
206#define RV_STATUS_PP MSTATUS_MPP
208#define RV_STATUS_PP_U PRV_U
209#endif
210
211#ifndef _ASMLANGUAGE
212#include <zephyr/sys/util.h>
213#include <zephyr/sys/slist.h>
214
215#ifdef __cplusplus
216extern "C" {
217#endif
218
219#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
/* Produce a "j <vector>" instruction as assembler text, for building a
 * code-based (jump instruction) IRQ vector table entry.
 */
220#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
221#endif
222
223/* Kernel macros for memory attribution
224 * (access permissions and cache-ability).
225 *
226 * The macros are to be stored in k_mem_partition_attr_t
227 * objects. The format of a k_mem_partition_attr_t object
228 * is an uint8_t composed by configuration register flags
229 * located in arch/riscv/include/core_pmp.h
230 */
231
232/* Read-Write access permission attributes */
233#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
234 {PMP_R | PMP_W})
/* Note: several supervisor/user combinations collapse to the same PMP
 * flags here (e.g. P_RW_U_RO and P_RO_U_RO both expand to {PMP_R}).
 */
235#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
236 {PMP_R})
237#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
238 {0})
239#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
240 {PMP_R})
241#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
242 {0})
243#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
244 {0})
245
246/* Execution-allowed attributes */
247#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
248 {PMP_R | PMP_W | PMP_X})
249#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
250 {PMP_R | PMP_X})
251
252/* Typedef for the k_mem_partition attribute */
253typedef struct {
256
/* Per-memory-domain architecture data: a counter tracking PMP updates.
 * NOTE(review): exact update semantics are defined by the PMP code
 * elsewhere in the tree — confirm against core_pmp.c.
 */
257struct arch_mem_domain {
258 unsigned int pmp_update_nr;
259};
260
/* Default handler installed for unexpected/spurious interrupts. */
261extern void z_irq_spurious(const void *unused);
262
263/* Privilege-level abstraction for IRQ enable/disable CSR and bit */
264#ifdef CONFIG_RISCV_S_MODE
/* S-mode kernel: operate on sstatus with the SIE/SPIE bits. */
266#define RV_STATUS_CSR "sstatus"
268#define RV_STATUS_IE SSTATUS_SIE
270#define RV_STATUS_PIE SSTATUS_SPIE
271#else
/* M-mode kernel: operate on mstatus with the MIE/MPIE bits. */
273#define RV_STATUS_CSR "mstatus"
275#define RV_STATUS_IE MSTATUS_IEN
277#define RV_STATUS_PIE MSTATUS_MPIE_EN
278#endif
279
/* Read the raw value of the privilege-appropriate status CSR
 * (mstatus or sstatus, per RV_STATUS_CSR).
 */
285static ALWAYS_INLINE unsigned long z_riscv_status_read(void)
286{
287 unsigned long __rv;
288
289 __asm__ volatile ("csrr %0, " RV_STATUS_CSR : "=r" (__rv));
290 return __rv;
291}
292
/* Atomically set the given bits in the status CSR. Note: csrs performs a
 * read-modify-write bit-set, not a full register write.
 */
296static ALWAYS_INLINE void z_riscv_status_set(unsigned long val)
297{
298 __asm__ volatile ("csrs " RV_STATUS_CSR ", %0" : : "rK" (val) : "memory");
299}
300
/* Atomically clear the given bits in the status CSR (csrc clears only the
 * bits set in val, leaving the rest of the register untouched).
 */
304static ALWAYS_INLINE void z_riscv_status_clear(unsigned long val)
305{
306 __asm__ volatile ("csrc " RV_STATUS_CSR ", %0" : : "rK" (val) : "memory");
307}
308
309/*
310 * use atomic instruction csrrc to lock global irq
311 * csrrc: atomic read and clear bits in CSR register
312 */
/* Disable interrupts and return the previous status word as the lock key.
 * Only the IE bit of the key is examined by arch_irq_unlock()/
 * arch_irq_unlocked().
 */
313static ALWAYS_INLINE unsigned int arch_irq_lock(void)
314{
315#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	/* SoC supplies its own lock primitive. */
316 return z_soc_irq_lock();
317#else
318 unsigned int key;
319
	/* csrrc reads the old status and clears the IE bit in one instruction. */
320 __asm__ volatile ("csrrc %0, " RV_STATUS_CSR ", %1"
321 : "=r" (key)
322 : "rK" (RV_STATUS_IE)
323 : "memory");
324
325 return key;
326#endif
327}
328
329/*
330 * use atomic instruction csrs to unlock global irq
331 * csrs: atomic set bits in CSR register
332 */
/* Restore the interrupt state captured by arch_irq_lock(): re-set the IE
 * bit only if it was set in the key (masking makes a key with IE clear a
 * no-op).
 */
333static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
334{
335#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
336 z_soc_irq_unlock(key);
337#else
338 __asm__ volatile ("csrs " RV_STATUS_CSR ", %0"
339 :
340 : "r" (key & RV_STATUS_IE)
341 : "memory");
342#endif
343}
344
345static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
346{
347#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
348 return z_soc_irq_unlocked(key);
349#else
350 return (key & RV_STATUS_IE) != 0;
351#endif
352}
353
/* NOTE(review): the signature line of this function was lost in
 * extraction; the cross-reference index identifies it as
 * arch_cpu_irqs_are_enabled(void) returning bool — confirm against the
 * original header. It reports whether interrupts are currently enabled.
 */
356{
357#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
358 /* No direct probe primitive in the SoC ops; fall back to
359 * briefly locking and restoring.
360 */
361 unsigned int key = z_soc_irq_lock();
362 bool enabled = z_soc_irq_unlocked(key);
363
364 z_soc_irq_unlock(key);
365 return enabled;
366#else
	/* Probe the live status CSR rather than a saved lock key. */
367 unsigned int status = z_riscv_status_read();
368
369 return (status & RV_STATUS_IE) != 0;
370#endif
371}
372
/* Execute a single "nop" instruction (architectural no-op). */
373static ALWAYS_INLINE void arch_nop(void)
374{
375 __asm__ volatile("nop");
376}
377
379
/*
 * Return the current system clock cycle count, truncated to 32 bits,
 * as reported by the platform clock driver.
 */
static inline uint32_t arch_k_cycle_get_32(void)
{
	uint32_t now = sys_clock_cycle_get_32();

	return now;
}
384
386
/*
 * Return the current 64-bit system clock cycle count, as reported by the
 * platform clock driver.
 */
static inline uint64_t arch_k_cycle_get_64(void)
{
	uint64_t now = sys_clock_cycle_get_64();

	return now;
}
391
393
394#ifdef __cplusplus
395}
396#endif
397
398#endif /*_ASMLANGUAGE */
399
400#if defined(CONFIG_RISCV_PRIVILEGED)
402#endif
403
404
405#endif
RISC-V public interrupt handling.
RISCV specific syscall header.
Per-arch thread definition.
Devicetree main header.
RISCV public error handling.
#define ALWAYS_INLINE
Definition common.h:161
Public interface for configuring interrupts.
static ALWAYS_INLINE void arch_nop(void)
Definition arch.h:61
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
Definition arch.h:66
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
Definition arch.h:72
uint64_t sys_clock_cycle_get_64(void)
uint32_t sys_clock_cycle_get_32(void)
static uint32_t arch_k_cycle_get_32(void)
Definition arch.h:44
static ALWAYS_INLINE bool arch_cpu_irqs_are_enabled(void)
Implementation of arch_cpu_irqs_are_enabled.
Definition arch.h:78
static uint64_t arch_k_cycle_get_64(void)
Definition arch.h:51
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
Definition arch.h:61
#define RV_STATUS_IE
Interrupt-enable bit in the status CSR (M-mode: MIE).
Definition arch.h:275
#define RV_STATUS_CSR
Name of the interrupt-status CSR as a string literal (M-mode).
Definition arch.h:273
__UINT32_TYPE__ uint32_t
Definition stdint.h:90
__UINT64_TYPE__ uint64_t
Definition stdint.h:91
__UINT8_TYPE__ uint8_t
Definition stdint.h:88
Definition arch.h:46
unsigned int pmp_update_nr
Definition arch.h:258
Definition arm_mpu_v7m.h:145
uint8_t pmp_attr
Definition arch.h:254
Software-managed ISR table.
Misc utilities.