Zephyr API Documentation 4.4.0-rc1
A Scalable Open Source RTOS
Loading...
Searching...
No Matches
arch.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
3 * Contributors: 2018 Antmicro <www.antmicro.com>
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
16
17#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
18#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
19
26#if defined(CONFIG_USERSPACE)
28#endif /* CONFIG_USERSPACE */
29#include <zephyr/irq.h>
30#include <zephyr/sw_isr_table.h>
31#include <zephyr/devicetree.h>
33
34/* stacks, for RISCV architecture stack should be 16byte-aligned */
35#define ARCH_STACK_PTR_ALIGN 16
36
/*
 * Alignment required for PMP-covered stack areas: the PMP granularity
 * when it is coarser than the baseline 16-byte stack alignment.
 */
37#define Z_RISCV_STACK_PMP_ALIGN \
38 MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)
39
40#ifdef CONFIG_PMP_STACK_GUARD
41/*
42 * The StackGuard is an area at the bottom of the kernel-mode stack made to
43 * fault when accessed. It is _not_ faulting when in exception mode as we rely
44 * on that area to save the exception stack frame and to process said fault.
45 * Therefore the guard area must be large enough to hold the esf, plus some
46 * configurable stack wiggle room to execute the fault handling code off of,
47 * as well as some guard size to cover possible sudden stack pointer
48 * displacement before the fault.
49 */
50#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
/*
 * Power-of-two mode: round the guard size up to a power of two so the
 * same value can double as the kernel stack object alignment below.
 */
51#define Z_RISCV_STACK_GUARD_SIZE \
52 Z_POW2_CEIL(MAX(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
53 Z_RISCV_STACK_PMP_ALIGN))
54#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE
55#else
/* Otherwise just round up to the PMP granularity / stack alignment. */
56#define Z_RISCV_STACK_GUARD_SIZE \
57 ROUND_UP(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
58 Z_RISCV_STACK_PMP_ALIGN)
59#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN
60#endif
61#elif defined(CONFIG_CUSTOM_STACK_GUARD)
62/*
63 * The custom stack guard reserved area is located at the bottom of the
64 * kernel-mode stack. When a stack overflow occurs, this area is used to
65 * save the exception stack frame and to handle the resulting fault.
66 * Therefore, the reserved area must be large enough to hold the esf, plus
67 * some configurable stack wiggle room to execute fault-handling code safely.
68 */
69#define Z_RISCV_STACK_GUARD_SIZE \
70 ROUND_UP(sizeof(struct arch_esf) + CONFIG_CUSTOM_STACK_GUARD_RESERVED_SIZE, \
71 ARCH_STACK_PTR_ALIGN)
72#else /* !CONFIG_PMP_STACK_GUARD && !CONFIG_CUSTOM_STACK_GUARD */
/* No stack guard configured: no space is reserved. */
73#define Z_RISCV_STACK_GUARD_SIZE 0
74#endif
75
76#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_CUSTOM_STACK_GUARD)
77/* Kernel-only stacks have the following layout if a stack guard is enabled:
78 *
79 * +------------+ <- thread.stack_obj
80 * | Guard / | } Z_RISCV_STACK_GUARD_SIZE
81 * | Reserved |
82 * +------------+ <- thread.stack_info.start
83 * | Kernel |
84 * | stack |
85 * | |
86 * +............|
87 * | TLS | } thread.stack_info.delta
88 * +------------+ <- thread.stack_info.start + thread.stack_info.size
89 */
/* Reserve the guard area at the base of every kernel-only stack object. */
90#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
91#endif
92
93#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
94/* The privilege elevation stack is located in another area of memory
95 * generated at build time by gen_kobject_list.py
96 *
97 * +------------+ <- thread.arch.priv_stack_start
98 * | Guard / | } Z_RISCV_STACK_GUARD_SIZE
99 * | Reserved |
100 * +------------+
101 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
102 * +------------+ <- thread.arch.priv_stack_start +
103 * CONFIG_PRIVILEGED_STACK_SIZE +
104 * Z_RISCV_STACK_GUARD_SIZE
105 *
106 * The main stack will be initially (or potentially only) used by kernel
107 * mode so we need to make room for a possible stack guard area when enabled:
108 *
109 * +------------+ <- thread.stack_obj
110 * | Guard / | } Z_RISCV_STACK_GUARD_SIZE
111 * | Reserved |
112 * +............| <- thread.stack_info.start
113 * | Thread |
114 * | stack |
115 * | |
116 * +............|
117 * | TLS | } thread.stack_info.delta
118 * +------------+ <- thread.stack_info.start + thread.stack_info.size
119 *
120 * When transitioning to user space, the guard area will be removed from
121 * the main stack. Any thread running in user mode will have full access
122 * to the region denoted by thread.stack_info. Make it PMP-NAPOT compatible.
123 *
124 * +------------+ <- thread.stack_obj = thread.stack_info.start
125 * | Thread |
126 * | stack |
127 * | |
128 * +............|
129 * | TLS | } thread.stack_info.delta
130 * +------------+ <- thread.stack_info.start + thread.stack_info.size
131 */
/* Guard only; the privilege stack lives in a separate generated area. */
132#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
/* Round the usable size up to a power of two (PMP-NAPOT compatible). */
133#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
134 Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
135 Z_RISCV_STACK_PMP_ALIGN))
/* NAPOT-style regions are aligned to their own (power-of-two) size. */
136#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
137 ARCH_THREAD_STACK_SIZE_ADJUST(size)
138
139#else /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
140
141/* The stack object will contain the PMP guard (or custom stack guard reserved area),
142 * the privilege stack, and then the usermode stack buffer in that order:
143 *
144 * +------------+ <- thread.stack_obj
145 * | Guard / | } Z_RISCV_STACK_GUARD_SIZE
146 * | Reserved |
147 * +------------+
148 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
149 * +------------+ <- thread.stack_info.start
150 * | Thread |
151 * | stack |
152 * | |
153 * +............|
154 * | TLS | } thread.stack_info.delta
155 * +------------+ <- thread.stack_info.start + thread.stack_info.size
156 */
/* Guard and privilege stack both live inside the stack object here. */
157#define ARCH_THREAD_STACK_RESERVED \
158 ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
159 Z_RISCV_STACK_PMP_ALIGN)
160#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
161 ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
162#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_STACK_PMP_ALIGN
163#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
164
/* Machine register width in bytes (RV_REGSIZE) and its log2 (RV_REGSHIFT). */
165#ifdef CONFIG_64BIT
166#define RV_REGSIZE 8
167#define RV_REGSHIFT 3
168#else
169#define RV_REGSIZE 4
170#define RV_REGSHIFT 2
171#endif
172
173/* Common mstatus bits. All supported cores today have the same
174 * layouts.
175 */
176
/* mstatus.MIE (bit 3): machine-mode global interrupt enable */
177#define MSTATUS_IEN (1UL << 3)
/* mstatus.MPP (bits 11-12): previous privilege mode, here forced to M-mode */
178#define MSTATUS_MPP_M (3UL << 11)
/* mstatus.MPIE (bit 7): previous interrupt enable, copied into MIE on mret */
179#define MSTATUS_MPIE_EN (1UL << 7)
180
/* mstatus.FS (bits 13-14): floating-point unit context state encoding */
181#define MSTATUS_FS_OFF (0UL << 13)
182#define MSTATUS_FS_INIT (1UL << 13)
183#define MSTATUS_FS_CLEAN (2UL << 13)
184#define MSTATUS_FS_DIRTY (3UL << 13)
185
186/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
187 * platforms:
188 * - Preserve machine privileges in MPP. If you see any documentation
189 * telling you that MPP is read-only on this SoC, don't believe its
190 * lies.
191 * - Enable interrupts when exiting from exception into a new thread
192 * by setting MPIE now, so it will be copied into IE on mret.
193 */
194#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
195
196#ifndef _ASMLANGUAGE
197#include <zephyr/sys/util.h>
198
199#ifdef __cplusplus
200extern "C" {
201#endif
202
203#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
/* Assembly snippet "j <v>": a direct jump to handler v, used to fill
 * code-based (rather than address-based) IRQ vector table entries. */
204#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
205#endif
206
207/* Kernel macros for memory attribution
208 * (access permissions and cache-ability).
209 *
210 * The macros are to be stored in k_mem_partition_attr_t
211 * objects. The format of a k_mem_partition_attr_t object
212 * is an uint8_t composed by configuration register flags
213 * located in arch/riscv/include/core_pmp.h
214 */
215
/* NOTE(review): several distinct logical permissions share one encoding
 * here (e.g. P_RW_U_RO == P_RO_U_RO == {PMP_R}) — presumably only the
 * user-mode permissions are encoded in the PMP flags; confirm against
 * arch/riscv/include/core_pmp.h. */
216/* Read-Write access permission attributes */
217#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
218 {PMP_R | PMP_W})
219#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
220 {PMP_R})
221#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
222 {0})
223#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
224 {PMP_R})
225#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
226 {0})
227#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
228 {0})
229
230/* Execution-allowed attributes */
231#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
232 {PMP_R | PMP_W | PMP_X})
233#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
234 {PMP_R | PMP_X})
235
236/* Typedef for the k_mem_partition attribute */
237typedef struct {
240
/* Per-memory-domain architecture-specific data. */
241struct arch_mem_domain {
 /* Update counter for this domain's PMP configuration — presumably
  * bumped when the domain changes so threads can detect stale PMP
  * state; confirm against the arch/riscv PMP implementation. */
242 unsigned int pmp_update_nr;
243};
244
245extern void z_irq_spurious(const void *unused);
246
247/*
248 * use atomic instruction csrrc to lock global irq
249 * csrrc: atomic read and clear bits in CSR register
 *
 * Returns the previous mstatus value as the lock key so that
 * arch_irq_unlock() can restore the prior interrupt-enable state.
250 */
251static ALWAYS_INLINE unsigned int arch_irq_lock(void)
252{
253#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
 /* SoC supplies its own IRQ lock primitive */
254 return z_soc_irq_lock();
255#else
256 unsigned int key;
257
 /* Atomically clear mstatus.MIE and read back the old mstatus value */
258 __asm__ volatile ("csrrc %0, mstatus, %1"
259 : "=r" (key)
260 : "rK" (MSTATUS_IEN)
261 : "memory");
262
263 return key;
264#endif
265}
266
267/*
268 * use atomic instruction csrs to unlock global irq
269 * csrs: atomic set bits in CSR register
 *
 * Only the MIE bit of the key is restored: if interrupts were already
 * disabled when the key was captured, this call is a no-op.
270 */
271static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
272{
273#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
 /* SoC supplies its own IRQ unlock primitive */
274 z_soc_irq_unlock(key);
275#else
 /* Atomically set mstatus.MIE iff it was set in the saved key */
276 __asm__ volatile ("csrs mstatus, %0"
277 :
278 : "r" (key & MSTATUS_IEN)
279 : "memory");
280#endif
281}
282
283static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
284{
285#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
286 return z_soc_irq_unlocked(key);
287#else
288 return (key & MSTATUS_IEN) != 0;
289#endif
290}
291
/* Architecture no-op: executes a single RISC-V "nop" instruction. */
292static ALWAYS_INLINE void arch_nop(void)
293{
294 __asm__ volatile("nop");
295}
296
298
/* Return the current system clock cycle count, truncated to 32 bits. */
static inline uint32_t arch_k_cycle_get_32(void)
{
	uint32_t cycles = sys_clock_cycle_get_32();

	return cycles;
}
303
305
/* Return the current system clock cycle count as a full 64-bit value. */
static inline uint64_t arch_k_cycle_get_64(void)
{
	uint64_t cycles = sys_clock_cycle_get_64();

	return cycles;
}
310
312
313#ifdef __cplusplus
314}
315#endif
316
317#endif /*_ASMLANGUAGE */
318
319#if defined(CONFIG_RISCV_PRIVILEGED)
321#endif
322
323
324#endif
RISC-V public interrupt handling.
RISCV specific syscall header.
Per-arch thread definition.
Devicetree main header.
RISCV public error handling.
#define ALWAYS_INLINE
Definition common.h:161
Public interface for configuring interrupts.
static ALWAYS_INLINE void arch_nop(void)
Definition arch.h:61
uint64_t sys_clock_cycle_get_64(void)
static uint64_t arch_k_cycle_get_64(void)
Definition arch.h:75
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
Definition arch.h:72
uint32_t sys_clock_cycle_get_32(void)
static uint32_t arch_k_cycle_get_32(void)
Definition arch.h:44
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
Definition arch.h:61
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
Definition arch.h:251
#define MSTATUS_IEN
Definition arch.h:177
__UINT32_TYPE__ uint32_t
Definition stdint.h:90
__UINT64_TYPE__ uint64_t
Definition stdint.h:91
__UINT8_TYPE__ uint8_t
Definition stdint.h:88
Definition arch.h:46
unsigned int pmp_update_nr
Definition arch.h:242
Definition arm_mpu_v7m.h:145
uint8_t pmp_attr
Definition arch.h:238
Software-managed ISR table.
Misc utilities.