arch.h
/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_

#if defined(CONFIG_USERSPACE)
#include <zephyr/arch/riscv/syscall.h>
#endif /* CONFIG_USERSPACE */
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <soc.h>
#include <zephyr/devicetree.h>

/* Stacks: the RISC-V architecture requires 16-byte stack alignment. */
#define ARCH_STACK_PTR_ALIGN 16

#define Z_RISCV_STACK_PMP_ALIGN \
	MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)

#ifdef CONFIG_PMP_STACK_GUARD
/*
 * The StackGuard is an area at the bottom of the kernel-mode stack made to
 * fault when accessed. It does _not_ fault when accessed in exception mode,
 * as we rely on that area to save the exception stack frame and to process
 * said fault. Therefore the guard area must be large enough to hold the esf,
 * plus some configurable stack wiggle room to execute the fault handling
 * code off of, as well as some guard size to cover possible sudden stack
 * pointer displacement before the fault.
 */
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define Z_RISCV_STACK_GUARD_SIZE \
	Z_POW2_CEIL(MAX(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
			Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE
#else
#define Z_RISCV_STACK_GUARD_SIZE \
	ROUND_UP(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
		 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN
#endif

/* Kernel-only stacks have the following layout if a stack guard is enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Kernel     |
 * |   stack    |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
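
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * sizeof(z_arch_esf_t) == 136, CONFIG_PMP_STACK_GUARD_MIN_SIZE == 1024
 * and Z_RISCV_STACK_PMP_ALIGN == 16:
 *
 *   MAX(136 + 1024, 16) = 1160
 *   Z_POW2_CEIL(1160)   = 2048   (CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)
 *   ROUND_UP(1160, 16)  = 1168   (otherwise)
 *
 * so each kernel stack object reserves 2048 or 1168 guard bytes,
 * depending on the alignment scheme.
 */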

#else /* !CONFIG_PMP_STACK_GUARD */
#define Z_RISCV_STACK_GUARD_SIZE 0
#endif

#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
/* The privilege elevation stack is located in another area of memory
 * generated at build time by gen_kobject_list.py
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.arch.priv_stack_start +
 *                   CONFIG_PRIVILEGED_STACK_SIZE +
 *                   Z_RISCV_STACK_GUARD_SIZE
 *
 * The main stack will be initially (or potentially only) used by kernel
 * mode so we need to make room for a possible stack guard area when
 * enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +............| <- thread.stack_info.start
 * | Thread     |
 * |   stack    |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 *
 * When transitioning to user space, the guard area will be removed from
 * the main stack. Any thread running in user mode will have full access
 * to the region denoted by thread.stack_info. Make it PMP-NAPOT compatible.
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * |   stack    |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
			Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
	ARCH_THREAD_STACK_SIZE_ADJUST(size)

#else /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */

/* The stack object will contain the PMP guard, the privilege stack, and then
 * the usermode stack buffer in that order:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * |   stack    |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED \
	ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
		 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_STACK_PMP_ALIGN
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
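
/*
 * Worked example (hypothetical numbers, for illustration only): for a
 * requested thread stack of 4000 bytes with CONFIG_PRIVILEGED_STACK_SIZE
 * == 1024 and Z_RISCV_STACK_PMP_ALIGN == 16:
 *
 * - With CONFIG_PMP_POWER_OF_TWO_ALIGNMENT:
 *     ARCH_THREAD_STACK_SIZE_ADJUST(4000) = Z_POW2_CEIL(4000) = 4096
 *   and the stack object is aligned to 4096 bytes, so the whole buffer
 *   can be covered by a single PMP NAPOT region.
 *
 * - Otherwise:
 *     ARCH_THREAD_STACK_SIZE_ADJUST(4000) = ROUND_UP(4000, 16) = 4000
 *   with the guard and privileged stack accounted for separately via
 *   ARCH_THREAD_STACK_RESERVED.
 */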

#ifdef CONFIG_64BIT
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif
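
/*
 * Example: RV_REGSHIFT converts a register index into a byte offset in a
 * saved-register array: slot n lives at offset (n << RV_REGSHIFT), i.e.
 * n * 8 on RV64 and n * 4 on RV32.
 */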

/* Common mstatus bits. All supported cores today have the same
 * layout.
 */

#define MSTATUS_IEN (1UL << 3)
#define MSTATUS_MPP_M (3UL << 11)
#define MSTATUS_MPIE_EN (1UL << 7)

#define MSTATUS_FS_OFF (0UL << 13)
#define MSTATUS_FS_INIT (1UL << 13)
#define MSTATUS_FS_CLEAN (2UL << 13)
#define MSTATUS_FS_DIRTY (3UL << 13)

/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
 * platforms:
 * - Preserve machine privileges in MPP. If you see any documentation
 *   telling you that MPP is read-only on this SoC, don't believe its
 *   lies.
 * - Enable interrupts when exiting from exception into a new thread
 *   by setting MPIE now, so it will be copied into IE on mret.
 */
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
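
/*
 * For reference, MSTATUS_DEF_RESTORE evaluates to
 * (3UL << 11) | (1UL << 7) == 0x1800 | 0x0080 == 0x1880,
 * i.e. MPP = machine mode with MPIE set, so mret returns to machine
 * mode with interrupts enabled.
 */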

#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
#endif
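
/*
 * Example: ARCH_IRQ_VECTOR_JUMP_CODE(my_isr) expands to the string
 * "j my_isr" (string-literal concatenation of "j " and STRINGIFY(my_isr)),
 * i.e. an unconditional jump a vector table entry can be generated from;
 * "my_isr" is a placeholder name.
 */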

/* Kernel macros for memory attribution
 * (access permissions and cache-ability).
 *
 * The macros are to be stored in k_mem_partition_attr_t
 * objects. The format of a k_mem_partition_attr_t object
 * is a uint8_t composed of configuration register flags
 * located in arch/riscv/include/core_pmp.h
 */

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
	{PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO ((k_mem_partition_attr_t) \
	{PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
	{PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
	{0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) \
	{PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
	{PMP_R | PMP_X})
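
/*
 * Usage sketch (names and sizes are hypothetical): an attribute is
 * passed when defining a memory partition for a user-mode memory
 * domain; the buffer must satisfy the platform's PMP alignment and
 * size rules.
 *
 *   static uint8_t __aligned(64) shared_buf[64];
 *   K_MEM_PARTITION_DEFINE(shared_part, shared_buf, sizeof(shared_buf),
 *                          K_MEM_PARTITION_P_RW_U_RW);
 */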

/* Typedef for the k_mem_partition attribute */
typedef struct {
	uint8_t pmp_attr;
} k_mem_partition_attr_t;

struct arch_mem_domain {
	unsigned int pmp_update_nr;
};

extern void z_irq_spurious(const void *unused);

/*
 * Use the atomic instruction csrrc to lock global IRQs;
 * csrrc: atomic read-and-clear of bits in a CSR register.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_lock();
#else
	unsigned int key;

	__asm__ volatile ("csrrc %0, mstatus, %1"
			  : "=r" (key)
			  : "rK" (MSTATUS_IEN)
			  : "memory");

	return key;
#endif
}

/*
 * Use the atomic instruction csrs to unlock global IRQs;
 * csrs: atomic set of bits in a CSR register.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	z_soc_irq_unlock(key);
#else
	__asm__ volatile ("csrs mstatus, %0"
			  :
			  : "r" (key & MSTATUS_IEN)
			  : "memory");
#endif
}

static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_unlocked(key);
#else
	return (key & MSTATUS_IEN) != 0;
#endif
}
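
/*
 * Typical usage (sketch): bracket a critical section and restore the
 * previous interrupt state, which allows safe nesting:
 *
 *   unsigned int key = arch_irq_lock();
 *   ... code that must not be interrupted ...
 *   arch_irq_unlock(key);
 *
 * arch_irq_unlocked(key) tells whether IRQs were enabled before the
 * matching lock. Portable code normally goes through the generic
 * irq_lock()/irq_unlock() wrappers rather than these arch_ routines.
 */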

static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

extern uint32_t sys_clock_cycle_get_32(void);

static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

extern uint64_t sys_clock_cycle_get_64(void);

static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}
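
/*
 * Example (sketch): measure an operation's duration in hardware cycles;
 * unsigned subtraction keeps the delta correct across a single counter
 * wrap (do_work() is a placeholder):
 *
 *   uint32_t start = arch_k_cycle_get_32();
 *   do_work();
 *   uint32_t cycles = arch_k_cycle_get_32() - start;
 */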

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#if defined(CONFIG_SOC_FAMILY_RISCV_PRIVILEGED)
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_ */