Zephyr API Documentation 3.5.0
arch.h
/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_

#include <zephyr/irq.h>

#include <zephyr/devicetree.h>
#if !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__)
#include <zephyr/types.h>
#include <zephyr/toolchain.h>
#include <zephyr/sw_isr_table.h>
#include <xtensa/config/core.h>
#include <zephyr/debug/sparse.h>

#ifdef CONFIG_KERNEL_COHERENCE
#define ARCH_STACK_PTR_ALIGN XCHAL_DCACHE_LINESIZE
#else
#define ARCH_STACK_PTR_ALIGN 16
#endif

/* Xtensa GPRs are often designated by two different names */
#define sys_define_gpr_with_alias(name1, name2) union { uint32_t name1, name2; }

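/* Illustrative sketch, not part of the upstream header: the anonymous
 * union produced by sys_define_gpr_with_alias() lets one 32-bit slot
 * be read under either register name.  The struct and field names
 * below are hypothetical.
 */
struct xtensa_gpr_alias_example {
	sys_define_gpr_with_alias(a1, sp); /* a1 doubles as the stack pointer */
};
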
#ifdef __cplusplus
extern "C" {
#endif

extern void xtensa_arch_except(int reason_p);

#define ARCH_EXCEPT(reason_p) do { \
	xtensa_arch_except(reason_p); \
	CODE_UNREACHABLE; \
} while (false)
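
/* Usage note (illustrative, not from this header): the kernel's
 * fatal-error paths invoke this macro with a reason code, e.g.
 * ARCH_EXCEPT(K_ERR_KERNEL_PANIC).  xtensa_arch_except() raises an
 * exception carrying the code so the fatal-error handler runs, and
 * CODE_UNREACHABLE tells the compiler that control never returns.
 */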

/* internal routine documented in C file, needed by IRQ_CONNECT() macro */
extern void z_irq_priority_set(uint32_t irq, uint32_t prio, uint32_t flags);

#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
	Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
}

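/* Hypothetical usage sketch: portable code reaches ARCH_IRQ_CONNECT()
 * through the generic IRQ_CONNECT() API from <zephyr/irq.h>.  The IRQ
 * number, priority, and handler names below are illustrative only.
 */
static void example_isr(const void *arg)
{
	ARG_UNUSED(arg);
}

static inline void example_connect(void)
{
	IRQ_CONNECT(7, 3, example_isr, NULL, 0);
}
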
#define XTENSA_ERR_NORET

extern uint32_t sys_clock_cycle_get_32(void);

static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

extern uint64_t sys_clock_cycle_get_64(void);

static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}

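/* Note (illustrative): these thin wrappers back the kernel's
 * k_cycle_get_32()/k_cycle_get_64() APIs; the actual counter read
 * lives in the platform timer driver's sys_clock_cycle_get_*()
 * implementation.
 */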
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

static ALWAYS_INLINE void xtensa_vecbase_lock(void)
{
	int vecbase;

	__asm__ volatile("rsr.vecbase %0" : "=r" (vecbase));

	/* On some targets, bit 0 of VECBASE acts as a lock bit:
	 * once this bit is set, VECBASE can't be changed until the
	 * bit is cleared by reset.  On targets without the feature,
	 * bit 0 is hardwired to 0.
	 */
	__asm__ volatile("wsr.vecbase %0; rsync" : : "r" (vecbase | 1));
}

#if defined(CONFIG_XTENSA_RPO_CACHE)
#if defined(CONFIG_ARCH_HAS_COHERENCE)
static inline bool arch_mem_coherent(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
}
#endif

static inline bool arch_xtensa_is_ptr_cached(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_CACHED_REGION;
}

static inline bool arch_xtensa_is_ptr_uncached(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
}

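/* Illustrative helper, not part of the upstream header: all three
 * predicates above key off address bits 31..29, which select one of
 * eight 512 MB hardware regions.  Two pointers alias the same region
 * exactly when those bits match.
 */
static inline bool example_same_region(void *a, void *b)
{
	return ((size_t)a >> 29) == ((size_t)b >> 29);
}
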
static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t rfrom)
{
	/* The math here is all compile-time: when the two regions
	 * differ by a power of two, we can convert between them by
	 * setting or clearing just one bit.  Otherwise it needs two
	 * operations.
	 */
	uint32_t rxor = (rto ^ rfrom) << 29;

	rto <<= 29;
	if (Z_IS_POW2(rxor)) {
		if ((rxor & rto) == 0) {
			return addr & ~rxor;
		} else {
			return addr | rxor;
		}
	} else {
		return (addr & ~(7U << 29)) | rto;
	}
}
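
/* Worked example (region numbers hypothetical): converting an address
 * from region 4 to region 5 gives rxor = (5 ^ 4) << 29 = 0x20000000,
 * a power of two, so a single OR suffices:
 *
 *   z_xtrpoflip(0x80001000, 5, 4) == 0xA0001000
 *
 * Converting between regions 3 and 5 gives rxor = 6 << 29, not a
 * power of two, so the region bits are masked off and the target
 * region index is OR'd in, taking two operations.
 */
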
static inline void __sparse_cache *arch_xtensa_cached_ptr(void *ptr)
{
	return (__sparse_force void __sparse_cache *)z_xtrpoflip((uint32_t) ptr,
				CONFIG_XTENSA_CACHED_REGION,
				CONFIG_XTENSA_UNCACHED_REGION);
}

static inline void *arch_xtensa_uncached_ptr(void __sparse_cache *ptr)
{
	return (void *)z_xtrpoflip((__sparse_force uint32_t)ptr,
				CONFIG_XTENSA_UNCACHED_REGION,
				CONFIG_XTENSA_CACHED_REGION);
}

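/* Usage sketch (function and parameter names hypothetical): obtain
 * both views of one object; the cached alias is fast and core-local,
 * while under KERNEL_COHERENCE cross-CPU shared data goes through the
 * uncached alias.
 */
static inline void example_alias_views(uint32_t *buf)
{
	void __sparse_cache *cached = arch_xtensa_cached_ptr(buf);
	void *uncached = arch_xtensa_uncached_ptr(cached);

	ARG_UNUSED(cached);
	ARG_UNUSED(uncached);
}
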
/* Utility to generate an unrolled and optimal[1] code sequence to set
 * the RPO TLB registers (contra the HAL cacheattr macros, which
 * generate larger code and can't be called from C), based on the
 * KERNEL_COHERENCE configuration in use.  Selects RPO attribute "2"
 * for regions (including MMIO registers in region zero) which want to
 * bypass L1, "4" for the cached region which wants writeback, and
 * "15" (invalid) elsewhere.
 *
 * Note that on cores that have the "translation" option set, we need
 * to put an identity mapping in the high bits.  Also per spec,
 * changing the current code region (by definition cached) requires
 * that WITLB be followed by an ISYNC and that both instructions live
 * in the same cache line (two 3-byte instructions fit in an 8-byte
 * aligned region, so that's guaranteed not to cross a cache line
 * boundary).
 *
 * [1] With the sole exception of gcc's infuriating insistence on
 * emitting a precomputed literal for addr + addrincr instead of
 * computing it with a single ADD instruction from values it already
 * has in registers.  Explicitly assigning the variables to registers
 * via an attribute works, but then emits needless MOV instructions
 * instead.  I tell myself it's just 32 bytes of .text, but... Sigh.
 */
#define _REGION_ATTR(r) \
	((r) == 0 ? 2 : \
		((r) == CONFIG_XTENSA_CACHED_REGION ? 4 : \
			((r) == CONFIG_XTENSA_UNCACHED_REGION ? 2 : 15)))

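/* Worked example (hypothetical Kconfig values CACHED_REGION=5 and
 * UNCACHED_REGION=4): _REGION_ATTR(0) == 2 (bypass, for MMIO),
 * _REGION_ATTR(5) == 4 (writeback cached), _REGION_ATTR(4) == 2
 * (bypass), and every other region index yields 15 (invalid).
 */
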
#define _SET_ONE_TLB(region) do { \
	uint32_t attr = _REGION_ATTR(region); \
	if (XCHAL_HAVE_XLT_CACHEATTR) { \
		attr |= addr; /* RPO with translation */ \
	} \
	if (region != CONFIG_XTENSA_CACHED_REGION) { \
		__asm__ volatile("wdtlb %0, %1; witlb %0, %1" \
				 :: "r"(attr), "r"(addr)); \
	} else { \
		__asm__ volatile("wdtlb %0, %1" \
				 :: "r"(attr), "r"(addr)); \
		__asm__ volatile("j 1f; .align 8; 1:"); \
		__asm__ volatile("witlb %0, %1; isync" \
				 :: "r"(attr), "r"(addr)); \
	} \
	addr += addrincr; \
} while (0)

#define ARCH_XTENSA_SET_RPO_TLB() do { \
	register uint32_t addr = 0, addrincr = 0x20000000; \
	FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
} while (0)
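
/* Usage sketch (function name hypothetical): platform early-boot code
 * would run the sequence once per core, before relying on the cached
 * region, e.g.:
 */
static ALWAYS_INLINE void example_init_rpo_tlb(void)
{
	ARCH_XTENSA_SET_RPO_TLB();
}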

#endif

#ifdef CONFIG_XTENSA_MMU
extern void arch_xtensa_mmu_post_init(bool is_core0);
#endif

#ifdef __cplusplus
}
#endif

#endif /* !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__) */

#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_ */