Zephyr API Documentation 4.3.99
A Scalable Open Source RTOS
Loading...
Searching...
No Matches
arm-m-switch.h
Go to the documentation of this file.
1/* Copyright 2025 The ChromiumOS Authors
2 * Copyright 2026 Arm Limited and/or its affiliates <open-source-office@arm.com>
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
17#ifndef _ZEPHYR_ARCH_ARM_M_SWITCH_H
18#define _ZEPHYR_ARCH_ARM_M_SWITCH_H
19
20#include <stdint.h>
23
/* GCC/gas has a code generation bugglet on thumb. The R7 register is
 * the ABI-defined frame pointer, though it's usually unused in zephyr
 * due to -fomit-frame-pointer (and the fact the DWARF on ARM doesn't
 * really need it). But when it IS enabled, which sometimes seems to
 * happen due to toolchain internals, GCC is unable to allow its use
 * in the clobber list of an asm() block (I guess it can't generate
 * spill/fill code without using the frame?).
 *
 * When absolutely needed, this kconfig unmasks a workaround where we
 * spill/fill R7 around the switch manually.
 */
#ifdef CONFIG_ARM_GCC_FP_WORKAROUND
/* Workaround on: emit the wrapped expression (a manual R7 push/pop). */
#define _R7_CLOBBER_OPT(expr) expr
#else
/* Workaround off: expand to nothing; R7 is listed in the asm clobber
 * list instead (see the clobber list in arm_m_switch()).
 */
#define _R7_CLOBBER_OPT(expr) /**/
#endif
40
/* Should probably be in kconfig, basically this is testing whether or
 * not the toolchain will allow a "g" flag (DSP state) to an "msr
 * apsr_*" instruction (i.e. the "msr apsr_nzcvqg" form used in the
 * restore path of arm_m_switch() below).
 */
#if defined(CONFIG_CPU_CORTEX_M4) || defined(CONFIG_CPU_CORTEX_M7) || defined(CONFIG_ARMV8_M_DSP)
#define _ARM_M_SWITCH_HAVE_DSP
#endif
48
/**
 * @brief Create an initial switch frame on a new thread's stack.
 *
 * Builds a frame that arm_m_switch() can "restore" into, such that the
 * first switch to the new thread enters @p entry with the four argument
 * words in r0-r3.
 *
 * @param base  Base of the stack region.
 * @param sz    Size of the stack region in bytes.
 * @param entry Thread entry point.
 * @param arg0  First entry argument.
 * @param arg1  Second entry argument.
 * @param arg2  Third entry argument.
 * @param arg3  Fourth entry argument.
 * @return The initial switch handle (stack pointer) for the thread.
 */
void *arm_m_new_stack(char *base, uint32_t sz, void *entry, void *arg0, void *arg1, void *arg2,
		      void *arg3);

/** @brief Assembly stub that completes the Cortex-M context restore. */
void arm_m_exc_exit(void);

/* Pointer to the stacked LR word used by the ISR tail fixup path.
 * NOTE(review): this extern was dropped by the doc extraction (it is
 * listed in the symbol index and read/written by arm_m_exc_tail());
 * restored here so the header is self-consistent.
 */
extern uint32_t *arm_m_exc_lr_ptr;

/* MPU reconfiguration hook invoked on every switch when userspace or
 * stack guards are enabled (defined in the Arm MPU code).
 */
void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread);

/* Current thread's TLS pointer, consumed by the TLS access helpers. */
extern uintptr_t z_arm_tls_ptr;
129
132
/* Global pointers to the frame locations for the callee-saved
 * registers. Set in arm_m_must_switch(), and used by the fixup
 * assembly in arm_m_exc_exit.
 *
 * Field notes (NOTE(review): out/in semantics inferred from the block
 * comment above — confirm against arm_m_must_switch()):
 *   out      - presumably where the outgoing thread's r4-r11 get stored
 *   in       - presumably where the incoming thread's r4-r11 are read
 *   lr_save  - original stacked ISR LR, captured by arm_m_exc_tail()
 *   lr_fixup - the fixup return address; arm_m_exc_tail() compares the
 *              stacked LR against this to avoid patching twice
 */
struct arm_m_cs_ptrs {
	void *out, *in, *lr_save, *lr_fixup;
};

/* Single global instance holding the current fixup state. */
extern struct arm_m_cs_ptrs arm_m_cs_ptrs;
146
/**
 * @brief ISR-tail helper that patches the stacked LR for deferred switch fixup.
 *
 * Call at the very end of an ISR.  Redirects the interrupt return path
 * through arm_m_exc_exit() by rewriting the stacked LR slot pointed to
 * by arm_m_exc_lr_ptr, so post-return fixup code runs after the ISR has
 * restored its callee-saved registers.  No-op without multithreading.
 */
static inline void arm_m_exc_tail(void)
{
#ifdef CONFIG_MULTITHREADING
	/* Dirty trickery. We defer as much interrupt-exit work until
	 * the very last moment, when the top-level ISR returns back
	 * into user code. We do this by replacing the topmost (!) LR
	 * return address in the stack frame with our fixup code at
	 * arm_m_exc_exit(). By running after the ISR return, it
	 * knows that the callee-save registers r4-r11 (which need to
	 * be saved to the outgoing thread) are restored.
	 *
	 * Obviously this only works if the ISR is "ABI-compliant
	 * enough". It doesn't have to have pushed a complete frame,
	 * but it does have to have put LR into its standard location.
	 * In practice generated code does (because it has to store LR
	 * somewhere so it can call other functions and then pop it to
	 * return), so this works even on code built with
	 * -fomit-frame-pointer. If an app needs a direct interrupt
	 * and can't meet these requirements, it can always skip this
	 * call and return directly (reschedule is optional for direct
	 * interrupts anyway).
	 *
	 * Finally note the call to check_stack_sentinel here: that is
	 * normally called from context switch at the end, but will
	 * toss an exception, which we can't allow (without hardship)
	 * on the path from here to interrupt exit. It will mess up
	 * our bookkeeping around EXC_RETURN, so do it early.
	 */
	void z_check_stack_sentinel(void);
	void *isr_lr = (void *)*arm_m_exc_lr_ptr;

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		z_check_stack_sentinel();
	}

	if (isr_lr != arm_m_cs_ptrs.lr_fixup) {
		/* We need to return to arm_m_exc_exit only if an exception is returning to thread
		 * mode with PSP. Note that it is possible to get an exception in arm_m_exc_exit
		 * after interrupts are enabled but, before branching to lr (0xFFFFFFFD) and, at
		 * this point the exception pushes an ESF on MSP. If we write arm_m_exc_exit at top
		 * of MSP at this point, we are corrupting the XPSR of the ESF which will result in
		 * a usage fault. So, make sure that we do this only if we are returning to thread
		 * mode and using PSP to do so.
		 */
		if ((((uint32_t)isr_lr & 0xFFFFFF00U) == 0xFFFFFF00U)
		    && (((uint32_t)isr_lr & 0xC) == 0xC)) {
			arm_m_cs_ptrs.lr_save = isr_lr;
			/* Redirect the ISR's return through the fixup stub.
			 * NOTE(review): this store was missing from the
			 * extracted text (original line 203); without it
			 * lr_save is recorded but arm_m_exc_exit never runs
			 * and the lr_fixup guard above is dead.
			 */
			*arm_m_exc_lr_ptr = (uint32_t)arm_m_cs_ptrs.lr_fixup;
		}
	}
#endif
}
208
/**
 * @brief Core Cortex-M context switch routine.
 *
 * Pushes the outgoing thread's full register state (GPRs, APSR, and
 * optionally PSPLIM / FPU state) onto its own stack, stores the
 * resulting SP through @p switched_from as the outgoing switch handle,
 * then adopts @p switch_to as the new SP and restores the incoming
 * thread's state from it.  Interrupts are unmasked (BASEPRI written to
 * zero) once the incoming stack is live.  Must be entered with the
 * caller in privileged mode (see the CONTROL handling below).
 *
 * @param switch_to      Incoming thread's switch handle (its saved SP).
 * @param switched_from  Location receiving the outgoing thread's handle.
 */
static ALWAYS_INLINE void arm_m_switch(void *switch_to, void **switched_from)
{
#if defined(CONFIG_USERSPACE) || defined(CONFIG_MPU_STACK_GUARD)
	z_arm_configure_dynamic_mpu_regions(_current);
#endif

#ifdef CONFIG_THREAD_LOCAL_STORAGE
	z_arm_tls_ptr = _current->tls;
#endif

#if defined(CONFIG_USERSPACE) && defined(CONFIG_USE_SWITCH)
	/* Set things up to write the CONTROL.nPRIV bit. We know the outgoing
	 * thread is in privileged mode (because you can't reach a
	 * context switch unless you're in the kernel!).
	 */
	extern uint32_t arm_m_switch_control;
	uint32_t control;

	__asm__ volatile("mrs %0, control" : "=r"(control));
	arm_m_switch_control = (control & ~1) | (_current->arch.mode & 1);
#endif

	/* new switch handle in r4, old switch handle pointer in r5.
	 * r6-r8 are used by the code here, and r9-r11 are
	 * unsaved/clobbered (they are very likely to be caller-saved
	 * registers in the enclosing function that the compiler can
	 * avoid using, i.e. we can let it make the call and avoid a
	 * double-spill). But all registers are restored fully
	 * (because we might be switching to an interrupt-saved frame)
	 */
	register uint32_t r4 __asm__("r4") = (uint32_t)switch_to;
	register uint32_t r5 __asm__("r5") = (uint32_t)switched_from;
	__asm__ volatile(_R7_CLOBBER_OPT("push {r7};")
			 /* Construct and push a {r12, lr, pc} group at the top
			  * of the frame, where PC points to the final restore location
			  * at the end of this sequence.
			  */
			 "mov r6, r12;"
			 "mov r7, lr;"
			 "ldr r8, =3f;" /* address of restore PC */
			 "orr r8, r8, #1;" /* set thumb bit */
			 "push {r6-r8};"
			 "sub sp, sp, #24;" /* skip over space for r6-r11 */
			 "push {r0-r5};"
			 "mov r2, #0x01000000;" /* APSR (only care about thumb bit) */
			 "mov r0, #0;" /* Leave r0 zero for code below */
#ifdef CONFIG_BUILTIN_STACK_GUARD
			 "mrs r1, psplim;"
			 "push {r1-r2};"
			 "msr psplim, r0;" /* zero it so we can move the stack */
#else
			 "push {r2};"
#endif

#ifdef CONFIG_FPU
			 /* Push FPU state (if active) to our outgoing stack */
			 " mrs r8, control;" /* read CONTROL.FPCA */
			 " and r7, r8, #4;" /* r7 == have_fpu */
			 " cbz r7, 1f;"
			 " bic r8, r8, #4;" /* clear CONTROL.FPCA */
			 " msr control, r8;"
			 " vmrs r6, fpscr;"
			 " push {r6};"
			 " vpush {s0-s31};"
			 "1: push {r7};" /* have_fpu word */

			 /* Pop FPU state (if present) from incoming frame in r4 */
			 " ldm r4!, {r7};" /* have_fpu word */
			 " cbz r7, 2f;"
			 " vldm r4!, {s0-s31};" /* (note: sets FPCA bit for us) */
			 " ldm r4!, {r6};"
			 " vmsr fpscr, r6;"
			 "2:;"
#endif

#if defined(CONFIG_USERSPACE) && defined(CONFIG_USE_SWITCH)
			 " ldr r8, =arm_m_switch_control;"
			 " ldr r8, [r8];"
#endif

			 /* Save the outgoing switch handle (which is SP), swap stacks,
			  * and enable interrupts. The restore process is
			  * interruptible code (running in the incoming thread) once
			  * the stack is valid.
			  */
			 "str sp, [r5];"
			 "mov sp, r4;"
			 "msr basepri, r0;"

#if defined(CONFIG_USERSPACE) && defined(CONFIG_USE_SWITCH)
			 " msr control, r8;" /* Now we can drop privilege */
#endif

			 /* Restore is super simple: pop the flags (and stack limit if
			  * enabled) then slurp in the whole GPR set in two
			  * instructions. (The instruction encoding disallows popping
			  * both LR and PC in a single instruction)
			  */
#ifdef CONFIG_BUILTIN_STACK_GUARD
			 "pop {r1-r2};"
			 "msr psplim, r1;"
#else
			 "pop {r2};"
#endif
#ifdef _ARM_M_SWITCH_HAVE_DSP
			 "msr apsr_nzcvqg, r2;" /* bonkers syntax */
#else
			 "msr apsr_nzcvq, r2;" /* not even source-compatible! */
#endif
			 "pop {r0-r12, lr};"
			 "pop {pc};"

			 "3:" /* Label for restore address */
			 _R7_CLOBBER_OPT("pop {r7};")::"r"(r4),
			 "r"(r5)
			 : "r6", "r8", "r9", "r10",
#ifndef CONFIG_ARM_GCC_FP_WORKAROUND
			 "r7",
#endif
			 "r11");
}
341
#ifdef CONFIG_USE_SWITCH
/**
 * @brief Public arch-level wrapper for the Cortex-M switch routine.
 *
 * Thin alias for arm_m_switch(); this is the entry point the kernel
 * calls when CONFIG_USE_SWITCH is enabled.
 *
 * @param switch_to      Incoming thread's switch handle.
 * @param switched_from  Location receiving the outgoing thread's handle.
 */
static ALWAYS_INLINE void arch_switch(void *switch_to, void **switched_from)
{
	arm_m_switch(switch_to, switched_from);
}
#endif
357
358#endif /* _ZEPHYR_ARCH_ARM_M_SWITCH_H */
bool arm_m_iciit_check(uint32_t msp, uint32_t psp, uint32_t lr)
Recover an interrupted IT/ICI instruction after a context switch.
static void arm_m_exc_tail(void)
ISR-tail helper that patches the stacked LR for deferred switch fixup.
Definition arm-m-switch.h:156
void * arm_m_new_stack(char *base, uint32_t sz, void *entry, void *arg0, void *arg1, void *arg2, void *arg3)
Create an initial switch frame on a new thread's stack.
static ALWAYS_INLINE void arm_m_switch(void *switch_to, void **switched_from)
Core Cortex-M context switch routine.
Definition arm-m-switch.h:220
static ALWAYS_INLINE void arch_switch(void *switch_to, void **switched_from)
Public arch-level wrapper for the Cortex-M switch routine.
Definition arm-m-switch.h:352
void arm_m_iciit_stub(void)
Undefined-instruction stub used to force IT/ICI recovery.
void arm_m_exc_exit(void)
Assembly stub that completes the Cortex-M context restore.
uint32_t * arm_m_exc_lr_ptr
Pointer to the stacked LR word used by the ISR tail fixup path.
struct arm_m_cs_ptrs arm_m_cs_ptrs
Global instance with current callee-saved frame pointers.
bool arm_m_must_switch(void)
Evaluate whether an interrupt should trigger a context switch.
uint32_t arm_m_switch_stack_buffer
Backing storage used when relocating stacks during switch operations.
#define IS_ENABLED(config_macro)
Check for macro definition in compiler-visible expressions.
Definition util_macro.h:148
#define ALWAYS_INLINE
Definition common.h:160
__UINT32_TYPE__ uint32_t
Definition stdint.h:90
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:105
Thread Structure.
Definition thread.h:259