Line data Source code
1 1 : /*
2 : * Copyright (c) 2018 Linaro Limited.
3 : * Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
4 : *
5 : * SPDX-License-Identifier: Apache-2.0
6 : */
7 :
8 : /**
9 : * @file
10 : * @brief ARM AArch32 specific syscall header
11 : *
12 : * This header contains the ARM AArch32 specific syscall interface. It is
13 : * included by the syscall interface architecture-abstraction header
14 : * (include/arch/syscall.h)
15 : */
16 :
17 : #ifndef ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_
18 : #define ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_
19 :
20 : #define _SVC_CALL_CONTEXT_SWITCH 0
21 : #define _SVC_CALL_IRQ_OFFLOAD 1
22 : #define _SVC_CALL_RUNTIME_EXCEPT 2
23 : #define _SVC_CALL_SYSTEM_CALL 3
24 :
25 : #ifdef CONFIG_USERSPACE
26 : #ifndef _ASMLANGUAGE
27 :
28 : #include <zephyr/types.h>
29 : #include <stdbool.h>
30 : #include <zephyr/arch/arm/misc.h>
31 : #include <zephyr/sys/util_macro.h>
32 :
33 : #ifdef __cplusplus
34 : extern "C" {
35 : #endif
36 :
37 :
38 : /* Syscall invocation macros. arm-specific machine constraints used to ensure
39 : * args land in the proper registers.
40 : */
/**
 * @brief Invoke a system call with six arguments.
 *
 * Marshals the six arguments into r0-r5 and the call ID into r6 via the
 * explicit register-variable bindings below, then traps into the kernel
 * with SVC #_SVC_CALL_SYSTEM_CALL.  The kernel's return value comes back
 * in r0 (aliased by 'ret').
 */
41 0 : static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
42 : uintptr_t arg3, uintptr_t arg4,
43 : uintptr_t arg5, uintptr_t arg6,
44 : uintptr_t call_id)
45 : {
/* Pin each argument to the exact register the SVC handler expects. */
46 : register uint32_t ret __asm__("r0") = arg1;
47 : register uint32_t r1 __asm__("r1") = arg2;
48 : register uint32_t r2 __asm__("r2") = arg3;
49 : register uint32_t r3 __asm__("r3") = arg4;
50 : register uint32_t r4 __asm__("r4") = arg5;
51 : register uint32_t r5 __asm__("r5") = arg6;
52 : register uint32_t r6 __asm__("r6") = call_id;
53 :
/* NOTE(review): the 'bti' emitted after the svc when CONFIG_ARM_BTI is
 * enabled appears intended as a branch-target landing pad for the return
 * from the SVC handler -- confirm against the BTI enablement change.
 */
54 : __asm__ volatile("svc %[svid]\n"
55 : IF_ENABLED(CONFIG_ARM_BTI, ("bti\n"))
/* r0-r3 are listed as outputs so the compiler does not assume their
 * pre-call values survive the system call.
 */
56 : : "=r"(ret), "=r"(r1), "=r"(r2), "=r"(r3)
57 : : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
58 : "r" (ret), "r" (r1), "r" (r2), "r" (r3),
59 : "r" (r4), "r" (r5), "r" (r6)
/* r8 and ip are declared clobbered across the trap; "memory" forces the
 * compiler to flush/reload memory the kernel may read or write.
 */
60 : : "r8", "memory", "ip");
61 :
62 : return ret;
63 : }
64 :
/**
 * @brief Invoke a system call with five arguments.
 *
 * Arguments land in r0-r4 and the call ID in r6 (r5 is unused here),
 * then SVC #_SVC_CALL_SYSTEM_CALL traps into the kernel.  The return
 * value comes back in r0 ('ret').
 */
65 0 : static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
66 : uintptr_t arg3, uintptr_t arg4,
67 : uintptr_t arg5,
68 : uintptr_t call_id)
69 : {
/* Bind each argument to the register the SVC handler expects. */
70 : register uint32_t ret __asm__("r0") = arg1;
71 : register uint32_t r1 __asm__("r1") = arg2;
72 : register uint32_t r2 __asm__("r2") = arg3;
73 : register uint32_t r3 __asm__("r3") = arg4;
74 : register uint32_t r4 __asm__("r4") = arg5;
75 : register uint32_t r6 __asm__("r6") = call_id;
76 :
77 : __asm__ volatile("svc %[svid]\n"
78 : IF_ENABLED(CONFIG_ARM_BTI, ("bti\n"))
/* r0-r3 are outputs: their pre-call values do not survive the call. */
79 : : "=r"(ret), "=r"(r1), "=r"(r2), "=r"(r3)
80 : : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
81 : "r" (ret), "r" (r1), "r" (r2), "r" (r3),
82 : "r" (r4), "r" (r6)
/* r8/ip clobbered across the trap; "memory" orders kernel side effects. */
83 : : "r8", "memory", "ip");
84 :
85 : return ret;
86 : }
87 :
/**
 * @brief Invoke a system call with four arguments.
 *
 * Arguments land in r0-r3 and the call ID in r6, then
 * SVC #_SVC_CALL_SYSTEM_CALL traps into the kernel.  The return value
 * comes back in r0 ('ret').
 */
88 0 : static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
89 : uintptr_t arg3, uintptr_t arg4,
90 : uintptr_t call_id)
91 : {
/* Bind each argument to the register the SVC handler expects. */
92 : register uint32_t ret __asm__("r0") = arg1;
93 : register uint32_t r1 __asm__("r1") = arg2;
94 : register uint32_t r2 __asm__("r2") = arg3;
95 : register uint32_t r3 __asm__("r3") = arg4;
96 : register uint32_t r6 __asm__("r6") = call_id;
97 :
98 : __asm__ volatile("svc %[svid]\n"
99 : IF_ENABLED(CONFIG_ARM_BTI, ("bti\n"))
/* r0-r3 are outputs: their pre-call values do not survive the call. */
100 : : "=r"(ret), "=r"(r1), "=r"(r2), "=r"(r3)
101 : : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
102 : "r" (ret), "r" (r1), "r" (r2), "r" (r3),
103 : "r" (r6)
/* r8/ip clobbered across the trap; "memory" orders kernel side effects. */
104 : : "r8", "memory", "ip");
105 :
106 : return ret;
107 : }
108 :
/**
 * @brief Invoke a system call with three arguments.
 *
 * Arguments land in r0-r2 and the call ID in r6, then
 * SVC #_SVC_CALL_SYSTEM_CALL traps into the kernel.  The return value
 * comes back in r0 ('ret').
 */
109 0 : static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
110 : uintptr_t arg3,
111 : uintptr_t call_id)
112 : {
/* Bind each argument to the register the SVC handler expects. */
113 : register uint32_t ret __asm__("r0") = arg1;
114 : register uint32_t r1 __asm__("r1") = arg2;
115 : register uint32_t r2 __asm__("r2") = arg3;
116 : register uint32_t r6 __asm__("r6") = call_id;
117 :
118 : __asm__ volatile("svc %[svid]\n"
119 : IF_ENABLED(CONFIG_ARM_BTI, ("bti\n"))
120 : : "=r"(ret), "=r"(r1), "=r"(r2)
121 : : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
122 : "r" (ret), "r" (r1), "r" (r2), "r" (r6)
/* r3 carries no argument here, so it is named a plain clobber instead
 * of an output operand.
 */
123 : : "r8", "memory", "r3", "ip");
124 :
125 : return ret;
126 : }
127 :
/**
 * @brief Invoke a system call with two arguments.
 *
 * Arguments land in r0-r1 and the call ID in r6, then
 * SVC #_SVC_CALL_SYSTEM_CALL traps into the kernel.  The return value
 * comes back in r0 ('ret').
 */
128 0 : static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
129 : uintptr_t call_id)
130 : {
/* Bind each argument to the register the SVC handler expects. */
131 : register uint32_t ret __asm__("r0") = arg1;
132 : register uint32_t r1 __asm__("r1") = arg2;
133 : register uint32_t r6 __asm__("r6") = call_id;
134 :
135 : __asm__ volatile("svc %[svid]\n"
136 : IF_ENABLED(CONFIG_ARM_BTI, ("bti\n"))
137 : : "=r"(ret), "=r"(r1)
138 : : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
139 : "r" (ret), "r" (r1), "r" (r6)
/* Unused argument registers r2/r3 are listed as plain clobbers. */
140 : : "r8", "memory", "r2", "r3", "ip");
141 :
142 : return ret;
143 : }
144 :
/**
 * @brief Invoke a system call with one argument.
 *
 * The argument lands in r0 and the call ID in r6, then
 * SVC #_SVC_CALL_SYSTEM_CALL traps into the kernel.  The return value
 * comes back in r0 ('ret').
 */
145 0 : static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
146 : uintptr_t call_id)
147 : {
148 : register uint32_t ret __asm__("r0") = arg1;
149 : register uint32_t r6 __asm__("r6") = call_id;
150 :
151 : __asm__ volatile("svc %[svid]\n"
152 : IF_ENABLED(CONFIG_ARM_BTI, ("bti\n"))
153 : : "=r"(ret)
154 : : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
155 : "r" (ret), "r" (r6)
/* Unused argument registers r1-r3 are listed as plain clobbers. */
156 : : "r8", "memory", "r1", "r2", "r3", "ip");
157 : return ret;
158 : }
159 :
/**
 * @brief Invoke a system call with no arguments.
 *
 * Only the call ID (r6) is marshalled; SVC #_SVC_CALL_SYSTEM_CALL traps
 * into the kernel and the return value comes back in r0 ('ret').
 */
160 0 : static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
161 : {
/* NOTE(review): 'ret' is deliberately left uninitialized yet appears in
 * the input operand list below ("r" (ret)); the handler ignores r0 on
 * entry for a 0-argument call, but passing an indeterminate value as an
 * asm input is questionable -- confirm whether the input constraint can
 * simply be dropped.
 */
162 : register uint32_t ret __asm__("r0");
163 : register uint32_t r6 __asm__("r6") = call_id;
164 :
165 : __asm__ volatile("svc %[svid]\n"
166 : IF_ENABLED(CONFIG_ARM_BTI, ("bti\n"))
167 : : "=r"(ret)
168 : : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
169 : "r" (ret), "r" (r6)
/* Unused argument registers r1-r3 are listed as plain clobbers. */
170 : : "r8", "memory", "r1", "r2", "r3", "ip");
171 :
172 : return ret;
173 : }
174 :
/**
 * @brief Test whether the CPU is currently executing in user context.
 *
 * On Cortex-M, a non-zero IPSR (exception number) means the CPU is in
 * handler mode, which can never be user context, so bail out early.
 * Otherwise the answer is delegated to z_arm_thread_is_in_user_mode().
 *
 * @return true if running in unprivileged (user) thread context.
 */
175 0 : static inline bool arch_is_user_context(void)
176 : {
177 : #if defined(CONFIG_CPU_CORTEX_M)
178 : uint32_t value;
179 :
180 : /* check for handler mode */
181 : __asm__ volatile("mrs %0, IPSR\n\t" : "=r"(value));
/* Non-zero IPSR == an exception is active == handler mode, not user. */
182 : if (value) {
183 : return false;
184 : }
185 : #endif
186 :
187 : return z_arm_thread_is_in_user_mode();
188 : }
189 :
190 : #ifdef __cplusplus
191 : }
192 : #endif
193 :
194 : #endif /* _ASMLANGUAGE */
195 : #endif /* CONFIG_USERSPACE */
196 : #endif /* ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_ */
|