/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Xtensa specific syscall header
 *
 * This header contains the Xtensa specific syscall interface. It is
 * included by the syscall interface architecture-abstraction header
 * (include/arch/syscall.h)
 */

#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_

#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE

#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sys/util_macro.h>

#include <xtensa/config/core-isa.h>

#ifdef __cplusplus
extern "C" {
#endif
/* When the syscall assembly executes, EPC points at the syscall
 * instruction itself, so we have to manually advance it to resume
 * execution at the instruction following the syscall. However, if
 * the core has zero-overhead loops and the syscall instruction is
 * the last instruction of a loop body, this simple addition does
 * not work: the advanced PC would point past the loop end and skip
 * the remaining iterations. Handling this correctly would require
 * the syscall entry code to examine the loop registers and set the
 * PC back to the beginning of the loop when iterations remain.
 * Since most syscalls are not issued from inside loops, that extra
 * handling would waste quite a few cycles on every syscall. To work
 * around this, we simply emit a nop after the syscall so the entry
 * code never has to deal with loops; a single nop is cheaper than
 * the code needed to manipulate the loop registers.
 */
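/* An illustrative sketch of the hazard described above, assuming a
 * core with the Xtensa loop option (the assembly below is for
 * illustration only, not taken from this file):
 *
 *     loop    a4, .Lend     # hardware loop, a4 iterations
 *     ...
 *     syscall               # last instruction of the loop body
 * .Lend:
 *
 * On syscall entry, EPC holds the address of the syscall instruction;
 * blindly adding the instruction size would yield a PC at .Lend,
 * abandoning any remaining iterations. With the trailing nop, the
 * advanced PC still lands inside the loop body, so the hardware
 * loopback logic keeps working without special handling.
 */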
/* core-isa.h always defines XCHAL_HAVE_LOOPS (to 0 or 1), so test its
 * value rather than its mere presence.
 */
#if XCHAL_HAVE_LOOPS
#define XTENSA_SYSCALL_ASM "syscall; nop;"
#else
#define XTENSA_SYSCALL_ASM "syscall"
#endif

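/* The out-of-line helpers below are implemented elsewhere in the
 * architecture code; only their prototypes live here. They are mainly
 * a workaround for toolchains whose inline-assembly register
 * constraints cannot be relied upon to place arguments in the
 * required registers for the higher-argument-count invocations below.
 */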
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
uintptr_t xtensa_syscall_helper_args_6(uintptr_t arg1, uintptr_t arg2,
				       uintptr_t arg3, uintptr_t arg4,
				       uintptr_t arg5, uintptr_t arg6,
				       uintptr_t call_id);

uintptr_t xtensa_syscall_helper_args_5(uintptr_t arg1, uintptr_t arg2,
				       uintptr_t arg3, uintptr_t arg4,
				       uintptr_t arg5, uintptr_t call_id);

uintptr_t xtensa_syscall_helper_args_4(uintptr_t arg1, uintptr_t arg2,
				       uintptr_t arg3, uintptr_t arg4,
				       uintptr_t call_id);

#define SYSINL ALWAYS_INLINE
#else
#define SYSINL inline
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */

/**
 * We follow the Linux Xtensa syscall ABI:
 *
 * syscall number   arg1, arg2, arg3, arg4, arg5, arg6
 * --------------   ----------------------------------
 * a2               a6,   a3,   a4,   a5,   a8,   a9
 */

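/* Illustrative only: these invokers are normally called from Zephyr's
 * generated syscall stubs rather than directly. For a hypothetical
 * two-argument syscall "foo" with generated ID K_SYSCALL_FOO (both
 * names are made up for this sketch), a stub running in user context
 * would boil down to:
 *
 *     return (int)arch_syscall_invoke2((uintptr_t)arg_a,
 *                                      (uintptr_t)arg_b,
 *                                      K_SYSCALL_FOO);
 *
 * placing K_SYSCALL_FOO in a2 and the arguments in a6 and a3, per the
 * ABI table above.
 */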
static SYSINL uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper_args_6(arg1, arg2, arg3, arg4, arg5, arg6, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	register uintptr_t a8 __asm__("%a8") = arg5;
	register uintptr_t a9 __asm__("%a9") = arg6;

	__asm__ volatile(XTENSA_SYSCALL_ASM
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5), "r" (a8), "r" (a9)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
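/* A note on the inline assembly pattern shared by all the invokers:
 * a2 is listed both as an output ("=r") and as an input because the
 * call ID goes into the kernel through a2 and the return value comes
 * back out in the same register. The "memory" clobber prevents the
 * compiler from reordering or caching memory accesses across the
 * syscall, since the kernel may read or write buffers passed by
 * pointer in the argument registers.
 */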

static SYSINL uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper_args_5(arg1, arg2, arg3, arg4, arg5, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	register uintptr_t a8 __asm__("%a8") = arg5;

	__asm__ volatile(XTENSA_SYSCALL_ASM
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5), "r" (a8)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}

static SYSINL uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper_args_4(arg1, arg2, arg3, arg4, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;

	__asm__ volatile(XTENSA_SYSCALL_ASM
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}

static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t call_id)
{
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;

	__asm__ volatile(XTENSA_SYSCALL_ASM
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4)
			 : "memory");

	return a2;
}

static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;

	__asm__ volatile(XTENSA_SYSCALL_ASM
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3)
			 : "memory");

	return a2;
}

static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
{
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;

	__asm__ volatile(XTENSA_SYSCALL_ASM
			 : "=r" (a2)
			 : "r" (a2), "r" (a6)
			 : "memory");

	return a2;
}

static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	register uintptr_t a2 __asm__("%a2") = call_id;

	__asm__ volatile(XTENSA_SYSCALL_ASM
			 : "=r" (a2)
			 : "r" (a2)
			 : "memory");

	return a2;
}

/*
 * There is no easy (or generic) way to figure out if a thread is
 * running in unprivileged mode: reading the current ring (PS.CRING)
 * is a privileged instruction, and thread local storage is not
 * available in xcc. The check below therefore relies on the THREADPTR
 * special register: a zero THREADPTR means we cannot be in user
 * context. With TLS enabled, THREADPTR holds the TLS base and a
 * thread-local is_user_mode flag records the mode; without TLS, a
 * non-zero THREADPTR itself marks user context.
 */
static inline bool arch_is_user_context(void)
{
#if XCHAL_HAVE_THREADPTR
	uint32_t thread;

	__asm__ volatile(
		"rur.THREADPTR %0\n\t"
		: "=a" (thread)
	);
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	extern Z_THREAD_LOCAL uint32_t is_user_mode;

	if (!thread) {
		return false;
	}

	return is_user_mode != 0;
#else
	return !!thread;
#endif

#else /* XCHAL_HAVE_THREADPTR */
	extern bool xtensa_is_user_context(void);

	return xtensa_is_user_context();
#endif /* XCHAL_HAVE_THREADPTR */
}
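/* Illustrative only: the typical consumer of arch_is_user_context()
 * is the trap decision in a syscall stub. In sketch form, with a
 * hypothetical implementation function z_impl_foo() and made-up ID
 * K_SYSCALL_FOO, this is:
 *
 *     if (arch_is_user_context()) {
 *             return (int)arch_syscall_invoke1((uintptr_t)arg,
 *                                              K_SYSCALL_FOO);
 *     }
 *     return z_impl_foo(arg);
 *
 * i.e. user threads trap into the kernel while kernel threads call
 * the implementation directly.
 */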

#undef SYSINL

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_ */