Line data Source code
1 1 : /*
2 : * Copyright (c) 2019 Intel Corporation.
3 : *
4 : * SPDX-License-Identifier: Apache-2.0
5 : */
6 :
7 : /**
8 : * @file
9 : * @brief x86 (INTEL64) specific syscall header
10 : *
11 : * This header contains the x86 specific syscall interface. It is
12 : * included by the syscall interface architecture-abstraction header
13 : * (include/arch/syscall.h)
14 : */
15 :
16 : #ifndef ZEPHYR_INCLUDE_ARCH_X86_INTEL64_SYSCALL_H_
17 : #define ZEPHYR_INCLUDE_ARCH_X86_INTEL64_SYSCALL_H_
18 :
19 : #ifdef CONFIG_USERSPACE
20 : #ifndef _ASMLANGUAGE
21 :
22 : #include <zephyr/types.h>
23 : #include <stdbool.h>
24 :
25 : #ifdef __cplusplus
26 : extern "C" {
27 : #endif
28 :
29 : /*
30 : * x86_64 System V calling convention:
31 : * First six arguments passed in via RDI, RSI, RDX, RCX, R8, R9
32 : * We'll use RAX for the call_id, and the return value
33 : *
34 : * Arrange registers so that they are in-place as much as possible when
 * doing the system call. Because RCX gets overwritten by the CPU, put arg 4
36 : * in r10 instead.
37 : *
 * The SYSCALL instruction stores the return address in RCX and RFLAGS in
 * R11. RIP is loaded from the LSTAR MSR, and RFLAGS is masked with the low
 * 32 bits of the SFMASK (IA32_FMASK) MSR. CS and SS are loaded from values
 * derived from bits 47:32 of the STAR MSR (+0 for CS, +8 for SS)
42 : *
43 : * SYSRET loads RIP from RCX and RFLAGS from r11. CS and SS are set with
44 : * values derived from STAR MSR bits 63:48 (+8 for CS, +16 for SS)
45 : *
46 : * The kernel is in charge of not clobbering across the system call
47 : * the remaining registers: RBX, RBP, R12-R15, SIMD/FPU, and any unused
48 : * argument registers.
49 : */
50 0 : static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
51 : uintptr_t arg3, uintptr_t arg4,
52 : uintptr_t arg5, uintptr_t arg6,
53 : uintptr_t call_id)
54 : {
55 : register uintptr_t rax __asm__("%rax") = call_id;
56 : register uintptr_t rdi __asm__("%rdi") = arg1;
57 : register uintptr_t rsi __asm__("%rsi") = arg2;
58 : register uintptr_t rdx __asm__("%rdx") = arg3;
59 : register uintptr_t r10 __asm__("%r10") = arg4; /* RCX unavailable */
60 : register uintptr_t r8 __asm__("%r8") = arg5;
61 : register uintptr_t r9 __asm__("%r9") = arg6;
62 :
63 : __asm__ volatile("syscall\n\t"
64 : : "=r" (rax)
65 : : "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx),
66 : "r" (r10), "r" (r8), "r" (r9)
67 : : "memory", "rcx", "r11");
68 :
69 : return rax;
70 : }
71 :
72 0 : static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
73 : uintptr_t arg3, uintptr_t arg4,
74 : uintptr_t arg5,
75 : uintptr_t call_id)
76 : {
77 : register uintptr_t rax __asm__("%rax") = call_id;
78 : register uintptr_t rdi __asm__("%rdi") = arg1;
79 : register uintptr_t rsi __asm__("%rsi") = arg2;
80 : register uintptr_t rdx __asm__("%rdx") = arg3;
81 : register uintptr_t r10 __asm__("%r10") = arg4; /* RCX unavailable */
82 : register uintptr_t r8 __asm__("%r8") = arg5;
83 :
84 : __asm__ volatile("syscall\n\t"
85 : : "=r" (rax)
86 : : "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx),
87 : "r" (r10), "r" (r8)
88 : : "memory", "rcx", "r11");
89 :
90 : return rax;
91 : }
92 :
93 0 : static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
94 : uintptr_t arg3, uintptr_t arg4,
95 : uintptr_t call_id)
96 : {
97 : register uintptr_t rax __asm__("%rax") = call_id;
98 : register uintptr_t rdi __asm__("%rdi") = arg1;
99 : register uintptr_t rsi __asm__("%rsi") = arg2;
100 : register uintptr_t rdx __asm__("%rdx") = arg3;
101 : register uintptr_t r10 __asm__("%r10") = arg4; /* RCX unavailable */
102 :
103 : __asm__ volatile("syscall\n\t"
104 : : "=r" (rax)
105 : : "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx),
106 : "r" (r10)
107 : : "memory", "rcx", "r11");
108 :
109 : return rax;
110 : }
111 :
112 0 : static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
113 : uintptr_t arg3,
114 : uintptr_t call_id)
115 : {
116 : register uintptr_t rax __asm__("%rax") = call_id;
117 : register uintptr_t rdi __asm__("%rdi") = arg1;
118 : register uintptr_t rsi __asm__("%rsi") = arg2;
119 : register uintptr_t rdx __asm__("%rdx") = arg3;
120 :
121 : __asm__ volatile("syscall\n\t"
122 : : "=r" (rax)
123 : : "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx)
124 : : "memory", "rcx", "r11");
125 :
126 : return rax;
127 : }
128 :
129 0 : static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
130 : uintptr_t call_id)
131 :
132 : {
133 : register uintptr_t rax __asm__("%rax") = call_id;
134 : register uintptr_t rdi __asm__("%rdi") = arg1;
135 : register uintptr_t rsi __asm__("%rsi") = arg2;
136 :
137 : __asm__ volatile("syscall\n\t"
138 : : "=r" (rax)
139 : : "r" (rax), "r" (rdi), "r" (rsi)
140 : : "memory", "rcx", "r11");
141 :
142 : return rax;
143 : }
144 :
145 0 : static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
146 : uintptr_t call_id)
147 : {
148 : register uintptr_t rax __asm__("%rax") = call_id;
149 : register uintptr_t rdi __asm__("%rdi") = arg1;
150 :
151 : __asm__ volatile("syscall\n\t"
152 : : "=r" (rax)
153 : : "r" (rax), "r" (rdi)
154 : : "memory", "rcx", "r11");
155 :
156 : return rax;
157 : }
158 :
159 0 : static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
160 : {
161 : register uintptr_t rax __asm__("%rax") = call_id;
162 :
163 : __asm__ volatile("syscall\n\t"
164 : : "=r" (rax)
165 : : "r" (rax)
166 : : "memory", "rcx", "r11");
167 :
168 : return rax;
169 : }
170 :
171 0 : static inline bool arch_is_user_context(void)
172 : {
173 : int cs;
174 :
175 : __asm__ volatile ("mov %%cs, %[cs_val]" : [cs_val] "=r" (cs));
176 :
177 : return (cs & 0x3) != 0;
178 : }
179 :
180 : #ifdef __cplusplus
181 : }
182 : #endif
183 :
184 : #endif /* _ASMLANGUAGE */
185 : #endif /* CONFIG_USERSPACE */
186 : #endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL64_SYSCALL_H_ */
|