/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @defgroup arch-interface Architecture Interface
 * @ingroup internal_api
 * @brief Internal kernel APIs with public scope
 *
 * Any public kernel APIs that are implemented as inline functions and need to
 * call architecture-specific APIs will have the prototypes for those
 * architecture-specific APIs here. Architecture APIs that aren't used in this
 * way go in kernel/include/kernel_arch_interface.h.
 *
 * The set of architecture-specific APIs used internally by public macros and
 * inline functions in public headers is also specified and documented here.
 *
 * For all macros and inline function prototypes described herein, <arch/cpu.h>
 * must eventually pull in full definitions for all of them (the actual macro
 * defines and inline function bodies).
 *
 * include/kernel.h and other public headers depend on definitions in this
 * header.
 */
#ifndef ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_
#define ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_

#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <stddef.h>
#include <zephyr/types.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/irq_offload.h>

#ifdef __cplusplus
extern "C" {
#endif

/* NOTE: We cannot pull in kernel.h here, need some forward declarations */
struct arch_esf;
struct k_thread;
struct k_mem_domain;

typedef struct z_thread_stack_element k_thread_stack_t;

typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);

__deprecated typedef struct arch_esf z_arch_esf_t;

/**
 * @defgroup arch-timing Architecture timing APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Obtain the current cycle count, in units specified by
 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC. While this is historically
 * specified as part of the architecture API, in practice virtually
 * all platforms forward it to the sys_clock_cycle_get_32() API
 * provided by the timer driver.
 *
 * @see k_cycle_get_32()
 *
 * @return The current cycle time. This should count up monotonically
 * through the full 32 bit space, wrapping at 0xffffffff. Hardware
 * with fewer bits of precision in the timer is expected to synthesize
 * a 32 bit count.
 */
static inline uint32_t arch_k_cycle_get_32(void);

/**
 * As for arch_k_cycle_get_32(), but with a 64 bit return value. Not
 * all timer hardware has a 64 bit counter; this needs to be implemented
 * only if CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER is set.
 *
 * @see arch_k_cycle_get_32()
 *
 * @return The current cycle time. This should count up monotonically
 * through the full 64 bit space, wrapping at 2^64-1. Hardware with
 * fewer bits of precision in the timer is generally not expected to
 * implement this API.
 */
static inline uint64_t arch_k_cycle_get_64(void);
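
/*
 * Illustrative sketch (not part of the API): unsigned subtraction of two
 * 32-bit cycle counts yields a correct elapsed value across a single
 * counter wrap; work_to_measure() is a hypothetical placeholder.
 *
 *	uint32_t start = arch_k_cycle_get_32();
 *
 *	work_to_measure();
 *
 *	uint32_t elapsed = arch_k_cycle_get_32() - start;
 */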

/** @} */


/**
 * @addtogroup arch-threads
 * @{
 */

/**
 * @def ARCH_THREAD_STACK_RESERVED
 *
 * @see K_THREAD_STACK_RESERVED
 */

/**
 * @def ARCH_STACK_PTR_ALIGN
 *
 * Required alignment of the CPU's stack pointer register value, dictated by
 * hardware constraints and the ABI calling convention.
 *
 * @see Z_STACK_PTR_ALIGN
 */

/**
 * @def ARCH_THREAD_STACK_OBJ_ALIGN(size)
 *
 * Required alignment of the lowest address of a stack object.
 *
 * Optional definition.
 *
 * @see Z_THREAD_STACK_OBJ_ALIGN
 */

/**
 * @def ARCH_THREAD_STACK_SIZE_ADJUST(size)
 * @brief Round up a stack buffer size to alignment constraints
 *
 * Adjust a requested stack buffer size to the true size of its underlying
 * buffer, defined as the area usable for thread stack context and thread-
 * local storage.
 *
 * The size value passed here does not include storage reserved for platform
 * data.
 *
 * The returned value is either the same size provided (if already properly
 * aligned), or rounded up to satisfy alignment constraints. Calculations
 * performed here *must* be idempotent.
 *
 * Optional definition. If undefined, stack buffer sizes are either:
 * - Rounded up to the next power of two if user mode is enabled on an arch
 *   with an MPU that requires such alignment
 * - Rounded up to ARCH_STACK_PTR_ALIGN
 *
 * @see Z_THREAD_STACK_SIZE_ADJUST
 */
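
/*
 * For illustration, a minimal sketch of an idempotent adjustment, assuming
 * the only constraint is ARCH_STACK_PTR_ALIGN (ROUND_UP() comes from
 * zephyr/sys/util.h); applying it twice yields the same value:
 *
 *	#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
 *		ROUND_UP((size), ARCH_STACK_PTR_ALIGN)
 */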

/**
 * @def ARCH_KERNEL_STACK_RESERVED
 * @brief MPU guard size for kernel-only stacks
 *
 * If MPU stack guards are used to catch stack overflows, specify the
 * amount of space reserved in kernel stack objects. If guard sizes are
 * context dependent, this should be the minimum guard size, with
 * remaining space carved out if needed.
 *
 * Optional definition, defaults to 0.
 *
 * @see K_KERNEL_STACK_RESERVED
 */

/**
 * @def ARCH_KERNEL_STACK_OBJ_ALIGN
 * @brief Required alignment of the lowest address of a kernel-only stack.
 */

/** @} */

/**
 * @addtogroup arch-pm
 * @{
 */

/**
 * @brief Power save idle routine
 *
 * This function will be called by the kernel idle loop or possibly within
 * an implementation of z_pm_save_idle in the kernel when the
 * '_pm_save_flag' variable is non-zero.
 *
 * Architectures that do not implement power management instructions may
 * immediately return; otherwise, a power-saving instruction should be
 * issued to wait for an interrupt.
 *
 * @note The function is expected to return after the interrupt that has
 * caused the CPU to exit power-saving mode has been serviced, although
 * this is not a firm requirement.
 *
 * @see k_cpu_idle()
 */
void arch_cpu_idle(void);

/**
 * @brief Atomically re-enable interrupts and enter low power mode
 *
 * The requirements for arch_cpu_atomic_idle() are as follows:
 *
 * -# Enabling interrupts and entering a low-power mode needs to be
 *    atomic, i.e. there should be no period of time where interrupts are
 *    enabled before the processor enters a low-power mode. See the comments
 *    in k_lifo_get(), for example, of the race condition that
 *    occurs if this requirement is not met.
 *
 * -# After waking up from the low-power mode, the interrupt lockout state
 *    must be restored as indicated in the 'key' input parameter.
 *
 * @see k_cpu_atomic_idle()
 *
 * @param key Lockout key returned by previous invocation of arch_irq_lock()
 */
void arch_cpu_atomic_idle(unsigned int key);
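
/*
 * Typical usage sketch from an idle loop; nothing_to_do() is a
 * hypothetical scheduling predicate:
 *
 *	unsigned int key = arch_irq_lock();
 *
 *	if (nothing_to_do()) {
 *		arch_cpu_atomic_idle(key);
 *	} else {
 *		arch_irq_unlock(key);
 *	}
 */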

/** @} */


/**
 * @addtogroup arch-smp
 * @{
 */

/**
 * Per-cpu entry function
 *
 * @param data context parameter, implementation specific
 */
typedef void (*arch_cpustart_t)(void *data);

/**
 * @brief Start a numbered CPU on a MP-capable system
 *
 * This starts and initializes a specific CPU. The main thread on startup is
 * running on CPU zero; other processors are numbered sequentially. On return
 * from this function, the CPU is known to have begun operating and will enter
 * the provided function. Its interrupts will be initialized but disabled such
 * that irq_unlock() with the provided key will work to enable them.
 *
 * Normally, in SMP mode this function will be called during kernel
 * initialization and should not be used as a user API. But it is defined here
 * for special-purpose apps which want to run Zephyr on one core and use
 * others for design-specific processing.
 *
 * @param cpu_num Integer number of the CPU
 * @param stack Stack memory for the CPU
 * @param sz Stack buffer size, in bytes
 * @param fn Function to begin running on the CPU.
 * @param arg Untyped argument to be passed to "fn"
 */
void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg);
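
/*
 * Illustrative launch sketch; the stack object name and cpu1_entry()
 * are hypothetical:
 *
 *	K_KERNEL_STACK_DEFINE(cpu1_stack, 1024);
 *
 *	arch_cpu_start(1, cpu1_stack, K_KERNEL_STACK_SIZEOF(cpu1_stack),
 *		       cpu1_entry, NULL);
 */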

/**
 * @brief Return CPU power status
 *
 * @param cpu_num Integer number of the CPU
 *
 * @return true if the specified CPU is currently active (powered up),
 *         false otherwise
 */
bool arch_cpu_active(int cpu_num);

/** @} */


/**
 * @addtogroup arch-irq
 * @{
 */

/**
 * Lock interrupts on the current CPU
 *
 * @see irq_lock()
 */
static inline unsigned int arch_irq_lock(void);

/**
 * Unlock interrupts on the current CPU
 *
 * @see irq_unlock()
 */
static inline void arch_irq_unlock(unsigned int key);

/**
 * Test if calling arch_irq_unlock() with this key would unlock irqs
 *
 * @param key value returned by arch_irq_lock()
 * @return true if interrupts were unlocked prior to the arch_irq_lock()
 * call that produced the key argument.
 */
static inline bool arch_irq_unlocked(unsigned int key);
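
/*
 * Typical critical-section sketch; nesting works because each call
 * returns the previous lockout state in its key:
 *
 *	unsigned int key = arch_irq_lock();
 *
 *	... access state shared with ISRs ...
 *
 *	arch_irq_unlock(key);
 */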

/**
 * Disable the specified interrupt line
 *
 * @note The behavior of interrupts that arrive after this call
 * returns and before the corresponding call to arch_irq_enable() is
 * undefined. The hardware is not required to latch and deliver such
 * an interrupt, though on some architectures that may work. Other
 * architectures will simply lose such an interrupt and never deliver
 * it. Many drivers and subsystems are not tolerant of such dropped
 * interrupts and it is the job of the application layer to ensure
 * that behavior remains correct.
 *
 * @see irq_disable()
 */
void arch_irq_disable(unsigned int irq);

/**
 * Enable the specified interrupt line
 *
 * @see irq_enable()
 */
void arch_irq_enable(unsigned int irq);

/**
 * Test if an interrupt line is enabled
 *
 * @see irq_is_enabled()
 */
int arch_irq_is_enabled(unsigned int irq);

/**
 * Arch-specific hook to install a dynamic interrupt.
 *
 * @param irq IRQ line number
 * @param priority Interrupt priority
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return The vector assigned to this interrupt
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags);
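
/*
 * Illustrative sketch; MY_IRQ, its priority, and my_isr() are
 * hypothetical, and portable code should use irq_connect_dynamic()
 * rather than calling the arch hook directly:
 *
 *	static void my_isr(const void *arg) { ... }
 *
 *	arch_irq_connect_dynamic(MY_IRQ, 1, my_isr, NULL, 0);
 *	arch_irq_enable(MY_IRQ);
 */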

/**
 * Arch-specific hook to dynamically uninstall a shared interrupt.
 * If the interrupt is not being shared, then the associated
 * _sw_isr_table entry will be replaced by (NULL, z_irq_spurious)
 * (default entry).
 *
 * @param irq IRQ line number
 * @param priority Interrupt priority
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return 0 in case of success, negative value otherwise
 */
int arch_irq_disconnect_dynamic(unsigned int irq, unsigned int priority,
				void (*routine)(const void *parameter),
				const void *parameter, uint32_t flags);

/**
 * @def ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags)
 *
 * @see IRQ_CONNECT()
 */

#ifdef CONFIG_PCIE
/**
 * @def ARCH_PCIE_IRQ_CONNECT(bdf, irq, pri, isr, arg, flags)
 *
 * @see PCIE_IRQ_CONNECT()
 */
#endif /* CONFIG_PCIE */

/**
 * @def ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
 *
 * @see IRQ_DIRECT_CONNECT()
 */

/**
 * @def ARCH_ISR_DIRECT_PM()
 *
 * @see ISR_DIRECT_PM()
 */

/**
 * @def ARCH_ISR_DIRECT_HEADER()
 *
 * @see ISR_DIRECT_HEADER()
 */

/**
 * @def ARCH_ISR_DIRECT_FOOTER(swap)
 *
 * @see ISR_DIRECT_FOOTER()
 */

/**
 * @def ARCH_ISR_DIRECT_DECLARE(name)
 *
 * @see ISR_DIRECT_DECLARE()
 */

#ifndef CONFIG_PCIE_CONTROLLER
/**
 * @brief Arch-specific hook for allocating IRQs
 *
 * Note: the implementation should disable/enable interrupts as needed to
 * avoid concurrency issues. An allocated IRQ is assumed to be in use, so
 * a subsequent call to arch_irq_is_used() for it should return true.
 *
 * @return The newly allocated IRQ or UINT_MAX on error.
 */
unsigned int arch_irq_allocate(void);

/**
 * @brief Arch-specific hook for declaring an IRQ being used
 *
 * Note: the implementation should disable/enable interrupts as needed to
 * avoid concurrency issues.
 *
 * @param irq the IRQ to declare being used
 */
void arch_irq_set_used(unsigned int irq);

/**
 * @brief Arch-specific hook for checking if an IRQ is being used already
 *
 * @param irq the IRQ to check
 *
 * @return true if the IRQ is already in use, false otherwise
 */
bool arch_irq_is_used(unsigned int irq);

#endif /* CONFIG_PCIE_CONTROLLER */

/**
 * @def ARCH_EXCEPT(reason_p)
 *
 * Generate a software induced fatal error.
 *
 * If the caller is running in user mode, only K_ERR_KERNEL_OOPS or
 * K_ERR_STACK_CHK_FAIL may be induced.
 *
 * This should ideally generate a software trap, with exception context
 * indicating state when this was invoked. General purpose register state at
 * the time of trap should not be disturbed from the calling context.
 *
 * @param reason_p K_ERR_ scoped reason code for the fatal error.
 */

#ifdef CONFIG_IRQ_OFFLOAD
/**
 * Run a function in interrupt context.
 *
 * Implementations should invoke an exception such that the kernel goes through
 * its interrupt handling dispatch path, to include switching to the interrupt
 * stack, and runs the provided routine and parameter.
 *
 * The only intended use-case for this function is for test code to verify
 * the correctness of kernel APIs in interrupt handling context. This API
 * is not intended for real applications.
 *
 * @see irq_offload()
 *
 * @param routine Function to run in interrupt context
 * @param parameter Value to pass to the function when invoked
 */
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter);
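
/*
 * Illustrative test-only sketch; isr_side_check() is hypothetical and
 * portable test code should use irq_offload() instead:
 *
 *	static void isr_side_check(const void *arg) { ... }
 *
 *	arch_irq_offload(isr_side_check, NULL);
 */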


/**
 * Initialize the architecture-specific portion of the irq_offload subsystem
 */
void arch_irq_offload_init(void);

#endif /* CONFIG_IRQ_OFFLOAD */

/** @} */


/**
 * @defgroup arch-smp Architecture-specific SMP APIs
 * @ingroup arch-interface
 * @{
 */
#ifdef CONFIG_SMP
/** Return the CPU struct for the currently executing CPU */
static inline struct _cpu *arch_curr_cpu(void);


/**
 * @brief Processor hardware ID
 *
 * Most multiprocessor architectures have a low-level unique ID value
 * associated with the current CPU that can be retrieved rapidly and
 * efficiently in kernel context. Note that while the numbering of
 * the CPUs is guaranteed to be unique, the values are
 * platform-defined. In particular, they are not guaranteed to match
 * Zephyr's own sequential CPU IDs (even though on some platforms they
 * do).
 *
 * @note There is an inherent race with this API: the system may
 * preempt the current thread and migrate it to another CPU before the
 * value is used. Safe usage requires knowing the migration is
 * impossible (e.g. because the code is in interrupt context, holds a
 * spinlock, or cannot migrate due to k_cpu_mask state).
 *
 * @return Unique ID for currently-executing CPU
 */
static inline uint32_t arch_proc_id(void);

/**
 * Broadcast an interrupt to all CPUs
 *
 * This will invoke z_sched_ipi() on all other CPUs in the system.
 */
void arch_sched_broadcast_ipi(void);

/**
 * Direct IPIs to the specified CPUs
 *
 * This will invoke z_sched_ipi() on the CPUs identified by @a cpu_bitmap.
 *
 * @param cpu_bitmap A bitmap indicating which CPUs need the IPI
 */
void arch_sched_directed_ipi(uint32_t cpu_bitmap);
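
/*
 * Illustrative sketch: send the scheduler IPI to CPUs 1 and 2 only,
 * using BIT() from zephyr/sys/util_macro.h:
 *
 *	arch_sched_directed_ipi(BIT(1) | BIT(2));
 */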

/**
 * @brief Arch-specific hook for SMP initialization
 *
 * @return 0 on success, a negative errno code on failure
 */
int arch_smp_init(void);

#endif /* CONFIG_SMP */

/**
 * @brief Returns the number of CPUs
 *
 * For most systems this will be the same as CONFIG_MP_MAX_NUM_CPUS,
 * however some systems may determine this at runtime instead.
 *
 * @return the number of CPUs
 */
static inline unsigned int arch_num_cpus(void);

/** @} */


/**
 * @defgroup arch-userspace Architecture-specific userspace APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_USERSPACE
#include <zephyr/arch/syscall.h>

/**
 * Invoke a system call with 0 arguments.
 *
 * No general-purpose register state other than return value may be preserved
 * when transitioning from supervisor mode back down to user mode for
 * security reasons.
 *
 * It is required that all arguments be stored in registers when elevating
 * privileges from user to supervisor mode.
 *
 * Processing of the syscall takes place on a separate kernel stack. Interrupts
 * should be enabled when invoking the system call marshallers from the
 * dispatch table. Thread preemption may occur when handling system calls.
 *
 * Call IDs are untrusted and must be bounds-checked, as the value is used to
 * index the system call dispatch table, containing function pointers to the
 * specific system call code.
 *
 * @param call_id System call ID
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id);

/**
 * Invoke a system call with 1 argument.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *        kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id);

/**
 * Invoke a system call with 2 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *        kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id);

/**
 * Invoke a system call with 3 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *        kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id);

/**
 * Invoke a system call with 4 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *        kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id);

/**
 * Invoke a system call with 5 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param arg5 Fifth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *        kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id);

/**
 * Invoke a system call with 6 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param arg5 Fifth argument to the system call.
 * @param arg6 Sixth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *        kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id);
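
/*
 * Illustrative sketch (not generated code): a user-mode wrapper for a
 * hypothetical two-argument system call with ID K_SYSCALL_MY_CALL could
 * marshal its arguments as follows. Real wrappers are generated from
 * __syscall declarations by the syscall machinery.
 *
 *	static inline int my_call(const struct device *dev, uint32_t val)
 *	{
 *		return (int)arch_syscall_invoke2((uintptr_t)dev,
 *						 (uintptr_t)val,
 *						 K_SYSCALL_MY_CALL);
 *	}
 */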

/**
 * Indicate whether we are currently running in user mode
 *
 * @return True if the CPU is currently running with user permissions
 */
static inline bool arch_is_user_context(void);

/**
 * @brief Get the maximum number of partitions for a memory domain
 *
 * @return Max number of partitions, or -1 if there is no limit
 */
int arch_mem_domain_max_partitions_get(void);

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
/**
 *
 * @brief Architecture-specific hook for memory domain initialization
 *
 * Perform any tasks needed to initialize architecture-specific data within
 * the memory domain, such as reserving memory for page tables. All members
 * of the provided memory domain aside from `arch` will be initialized when
 * this is called, but no threads will be assigned yet.
 *
 * This function may fail if initializing the memory domain requires allocation,
 * such as for page tables.
 *
 * The associated function k_mem_domain_init() documents that making
 * multiple init calls to the same memory domain is undefined behavior,
 * but has no assertions in place to check this. If this matters, it may be
 * desirable to add checks for this in the implementation of this function.
 *
 * @param domain The memory domain to initialize
 * @retval 0 Success
 * @retval -ENOMEM Insufficient memory
 */
int arch_mem_domain_init(struct k_mem_domain *domain);
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
/**
 * @brief Add a thread to a memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when the provided thread has been added to a memory domain.
 *
 * The thread->mem_domain_info.mem_domain pointer will be set to the domain to
 * be added to before this is called. Implementations may assume that the
 * thread is not already a member of this domain.
 *
 * @param thread Thread which needs to be configured.
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 * @retval -ENOSPC if running out of space in internal structures
 *         (e.g. translation tables)
 */
int arch_mem_domain_thread_add(struct k_thread *thread);

/**
 * @brief Remove a thread from a memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when the provided thread has been removed from a memory domain.
 *
 * The thread's memory domain pointer will be the domain that the thread
 * is being removed from.
 *
 * @param thread Thread being removed from its memory domain
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 */
int arch_mem_domain_thread_remove(struct k_thread *thread);

/**
 * @brief Remove a partition from the memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when a memory domain has had a partition removed.
 *
 * The domain's partition index data is not cleared, and its configured
 * partition count is not decremented, until after this function runs.
 *
 * @param domain The memory domain structure
 * @param partition_id The partition index that needs to be deleted
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 * @retval -ENOENT if no matching partition found
 */
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				     uint32_t partition_id);

/**
 * @brief Add a partition to the memory domain
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when a memory domain has a partition added.
 *
 * @param domain The memory domain structure
 * @param partition_id The partition that needs to be added
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 */
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
				  uint32_t partition_id);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

/**
 * @brief Check memory region permissions
 *
 * Given a memory region, return whether the current memory management hardware
 * configuration would allow a user thread to read/write that region. Used by
 * system calls to validate buffers coming in from userspace.
 *
 * Notes:
 * The function is guaranteed to never return validation success if the entire
 * buffer area is not user accessible.
 *
 * The function is guaranteed to correctly validate the permissions of the
 * supplied buffer, if the user access permissions of the entire buffer are
 * enforced by a single, enabled memory management region.
 *
 * In some architectures the validation will always return failure
 * if the supplied memory buffer spans multiple enabled memory management
 * regions (even if all such regions permit user access).
 *
 * @warning A buffer of size zero (0) has undefined behavior.
 *
 * @param addr start address of the buffer
 * @param size the size of the buffer
 * @param write If non-zero, additionally check if the area is writable.
 *        Otherwise, just check if the memory can be read.
 *
 * @return nonzero if the permissions don't match.
 */
int arch_buffer_validate(const void *addr, size_t size, int write);
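
/*
 * Illustrative sketch of a syscall handler rejecting a user buffer that
 * the caller cannot write (the real kernel wraps this check in its
 * syscall memory-validation helpers):
 *
 *	if (arch_buffer_validate(buf, size, 1) != 0) {
 *		return -EFAULT;
 *	}
 */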

/**
 * Get the optimal virtual region alignment to optimize the MMU table layout
 *
 * Some MMU hardware requires regions to be aligned to an intermediate-level
 * block size in order to reduce table usage.
 * This call returns the optimal virtual address alignment in order to permit
 * such optimization in the following MMU mapping call.
 *
 * @param[in] phys Physical address of region to be mapped,
 *                 aligned to @kconfig{CONFIG_MMU_PAGE_SIZE}
 * @param[in] size Size of region to be mapped,
 *                 aligned to @kconfig{CONFIG_MMU_PAGE_SIZE}
 *
 * @return Alignment to apply on the virtual address of this region
 */
size_t arch_virt_region_align(uintptr_t phys, size_t size);

/**
 * Perform a one-way transition from supervisor to user mode.
 *
 * Implementations of this function must do the following:
 *
 * - Reset the thread's stack pointer to a suitable initial value. We do not
 *   need any prior context since this is a one-way operation.
 * - Set up any kernel stack region for the CPU to use during privilege
 *   elevation
 * - Put the CPU in whatever its equivalent of user mode is
 * - Transfer execution to arch_new_thread() passing along all the supplied
 *   arguments, in user mode.
 *
 * @param user_entry Entry point to start executing as a user thread
 * @param p1 1st parameter to user thread
 * @param p2 2nd parameter to user thread
 * @param p3 3rd parameter to user thread
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3);

/**
 * @brief Induce a kernel oops that appears to come from a specific location
 *
 * Normally, k_oops() generates an exception that appears to come from the
 * call site of the k_oops() itself.
 *
 * However, when validating arguments to a system call, if there are problems
 * we want the oops to appear to come from where the system call was invoked
 * and not inside the validation function.
 *
 * @param ssf System call stack frame pointer. This gets passed as an argument
 *        to _k_syscall_handler_t functions and its contents are completely
 *        architecture specific.
 */
FUNC_NORETURN void arch_syscall_oops(void *ssf);

/**
 * @brief Safely take the length of a potentially bad string
 *
 * This must not fault; instead, the @p err parameter must have -1 written
 * to it. This function otherwise should work exactly like libc strnlen().
 * On success @p err should be set to 0.
 *
 * @param s String to measure
 * @param maxsize Max length of the string
 * @param err Error value to write
 * @return Length of the string, not counting NULL byte, up to maxsize
 */
size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err);
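
/*
 * Illustrative usage sketch from a syscall handler; ustr and MAX_LEN
 * are hypothetical:
 *
 *	int err;
 *	size_t len = arch_user_string_nlen(ustr, MAX_LEN, &err);
 *
 *	if (err != 0) {
 *		return -EFAULT;
 *	}
 */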
#endif /* CONFIG_USERSPACE */

/**
 * @brief Detect memory coherence type
 *
 * Required when ARCH_HAS_COHERENCE is true. This function returns
 * true if the byte pointed to lies within an architecture-defined
 * "coherence region" (typically implemented with uncached memory) and
 * can safely be used in multiprocessor code without explicit flush or
 * invalidate operations.
 *
 * @note The result is for only the single byte at the specified
 * address; this API is not required to check region boundaries or to
 * expect aligned pointers. The expectation is that the code above
 * will have queried the appropriate address(es).
 */
#ifndef CONFIG_ARCH_HAS_COHERENCE
static inline bool arch_mem_coherent(void *ptr)
{
	ARG_UNUSED(ptr);
	return true;
}
#endif

/**
 * @brief Ensure cache coherence prior to context switch
 *
 * Required when ARCH_HAS_COHERENCE is true. On cache-incoherent
 * multiprocessor architectures, thread stacks are cached by default
 * for performance reasons. They must therefore be flushed
 * appropriately on context switch. The rules are:
 *
 * 1. The region containing live data in the old stack (generally the
 *    bytes between the current stack pointer and the top of the stack
 *    memory) must be flushed to underlying storage so a new CPU that
 *    runs the same thread sees the correct data. This must happen
 *    before the assignment of the switch_handle field in the thread
 *    struct which signals the completion of context switch.
 *
 * 2. Any data areas to be read from the new stack (generally the same
 *    as the live region when it was saved) should be invalidated (and
 *    NOT flushed!) in the data cache. This is because another CPU
 *    may have run or re-initialized the thread since this CPU
 *    suspended it, and any data present in cache will be stale.
 *
 * @note The kernel will call this function during interrupt exit when
 * a new thread has been chosen to run, and also immediately before
 * entering arch_switch() to effect a code-driven context switch. In
 * the latter case, it is very likely that more data will be written
 * to the old_thread stack region after this function returns but
 * before the completion of the switch. Simply flushing naively here
 * is not sufficient on many architectures and coordination with the
 * arch_switch() implementation is likely required.
 *
 * @param old_thread The old thread to be flushed before being allowed
 *                   to run on other CPUs.
 * @param old_switch_handle The switch handle to be stored into
 *                          old_thread (it will not be valid until the
 *                          cache is flushed so is not present yet).
 *                          This will be NULL if inside z_swap()
 *                          (because the arch_switch() has not saved it
 *                          yet).
 * @param new_thread The new thread to be invalidated before it runs locally.
 */
#ifndef CONFIG_KERNEL_COHERENCE
static inline void arch_cohere_stacks(struct k_thread *old_thread,
				      void *old_switch_handle,
				      struct k_thread *new_thread)
{
	ARG_UNUSED(old_thread);
	ARG_UNUSED(old_switch_handle);
	ARG_UNUSED(new_thread);
}
#endif

/** @} */

/**
 * @defgroup arch-gdbstub Architecture-specific gdbstub APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/**
 * @brief Architecture layer debug start
 *
 * This function is called by @c gdb_init()
 */
void arch_gdb_init(void);

/**
 * @brief Continue running program
 *
 * Continue software execution.
 */
void arch_gdb_continue(void);

/**
 * @brief Continue with one step
 *
 * Continue software execution until it reaches the next statement.
 */
void arch_gdb_step(void);

/**
 * @brief Read all registers and output them as a hexadecimal string.
 *
 * This reads all CPU registers and outputs them as a hexadecimal string.
 * The output string must be parsable by GDB.
 *
 * @param ctx GDB context
 * @param buf Buffer to output hexadecimal string.
 * @param buflen Length of buffer.
 *
 * @return Length of hexadecimal string written.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_readall(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen);

/**
 * @brief Take a hexadecimal string and update all registers.
 *
 * This takes in a hexadecimal string as presented from GDB,
 * and updates all CPU registers with new values.
 *
 * @param ctx GDB context
 * @param hex Input hexadecimal string.
 * @param hexlen Length of hexadecimal string.
 *
 * @return Length of hexadecimal string parsed.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_writeall(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen);

/**
 * @brief Read one register and output it as a hexadecimal string.
 *
 * This reads one CPU register and outputs it as a hexadecimal string.
 * The output string must be parsable by GDB.
 *
 * @param ctx GDB context
 * @param buf Buffer to output hexadecimal string.
 * @param buflen Length of buffer.
 * @param regno Register number
 *
 * @return Length of hexadecimal string written.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_readone(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen,
			    uint32_t regno);

/**
 * @brief Take a hexadecimal string and update one register.
 *
 * This takes in a hexadecimal string as presented from GDB,
 * and updates one CPU register with a new value.
 *
 * @param ctx GDB context
 * @param hex Input hexadecimal string.
 * @param hexlen Length of hexadecimal string.
 * @param regno Register number
 *
 * @return Length of hexadecimal string parsed.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen,
			     uint32_t regno);

/**
 * @brief Add breakpoint or watchpoint.
 *
 * @param ctx GDB context
 * @param type Breakpoint or watchpoint type
 * @param addr Address of breakpoint or watchpoint
 * @param kind Size of breakpoint/watchpoint in bytes
 *
 * @retval 0 Operation successful
 * @retval -1 Error encountered
 * @retval -2 Not supported
 */
int arch_gdb_add_breakpoint(struct gdb_ctx *ctx, uint8_t type,
			    uintptr_t addr, uint32_t kind);

/**
 * @brief Remove breakpoint or watchpoint.
 *
 * @param ctx GDB context
 * @param type Breakpoint or watchpoint type
 * @param addr Address of breakpoint or watchpoint
 * @param kind Size of breakpoint/watchpoint in bytes
 *
 * @retval 0 Operation successful
 * @retval -1 Error encountered
 * @retval -2 Not supported
 */
int arch_gdb_remove_breakpoint(struct gdb_ctx *ctx, uint8_t type,
			       uintptr_t addr, uint32_t kind);

#endif
/** @} */

#ifdef CONFIG_TIMING_FUNCTIONS
#include <zephyr/timing/types.h>

/**
 * @brief Arch specific Timing Measurement APIs
 * @defgroup timing_api_arch Arch specific Timing Measurement APIs
 * @ingroup timing_api
 *
 * Implements the necessary bits to support timing measurement
 * using architecture specific timing measurement mechanism.
 *
 * @{
 */

/**
 * @brief Initialize the timing subsystem.
 *
 * Perform the necessary steps to initialize the timing subsystem.
 *
 * @see timing_init()
 */
void arch_timing_init(void);

/**
 * @brief Signal the start of the timing information gathering.
 *
 * Signal to the timing subsystem that timing information
 * will be gathered from this point forward.
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @see timing_start()
 */
void arch_timing_start(void);

/**
 * @brief Signal the end of the timing information gathering.
 *
 * Signal to the timing subsystem that timing information
 * is no longer being gathered from this point forward.
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @see timing_stop()
 */
void arch_timing_stop(void);

/**
 * @brief Return timing counter.
 *
 * @parblock
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @endparblock
 *
 * @parblock
 *
 * @note Not all architectures have a timing counter with 64 bit precision.
 * It is possible to see this value "go backwards" due to internal
 * rollover. Timing code must be prepared to address the rollover
 * (with platform-dependent code, e.g. by casting to a uint32_t before
 * subtraction) or by using arch_timing_cycles_get() which is required
 * to understand the distinction.
 *
 * @endparblock
 *
 * @return Timing counter.
 *
 * @see timing_counter_get()
 */
timing_t arch_timing_counter_get(void);

/**
 * @brief Get number of cycles between @p start and @p end.
 *
 * @note For some architectures, the raw numbers from the counter need
 * to be scaled to obtain the actual number of cycles, or may roll over
 * internally. This function computes a positive-definite interval
 * between two returned cycle values.
 *
 * @param start Pointer to counter at start of a measured execution.
 * @param end Pointer to counter at stop of a measured execution.
 * @return Number of cycles between start and end.
 *
 * @see timing_cycles_get()
 */
uint64_t arch_timing_cycles_get(volatile timing_t *const start,
				volatile timing_t *const end);
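
/*
 * Illustrative measurement sketch using these hooks (normally reached
 * via the timing_* wrappers); the measured code is a placeholder:
 *
 *	arch_timing_start();
 *	timing_t t0 = arch_timing_counter_get();
 *
 *	... code under test ...
 *
 *	timing_t t1 = arch_timing_counter_get();
 *	uint64_t ns = arch_timing_cycles_to_ns(arch_timing_cycles_get(&t0, &t1));
 *	arch_timing_stop();
 */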

/**
 * @brief Get frequency of counter used (in Hz).
 *
 * @return Frequency of counter used for timing in Hz.
 *
 * @see timing_freq_get()
 */
uint64_t arch_timing_freq_get(void);

/**
 * @brief Convert number of @p cycles into nanoseconds.
 *
 * @param cycles Number of cycles
 * @return Converted time value
 *
 * @see timing_cycles_to_ns()
 */
uint64_t arch_timing_cycles_to_ns(uint64_t cycles);

/**
 * @brief Convert number of @p cycles into nanoseconds with averaging.
 *
 * @param cycles Number of cycles
 * @param count Number of accumulated cycle measurements to average over
 * @return Converted time value
 *
 * @see timing_cycles_to_ns_avg()
 */
uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);

/**
 * @brief Get frequency of counter used (in MHz).
 *
 * @return Frequency of counter used for timing in MHz.
 *
 * @see timing_freq_get_mhz()
 */
uint32_t arch_timing_freq_get_mhz(void);

/** @} */

#endif /* CONFIG_TIMING_FUNCTIONS */

#ifdef CONFIG_PCIE_MSI_MULTI_VECTOR

struct msi_vector;
typedef struct msi_vector msi_vector_t;

/**
 * @brief Allocate vector(s) for the endpoint MSI message(s).
 *
 * @param priority the MSI vectors base interrupt priority
 * @param vectors an array to fill with allocated MSI vectors
 * @param n_vector the size of MSI vectors array
 *
 * @return The number of allocated MSI vectors
 */
uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
				       msi_vector_t *vectors,
				       uint8_t n_vector);

/**
 * @brief Connect an MSI vector to the given routine
 *
 * @param vector The MSI vector to connect to
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return True on success, false otherwise
 */
bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
				  void (*routine)(const void *parameter),
				  const void *parameter,
				  uint32_t flags);

#endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */

/**
 * @brief Perform architecture specific processing within spin loops
 *
 * This is invoked from busy loops with IRQs disabled such as the contended
 * spinlock loop. The default implementation is a weak function that calls
 * arch_nop(). Architectures may implement this function to perform extra
 * checks or power management tricks if needed.
 */
void arch_spin_relax(void);

/**
 * @defgroup arch-stackwalk Architecture-specific Stack Walk APIs
 * @ingroup arch-interface
 *
 * To add API support to an architecture, `arch_stack_walk()` should be implemented and a
 * non-user-configurable Kconfig `ARCH_HAS_STACKWALK` that defaults to `y` should be created
 * in the architecture's top level Kconfig, with all the relevant dependencies.
 *
 * @{
 */

/**
 * stack_trace_callback_fn - Callback for @ref arch_stack_walk
 * @param cookie Caller supplied pointer handed back by @ref arch_stack_walk
 * @param addr The stack entry address to consume
 *
 * @return True if the entry was consumed or skipped; false if there is no
 * space left to store it
 */
typedef bool (*stack_trace_callback_fn)(void *cookie, unsigned long addr);

/**
 * @brief Architecture-specific function to walk the stack
 *
 * @param callback_fn Callback which is invoked by the architecture code for each entry.
 * @param cookie Caller supplied pointer which is handed back to @a callback_fn
 * @param thread Pointer to a k_thread struct, can be NULL
 * @param esf Pointer to an arch_esf struct, can be NULL
 *
 * ============ ======= ============================================
 * thread       esf
 * ============ ======= ============================================
 * thread       NULL    Stack trace from thread (can be arch_current_thread())
 * thread       esf     Stack trace starting on esf
 * ============ ======= ============================================
 */
void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
		     const struct k_thread *thread, const struct arch_esf *esf);
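
/*
 * Illustrative sketch: print every frame of the current thread's stack;
 * dump_cb() is a hypothetical callback:
 *
 *	static bool dump_cb(void *cookie, unsigned long addr)
 *	{
 *		printk("  at %#lx\n", addr);
 *		return true;
 *	}
 *
 *	arch_stack_walk(dump_cb, NULL, arch_current_thread(), NULL);
 */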

/**
 * arch-stackwalk
 * @}
 */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#include <zephyr/arch/arch_inlines.h>

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_ */
|