LCOV - code coverage report
Current view: top level - zephyr/arch - arch_interface.h Coverage Total Hit
Test: new.info Lines: 96.9 % 65 63
Test Date: 2025-09-05 16:43:28

            Line data    Source code
       1            0 : /*
       2              :  * Copyright (c) 2019 Intel Corporation.
       3              :  *
       4              :  * SPDX-License-Identifier: Apache-2.0
       5              :  */
       6              : 
       7              : /**
       8              :  * @defgroup arch-interface Architecture Interface
       9              :  * @ingroup internal_api
      10              :  * @brief Internal kernel APIs with public scope
      11              :  *
      12              :  * Any public kernel APIs that are implemented as inline functions and need to
      13              :  * call architecture-specific API so will have the prototypes for the
      14              :  * architecture-specific APIs here. Architecture APIs that aren't used in this
      15              :  * way go in kernel/include/kernel_arch_interface.h.
      16              :  *
      17              :  * The set of architecture-specific APIs used internally by public macros and
      18              :  * inline functions in public headers are also specified and documented.
      19              :  *
      20              :  * For all macros and inline function prototypes described herein, <arch/cpu.h>
      21              :  * must eventually pull in full definitions for all of them (the actual macro
      22              :  * defines and inline function bodies)
      23              :  *
      24              :  * include/kernel.h and other public headers depend on definitions in this
      25              :  * header.
      26              :  */
      27              : #ifndef ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_
      28              : #define ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_
      29              : 
      30              : #ifndef _ASMLANGUAGE
      31              : #include <zephyr/toolchain.h>
      32              : #include <stddef.h>
      33              : #include <zephyr/types.h>
      34              : #include <zephyr/arch/cpu.h>
      35              : #include <zephyr/irq_offload.h>
      36              : 
      37              : #ifdef __cplusplus
      38              : extern "C" {
      39              : #endif
      40              : 
      41              : /* NOTE: We cannot pull in kernel.h here, need some forward declarations  */
      42              : struct arch_esf;
      43              : struct k_thread;
      44              : struct k_mem_domain;
      45              : 
      46            1 : typedef struct z_thread_stack_element k_thread_stack_t;
      47              : 
      48            1 : typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);
      49              : 
      50              : /**
      51              :  * @defgroup arch-timing Architecture timing APIs
      52              :  * @ingroup arch-interface
      53              :  * @{
      54              :  */
      55              : 
      56              : /**
      57              :  * Obtain the current cycle count, in units specified by
      58              :  * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC.  While this is historically
      59              :  * specified as part of the architecture API, in practice virtually
      60              :  * all platforms forward it to the sys_clock_cycle_get_32() API
      61              :  * provided by the timer driver.
      62              :  *
      63              :  * @see k_cycle_get_32()
      64              :  *
      65              :  * @return The current cycle time.  This should count up monotonically
      66              :  * through the full 32 bit space, wrapping at 0xffffffff.  Hardware
      67              :  * with fewer bits of precision in the timer is expected to synthesize
      68              :  * a 32 bit count.
      69              :  */
      70            1 : static inline uint32_t arch_k_cycle_get_32(void);
      71              : 
      72              : /**
      73              :  * As for arch_k_cycle_get_32(), but with a 64 bit return value.  Not
      74              :  * all timer hardware has a 64 bit timer, this needs to be implemented
      75              :  * only if CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER is set.
      76              :  *
      77              :  * @see arch_k_cycle_get_32()
      78              :  *
      79              :  * @return The current cycle time.  This should count up monotonically
      80              :  * through the full 64 bit space, wrapping at 2^64-1.  Hardware with
      81              :  * fewer bits of precision in the timer is generally not expected to
      82              :  * implement this API.
      83              :  */
      84            1 : static inline uint64_t arch_k_cycle_get_64(void);
      85              : 
      86              : /** @} */
      87              : 
      88              : 
      89              : /**
      90              :  * @addtogroup arch-threads
      91              :  * @{
      92              :  */
      93              : 
      94              : /**
      95              :  * @def ARCH_THREAD_STACK_RESERVED
      96              :  *
      97              :  * @see K_THREAD_STACK_RESERVED
      98              :  */
      99              : 
     100              : /**
     101              :  * @def ARCH_STACK_PTR_ALIGN
     102              :  *
     103              :  * Required alignment of the CPU's stack pointer register value, dictated by
     104              :  * hardware constraints and the ABI calling convention.
     105              :  *
     106              :  * @see Z_STACK_PTR_ALIGN
     107              :  */
     108              : 
     109              : /**
     110              :  * @def ARCH_THREAD_STACK_OBJ_ALIGN(size)
     111              :  *
     112              :  * Required alignment of the lowest address of a stack object.
     113              :  *
     114              :  * Optional definition.
     115              :  *
     116              :  * @see Z_THREAD_STACK_OBJ_ALIGN
     117              :  */
     118              : 
     119              : /**
     120              :  * @def ARCH_THREAD_STACK_SIZE_ADJUST(size)
     121              :  * @brief Round up a stack buffer size to alignment constraints
     122              :  *
     123              :  * Adjust a requested stack buffer size to the true size of its underlying
     124              :  * buffer, defined as the area usable for thread stack context and thread-
     125              :  * local storage.
     126              :  *
     127              :  * The size value passed here does not include storage reserved for platform
     128              :  * data.
     129              :  *
     130              :  * The returned value is either the same size provided (if already properly
     131              :  * aligned), or rounded up to satisfy alignment constraints.  Calculations
     132              :  * performed here *must* be idempotent.
     133              :  *
     134              :  * Optional definition. If undefined, stack buffer sizes are either:
     135              :  * - Rounded up to the next power of two if user mode is enabled on an arch
     136              :  *   with an MPU that requires such alignment
     137              :  * - Rounded up to ARCH_STACK_PTR_ALIGN
     138              :  *
     139              :  * @see Z_THREAD_STACK_SIZE_ADJUST
     140              :  */
     141              : 
     142              : /**
     143              :  * @def ARCH_KERNEL_STACK_RESERVED
     144              :  * @brief MPU guard size for kernel-only stacks
     145              :  *
     146              :  * If MPU stack guards are used to catch stack overflows, specify the
     147              :  * amount of space reserved in kernel stack objects. If guard sizes are
     148              :  * context dependent, this should be in the minimum guard size, with
     149              :  * remaining space carved out if needed.
     150              :  *
     151              :  * Optional definition, defaults to 0.
     152              :  *
     153              :  * @see K_KERNEL_STACK_RESERVED
     154              :  */
     155              : 
     156              : /**
     157              :  * @def ARCH_KERNEL_STACK_OBJ_ALIGN
     158              :  * @brief Required alignment of the lowest address of a kernel-only stack.
     159              :  */
     160              : 
     161              : /** @} */
     162              : 
     163              : /**
     164              :  * @addtogroup arch-pm
     165              :  * @{
     166              :  */
     167              : 
     168              : /**
     169              :  * @brief Power save idle routine
     170              :  *
     171              :  * This function will be called by the kernel idle loop or possibly within
     172              :  * an implementation of z_pm_save_idle in the kernel when the
     173              :  * '_pm_save_flag' variable is non-zero.
     174              :  *
     175              :  * Architectures that do not implement power management instructions may
     176              :  * immediately return, otherwise a power-saving instruction should be
     177              :  * issued to wait for an interrupt.
     178              :  *
     179              :  * @note The function is expected to return after the interrupt that has
     180              :  * caused the CPU to exit power-saving mode has been serviced, although
     181              :  * this is not a firm requirement.
     182              :  *
     183              :  * @see k_cpu_idle()
     184              :  */
     185            1 : void arch_cpu_idle(void);
     186              : 
     187              : /**
     188              :  * @brief Atomically re-enable interrupts and enter low power mode
     189              :  *
     190              :  * The requirements for arch_cpu_atomic_idle() are as follows:
     191              :  *
     192              :  * -# Enabling interrupts and entering a low-power mode needs to be
     193              :  *    atomic, i.e. there should be no period of time where interrupts are
     194              :  *    enabled before the processor enters a low-power mode.  See the comments
     195              :  *    in k_lifo_get(), for example, of the race condition that
     196              :  *    occurs if this requirement is not met.
     197              :  *
     198              :  * -# After waking up from the low-power mode, the interrupt lockout state
     199              :  *    must be restored as indicated in the 'key' input parameter.
     200              :  *
     201              :  * @see k_cpu_atomic_idle()
     202              :  *
     203              :  * @param key Lockout key returned by previous invocation of arch_irq_lock()
     204              :  */
     205            1 : void arch_cpu_atomic_idle(unsigned int key);
     206              : 
     207              : /** @} */
     208              : 
     209              : 
     210              : /**
     211              :  * @addtogroup arch-smp
     212              :  * @{
     213              :  */
     214              : 
     215              : /**
     216              :  * Per-cpu entry function
     217              :  *
     218              :  * @param data context parameter, implementation specific
     219              :  */
     220            1 : typedef void (*arch_cpustart_t)(void *data);
     221              : 
     222              : /**
     223              :  * @brief Start a numbered CPU on a MP-capable system
     224              :  *
     225              :  * This starts and initializes a specific CPU.  The main thread on startup is
     226              :  * running on CPU zero, other processors are numbered sequentially.  On return
     227              :  * from this function, the CPU is known to have begun operating and will enter
     228              :  * the provided function.  Its interrupts will be initialized but disabled such
     229              :  * that irq_unlock() with the provided key will work to enable them.
     230              :  *
     231              :  * Normally, in SMP mode this function will be called by the kernel
     232              :  * initialization and should not be used as a user API.  But it is defined here
     233              :  * for special-purpose apps which want Zephyr running on one core and to use
     234              :  * others for design-specific processing.
     235              :  *
     236              :  * @param cpu_num Integer number of the CPU
     237              :  * @param stack Stack memory for the CPU
     238              :  * @param sz Stack buffer size, in bytes
     239              :  * @param fn Function to begin running on the CPU.
     240              :  * @param arg Untyped argument to be passed to "fn"
     241              :  */
     242            1 : void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
     243              :                     arch_cpustart_t fn, void *arg);
     244              : 
     245              : /**
     246              :  * @brief Return CPU power status
     247              :  *
     248              :  * @param cpu_num Integer number of the CPU
     249              :  */
     250            1 : bool arch_cpu_active(int cpu_num);
     251              : 
     252              : /** @} */
     253              : 
     254              : 
     255              : /**
     256              :  * @addtogroup arch-irq
     257              :  * @{
     258              :  */
     259              : 
     260              : /**
     261              :  * Lock interrupts on the current CPU
     262              :  *
     263              :  * @see irq_lock()
     264              :  */
     265            1 : static inline unsigned int arch_irq_lock(void);
     266              : 
     267              : /**
     268              :  * Unlock interrupts on the current CPU
     269              :  *
     270              :  * @see irq_unlock()
     271              :  */
     272            1 : static inline void arch_irq_unlock(unsigned int key);
     273              : 
     274              : /**
     275              :  * Test if calling arch_irq_unlock() with this key would unlock irqs
     276              :  *
     277              :  * @param key value returned by arch_irq_lock()
     278              :  * @return true if interrupts were unlocked prior to the arch_irq_lock()
     279              :  * call that produced the key argument.
     280              :  */
     281            1 : static inline bool arch_irq_unlocked(unsigned int key);
     282              : 
     283              : /**
     284              :  * Disable the specified interrupt line
     285              :  *
     286              :  * @note: The behavior of interrupts that arrive after this call
     287              :  * returns and before the corresponding call to arch_irq_enable() is
     288              :  * undefined.  The hardware is not required to latch and deliver such
     289              :  * an interrupt, though on some architectures that may work.  Other
     290              :  * architectures will simply lose such an interrupt and never deliver
     291              :  * it.  Many drivers and subsystems are not tolerant of such dropped
     292              :  * interrupts and it is the job of the application layer to ensure
     293              :  * that behavior remains correct.
     294              :  *
     295              :  * @see irq_disable()
     296              :  */
     297            1 : void arch_irq_disable(unsigned int irq);
     298              : 
     299              : /**
     300              :  * Enable the specified interrupt line
     301              :  *
     302              :  * @see irq_enable()
     303              :  */
     304            1 : void arch_irq_enable(unsigned int irq);
     305              : 
     306              : /**
     307              :  * Test if an interrupt line is enabled
     308              :  *
     309              :  * @see irq_is_enabled()
     310              :  */
     311            1 : int arch_irq_is_enabled(unsigned int irq);
     312              : 
     313              : /**
     314              :  * Arch-specific hook to install a dynamic interrupt.
     315              :  *
     316              :  * @param irq IRQ line number
     317              :  * @param priority Interrupt priority
     318              :  * @param routine Interrupt service routine
     319              :  * @param parameter ISR parameter
     320              :  * @param flags Arch-specific IRQ configuration flag
     321              :  *
     322              :  * @return The vector assigned to this interrupt
     323              :  */
     324            1 : int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
     325              :                              void (*routine)(const void *parameter),
     326              :                              const void *parameter, uint32_t flags);
     327              : 
     328              : /**
     329              :  * Arch-specific hook to dynamically uninstall a shared interrupt.
     330              :  * If the interrupt is not being shared, then the associated
     331              :  * _sw_isr_table entry will be replaced by (NULL, z_irq_spurious)
     332              :  * (default entry).
     333              :  *
     334              :  * @param irq IRQ line number
     335              :  * @param priority Interrupt priority
     336              :  * @param routine Interrupt service routine
     337              :  * @param parameter ISR parameter
     338              :  * @param flags Arch-specific IRQ configuration flag
     339              :  *
     340              :  * @return 0 in case of success, negative value otherwise
     341              :  */
     342            1 : int arch_irq_disconnect_dynamic(unsigned int irq, unsigned int priority,
     343              :                                 void (*routine)(const void *parameter),
     344              :                                 const void *parameter, uint32_t flags);
     345              : 
     346              : /**
     347              :  * @def ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags)
     348              :  *
     349              :  * @see IRQ_CONNECT()
     350              :  */
     351              : 
     352              : #ifdef CONFIG_PCIE
     353              : /**
     354              :  * @def ARCH_PCIE_IRQ_CONNECT(bdf, irq, pri, isr, arg, flags)
     355              :  *
     356              :  * @see PCIE_IRQ_CONNECT()
     357              :  */
     358              : #endif /* CONFIG_PCIE */
     359              : 
     360              : /**
     361              :  * @def ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
     362              :  *
     363              :  * @see IRQ_DIRECT_CONNECT()
     364              :  */
     365              : 
     366              : /**
     367              :  * @def ARCH_ISR_DIRECT_PM()
     368              :  *
     369              :  * @see ISR_DIRECT_PM()
     370              :  */
     371              : 
     372              : /**
     373              :  * @def ARCH_ISR_DIRECT_HEADER()
     374              :  *
     375              :  * @see ISR_DIRECT_HEADER()
     376              :  */
     377              : 
     378              : /**
     379              :  * @def ARCH_ISR_DIRECT_FOOTER(swap)
     380              :  *
     381              :  * @see ISR_DIRECT_FOOTER()
     382              :  */
     383              : 
     384              : /**
     385              :  * @def ARCH_ISR_DIRECT_DECLARE(name)
     386              :  *
     387              :  * @see ISR_DIRECT_DECLARE()
     388              :  */
     389              : 
     390              : #ifndef CONFIG_PCIE_CONTROLLER
     391              : /**
     392              :  * @brief Arch-specific hook for allocating IRQs
     393              :  *
     394              :  * Note: disable/enable IRQ relevantly inside the implementation of such
     395              :  * function to avoid concurrency issues. Also, an allocated IRQ is assumed
     396              :  * to be used thus a following @see arch_irq_is_used() should return true.
     397              :  *
     398              :  * @return The newly allocated IRQ or UINT_MAX on error.
     399              :  */
     400            1 : unsigned int arch_irq_allocate(void);
     401              : 
     402              : /**
     403              :  * @brief Arch-specific hook for declaring an IRQ being used
     404              :  *
     405              :  * Note: disable/enable IRQ relevantly inside the implementation of such
     406              :  * function to avoid concurrency issues.
     407              :  *
     408              :  * @param irq the IRQ to declare being used
     409              :  */
     410            1 : void arch_irq_set_used(unsigned int irq);
     411              : 
     412              : /**
     413              :  * @brief Arch-specific hook for checking if an IRQ is being used already
     414              :  *
     415              :  * @param irq the IRQ to check
     416              :  *
     417              :  * @return true if being, false otherwise
     418              :  */
     419            1 : bool arch_irq_is_used(unsigned int irq);
     420              : 
     421              : #endif /* CONFIG_PCIE_CONTROLLER */
     422              : 
     423              : /**
     424              :  * @def ARCH_EXCEPT(reason_p)
     425              :  *
     426              :  * Generate a software induced fatal error.
     427              :  *
     428              :  * If the caller is running in user mode, only K_ERR_KERNEL_OOPS or
     429              :  * K_ERR_STACK_CHK_FAIL may be induced.
     430              :  *
     431              :  * This should ideally generate a software trap, with exception context
     432              :  * indicating state when this was invoked. General purpose register state at
     433              :  * the time of trap should not be disturbed from the calling context.
     434              :  *
     435              :  * @param reason_p K_ERR_ scoped reason code for the fatal error.
     436              :  */
     437              : 
     438              : #ifdef CONFIG_IRQ_OFFLOAD
     439              : /**
     440              :  * Run a function in interrupt context.
     441              :  *
     442              :  * Implementations should invoke an exception such that the kernel goes through
     443              :  * its interrupt handling dispatch path, to include switching to the interrupt
     444              :  * stack, and runs the provided routine and parameter.
     445              :  *
     446              :  * The only intended use-case for this function is for test code to simulate
     447              :  * the correctness of kernel APIs in interrupt handling context. This API
     448              :  * is not intended for real applications.
     449              :  *
     450              :  * @see irq_offload()
     451              :  *
     452              :  * @param routine Function to run in interrupt context
     453              :  * @param parameter Value to pass to the function when invoked
     454              :  */
     455              : void arch_irq_offload(irq_offload_routine_t routine, const void *parameter);
     456              : 
     457              : 
     458              : /**
     459              :  * Initialize the architecture-specific portion of the irq_offload subsystem
     460              :  */
     461              : void arch_irq_offload_init(void);
     462              : 
     463              : #endif /* CONFIG_IRQ_OFFLOAD */
     464              : 
     465              : /** @} */
     466              : 
     467              : 
     468              : /**
     469              :  * @defgroup arch-smp Architecture-specific SMP APIs
     470              :  * @ingroup arch-interface
     471              :  * @{
     472              :  */
     473              : #ifdef CONFIG_SMP
     474              : /** Return the CPU struct for the currently executing CPU */
     475            1 : static inline struct _cpu *arch_curr_cpu(void);
     476              : 
     477              : 
     478              : /**
     479              :  * @brief Processor hardware ID
     480              :  *
     481              :  * Most multiprocessor architectures have a low-level unique ID value
     482              :  * associated with the current CPU that can be retrieved rapidly and
     483              :  * efficiently in kernel context.  Note that while the numbering of
     484              :  * the CPUs is guaranteed to be unique, the values are
     485              :  * platform-defined. In particular, they are not guaranteed to match
     486              :  * Zephyr's own sequential CPU IDs (even though on some platforms they
     487              :  * do).
     488              :  *
     489              :  * @note There is an inherent race with this API: the system may
     490              :  * preempt the current thread and migrate it to another CPU before the
     491              :  * value is used.  Safe usage requires knowing the migration is
     492              :  * impossible (e.g. because the code is in interrupt context, holds a
     493              :  * spinlock, or cannot migrate due to k_cpu_mask state).
     494              :  *
     495              :  * @return Unique ID for currently-executing CPU
     496              :  */
     497            1 : static inline uint32_t arch_proc_id(void);
     498              : 
     499              : /**
     500              :  * Broadcast an interrupt to all CPUs
     501              :  *
     502              :  * This will invoke z_sched_ipi() on all other CPUs in the system.
     503              :  */
     504            1 : void arch_sched_broadcast_ipi(void);
     505              : 
     506              : /**
     507              :  * Direct IPIs to the specified CPUs
     508              :  *
     509              :  * This will invoke z_sched_ipi() on the CPUs identified by @a cpu_bitmap.
     510              :  *
     511              :  * @param cpu_bitmap A bitmap indicating which CPUs need the IPI
     512              :  */
     513            1 : void arch_sched_directed_ipi(uint32_t cpu_bitmap);
     514              : 
     515            0 : int arch_smp_init(void);
     516              : 
     517              : #endif /* CONFIG_SMP */
     518              : 
     519              : /**
     520              :  * @brief Returns the number of CPUs
     521              :  *
     522              :  * For most systems this will be the same as CONFIG_MP_MAX_NUM_CPUS,
     523              :  * however some systems may determine this at runtime instead.
     524              :  *
     525              :  * @return the number of CPUs
     526              :  */
     527            1 : static inline unsigned int arch_num_cpus(void);
     528              : 
     529              : /** @} */
     530              : 
     531              : 
     532              : /**
     533              :  * @defgroup arch-userspace Architecture-specific userspace APIs
     534              :  * @ingroup arch-interface
     535              :  * @{
     536              :  */
     537              : 
     538              : #ifdef CONFIG_USERSPACE
     539              : #include <zephyr/arch/syscall.h>
     540              : 
     541              : /**
     542              :  * Invoke a system call with 0 arguments.
     543              :  *
     544              :  * No general-purpose register state other than return value may be preserved
     545              :  * when transitioning from supervisor mode back down to user mode for
     546              :  * security reasons.
     547              :  *
     548              :  * It is required that all arguments be stored in registers when elevating
     549              :  * privileges from user to supervisor mode.
     550              :  *
     551              :  * Processing of the syscall takes place on a separate kernel stack. Interrupts
     552              :  * should be enabled when invoking the system call marshallers from the
     553              :  * dispatch table. Thread preemption may occur when handling system calls.
     554              :  *
     555              :  * Call IDs are untrusted and must be bounds-checked, as the value is used to
     556              :  * index the system call dispatch table, containing function pointers to the
     557              :  * specific system call code.
     558              :  *
     559              :  * @param call_id System call ID
     560              :  * @return Return value of the system call. Void system calls return 0 here.
     561              :  */
     562            1 : static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id);
     563              : 
     564              : /**
     565              :  * Invoke a system call with 1 argument.
     566              :  *
     567              :  * @see arch_syscall_invoke0()
     568              :  *
     569              :  * @param arg1 First argument to the system call.
     570              :  * @param call_id System call ID, will be bounds-checked and used to reference
     571              :  *                kernel-side dispatch table
     572              :  * @return Return value of the system call. Void system calls return 0 here.
     573              :  */
     574            1 : static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
     575              :                                              uintptr_t call_id);
     576              : 
     577              : /**
     578              :  * Invoke a system call with 2 arguments.
     579              :  *
     580              :  * @see arch_syscall_invoke0()
     581              :  *
     582              :  * @param arg1 First argument to the system call.
     583              :  * @param arg2 Second argument to the system call.
     584              :  * @param call_id System call ID, will be bounds-checked and used to reference
     585              :  *                kernel-side dispatch table
     586              :  * @return Return value of the system call. Void system calls return 0 here.
     587              :  */
     588            1 : static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
     589              :                                              uintptr_t call_id);
     590              : 
     591              : /**
     592              :  * Invoke a system call with 3 arguments.
     593              :  *
     594              :  * @see arch_syscall_invoke0()
     595              :  *
     596              :  * @param arg1 First argument to the system call.
     597              :  * @param arg2 Second argument to the system call.
     598              :  * @param arg3 Third argument to the system call.
     599              :  * @param call_id System call ID, will be bounds-checked and used to reference
     600              :  *                kernel-side dispatch table
     601              :  * @return Return value of the system call. Void system calls return 0 here.
     602              :  */
     603            1 : static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
     604              :                                              uintptr_t arg3,
     605              :                                              uintptr_t call_id);
     606              : 
     607              : /**
     608              :  * Invoke a system call with 4 arguments.
     609              :  *
     610              :  * @see arch_syscall_invoke0()
     611              :  *
     612              :  * @param arg1 First argument to the system call.
     613              :  * @param arg2 Second argument to the system call.
     614              :  * @param arg3 Third argument to the system call.
     615              :  * @param arg4 Fourth argument to the system call.
     616              :  * @param call_id System call ID, will be bounds-checked and used to reference
     617              :  *                kernel-side dispatch table
     618              :  * @return Return value of the system call. Void system calls return 0 here.
     619              :  */
     620            1 : static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
     621              :                                              uintptr_t arg3, uintptr_t arg4,
     622              :                                              uintptr_t call_id);
     623              : 
     624              : /**
     625              :  * Invoke a system call with 5 arguments.
     626              :  *
     627              :  * @see arch_syscall_invoke0()
     628              :  *
     629              :  * @param arg1 First argument to the system call.
     630              :  * @param arg2 Second argument to the system call.
     631              :  * @param arg3 Third argument to the system call.
     632              :  * @param arg4 Fourth argument to the system call.
     633              :  * @param arg5 Fifth argument to the system call.
     634              :  * @param call_id System call ID, will be bounds-checked and used to reference
     635              :  *                kernel-side dispatch table
     636              :  * @return Return value of the system call. Void system calls return 0 here.
     637              :  */
     638            1 : static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
     639              :                                              uintptr_t arg3, uintptr_t arg4,
     640              :                                              uintptr_t arg5,
     641              :                                              uintptr_t call_id);
     642              : 
     643              : /**
     644              :  * Invoke a system call with 6 arguments.
     645              :  *
     646              :  * @see arch_syscall_invoke0()
     647              :  *
     648              :  * @param arg1 First argument to the system call.
     649              :  * @param arg2 Second argument to the system call.
     650              :  * @param arg3 Third argument to the system call.
     651              :  * @param arg4 Fourth argument to the system call.
     652              :  * @param arg5 Fifth argument to the system call.
     653              :  * @param arg6 Sixth argument to the system call.
     654              :  * @param call_id System call ID, will be bounds-checked and used to reference
     655              :  *                kernel-side dispatch table
     656              :  * @return Return value of the system call. Void system calls return 0 here.
     657              :  */
     658            1 : static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
     659              :                                              uintptr_t arg3, uintptr_t arg4,
     660              :                                              uintptr_t arg5, uintptr_t arg6,
     661              :                                              uintptr_t call_id);
     662              : 
     663              : /**
     664              :  * Indicate whether we are currently running in user mode
     665              :  *
     666              :  * @return True if the CPU is currently running with user permissions
     667              :  */
     668            1 : static inline bool arch_is_user_context(void);
     669              : 
     670              : /**
     671              :  * @brief Get the maximum number of partitions for a memory domain
     672              :  *
     673              :  * @return Max number of partitions, or -1 if there is no limit
     674              :  */
     675            1 : int arch_mem_domain_max_partitions_get(void);
     676              : 
     677              : #ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
     678              : /**
     679              :  *
     680              :  * @brief Architecture-specific hook for memory domain initialization
     681              :  *
     682              :  * Perform any tasks needed to initialize architecture-specific data within
     683              :  * the memory domain, such as reserving memory for page tables. All members
     684              :  * of the provided memory domain aside from `arch` will be initialized when
     685              :  * this is called, but no threads will be a assigned yet.
     686              :  *
     687              :  * This function may fail if initializing the memory domain requires allocation,
     688              :  * such as for page tables.
     689              :  *
     690              :  * The associated function k_mem_domain_init() documents that making
     691              :  * multiple init calls to the same memory domain is undefined behavior,
     692              :  * but has no assertions in place to check this. If this matters, it may be
     693              :  * desirable to add checks for this in the implementation of this function.
     694              :  *
     695              :  * @param domain The memory domain to initialize
     696              :  * @retval 0 Success
     697              :  * @retval -ENOMEM Insufficient memory
     698              :  */
     699              : int arch_mem_domain_init(struct k_mem_domain *domain);
     700              : #endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */
     701              : 
     702              : #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
     703              : /**
     704              :  * @brief Add a thread to a memory domain (arch-specific)
     705              :  *
     706              :  * Architecture-specific hook to manage internal data structures or hardware
     707              :  * state when the provided thread has been added to a memory domain.
     708              :  *
     709              :  * The thread->mem_domain_info.mem_domain pointer will be set to the domain to
     710              :  * be added to before this is called. Implementations may assume that the
     711              :  * thread is not already a member of this domain.
     712              :  *
     713              :  * @param thread Thread which needs to be configured.
     714              :  *
     715              :  * @retval 0 if successful
     716              :  * @retval -EINVAL if invalid parameters supplied
     717              :  * @retval -ENOSPC if running out of space in internal structures
     718              :  *                    (e.g. translation tables)
     719              :  */
     720              : int arch_mem_domain_thread_add(struct k_thread *thread);
     721              : 
     722              : /**
     723              :  * @brief Remove a thread from a memory domain (arch-specific)
     724              :  *
     725              :  * Architecture-specific hook to manage internal data structures or hardware
     726              :  * state when the provided thread has been removed from a memory domain.
     727              :  *
     728              :  * The thread's memory domain pointer will be the domain that the thread
     729              :  * is being removed from.
     730              :  *
     731              :  * @param thread Thread being removed from its memory domain
     732              :  *
     733              :  * @retval 0 if successful
     734              :  * @retval -EINVAL if invalid parameters supplied
     735              :  */
     736              : int arch_mem_domain_thread_remove(struct k_thread *thread);
     737              : 
     738              : /**
     739              :  * @brief Remove a partition from the memory domain (arch-specific)
     740              :  *
     741              :  * Architecture-specific hook to manage internal data structures or hardware
     742              :  * state when a memory domain has had a partition removed.
     743              :  *
     744              :  * The partition index data, and the number of partitions configured, are not
     745              :  * respectively cleared and decremented in the domain until after this function
     746              :  * runs.
     747              :  *
     748              :  * @param domain The memory domain structure
     749              :  * @param partition_id The partition index that needs to be deleted
     750              :  *
     751              :  * @retval 0 if successful
     752              :  * @retval -EINVAL if invalid parameters supplied
     753              :  * @retval -ENOENT if no matching partition found
     754              :  */
     755              : int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
     756              :                                      uint32_t partition_id);
     757              : 
     758              : /**
     759              :  * @brief Add a partition to the memory domain
     760              :  *
     761              :  * Architecture-specific hook to manage internal data structures or hardware
     762              :  * state when a memory domain has a partition added.
     763              :  *
     764              :  * @param domain The memory domain structure
     765              :  * @param partition_id The partition that needs to be added
     766              :  *
     767              :  * @retval 0 if successful
     768              :  * @retval -EINVAL if invalid parameters supplied
     769              :  */
     770              : int arch_mem_domain_partition_add(struct k_mem_domain *domain,
     771              :                                   uint32_t partition_id);
     772              : #endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
     773              : 
     774              : /**
     775              :  * @brief Check memory region permissions
     776              :  *
     777              :  * Given a memory region, return whether the current memory management hardware
     778              :  * configuration would allow a user thread to read/write that region. Used by
     779              :  * system calls to validate buffers coming in from userspace.
     780              :  *
     781              :  * Notes:
     782              :  * The function is guaranteed to never return validation success, if the entire
     783              :  * buffer area is not user accessible.
     784              :  *
     785              :  * The function is guaranteed to correctly validate the permissions of the
     786              :  * supplied buffer, if the user access permissions of the entire buffer are
     787              :  * enforced by a single, enabled memory management region.
     788              :  *
     789              :  * In some architectures the validation will always return failure
     790              :  * if the supplied memory buffer spans multiple enabled memory management
     791              :  * regions (even if all such regions permit user access).
     792              :  *
     793              :  * @warning Buffer of size zero (0) has undefined behavior.
     794              :  *
     795              :  * @param addr start address of the buffer
     796              :  * @param size the size of the buffer
     797              :  * @param write If non-zero, additionally check if the area is writable.
     798              :  *        Otherwise, just check if the memory can be read.
     799              :  *
     800              :  * @return nonzero if the permissions don't match.
     801              :  */
     802            1 : int arch_buffer_validate(const void *addr, size_t size, int write);
     803              : 
     804              : /**
     805              :  * Get the optimal virtual region alignment to optimize the MMU table layout
     806              :  *
     807              :  * Some MMU HW requires some region to be aligned to some of the intermediate
     808              :  * block alignment in order to reduce table usage.
     809              :  * This call returns the optimal virtual address alignment in order to permit
     810              :  * such optimization in the following MMU mapping call.
     811              :  *
     812              :  * @param[in] phys Physical address of region to be mapped,
     813              :  *                 aligned to @kconfig{CONFIG_MMU_PAGE_SIZE}
     814              :  * @param[in] size Size of region to be mapped,
     815              :  *                 aligned to @kconfig{CONFIG_MMU_PAGE_SIZE}
     816              :  *
     817              :  * @return Alignment to apply on the virtual address of this region
     818              :  */
     819            1 : size_t arch_virt_region_align(uintptr_t phys, size_t size);
     820              : 
     821              : /**
     822              :  * Perform a one-way transition from supervisor to user mode.
     823              :  *
     824              :  * Implementations of this function must do the following:
     825              :  *
     826              :  * - Reset the thread's stack pointer to a suitable initial value. We do not
     827              :  *   need any prior context since this is a one-way operation.
     828              :  * - Set up any kernel stack region for the CPU to use during privilege
     829              :  *   elevation
     830              :  * - Put the CPU in whatever its equivalent of user mode is
     831              :  * - Transfer execution to arch_new_thread() passing along all the supplied
     832              :  *   arguments, in user mode.
     833              :  *
     834              :  * @param user_entry Entry point to start executing as a user thread
     835              :  * @param p1 1st parameter to user thread
     836              :  * @param p2 2nd parameter to user thread
     837              :  * @param p3 3rd parameter to user thread
     838              :  */
     839            1 : FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
     840              :                                         void *p1, void *p2, void *p3);
     841              : 
     842              : /**
     843              :  * @brief Induce a kernel oops that appears to come from a specific location
     844              :  *
     845              :  * Normally, k_oops() generates an exception that appears to come from the
     846              :  * call site of the k_oops() itself.
     847              :  *
     848              :  * However, when validating arguments to a system call, if there are problems
     849              :  * we want the oops to appear to come from where the system call was invoked
     850              :  * and not inside the validation function.
     851              :  *
     852              :  * @param ssf System call stack frame pointer. This gets passed as an argument
     853              :  *            to _k_syscall_handler_t functions and its contents are completely
     854              :  *            architecture specific.
     855              :  */
     856            1 : FUNC_NORETURN void arch_syscall_oops(void *ssf);
     857              : 
     858              : /**
     859              :  * @brief Safely take the length of a potentially bad string
     860              :  *
     861              :  * This must not fault, instead the @p err parameter must have -1 written to it.
     862              :  * This function otherwise should work exactly like libc strnlen(). On success
     863              :  * @p err should be set to 0.
     864              :  *
     865              :  * @param s String to measure
     866              :  * @param maxsize Max length of the string
     867              :  * @param err Error value to write
     868              :  * @return Length of the string, not counting NULL byte, up to maxsize
     869              :  */
     870            1 : size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err);
     871              : #endif /* CONFIG_USERSPACE */
     872              : 
     873              : /**
     874              :  * @brief Detect memory coherence type
     875              :  *
     876              :  * Required when ARCH_HAS_COHERENCE is true.  This function returns
     877              :  * true if the byte pointed to lies within an architecture-defined
     878              :  * "coherence region" (typically implemented with uncached memory) and
     879              :  * can safely be used in multiprocessor code without explicit flush or
     880              :  * invalidate operations.
     881              :  *
     882              :  * @note The result is for only the single byte at the specified
     883              :  * address, this API is not required to check region boundaries or to
     884              :  * expect aligned pointers.  The expectation is that the code above
     885              :  * will have queried the appropriate address(es).
     886              :  */
     887              : #ifndef CONFIG_ARCH_HAS_COHERENCE
     888            1 : static inline bool arch_mem_coherent(void *ptr)
     889              : {
     890              :         ARG_UNUSED(ptr);
     891              :         return true;
     892              : }
     893              : #endif
     894              : 
     895              : /**
     896              :  * @brief Ensure cache coherence prior to context switch
     897              :  *
     898              :  * Required when ARCH_HAS_COHERENCE is true.  On cache-incoherent
     899              :  * multiprocessor architectures, thread stacks are cached by default
     900              :  * for performance reasons.  They must therefore be flushed
     901              :  * appropriately on context switch.  The rules are:
     902              :  *
     903              :  * 1. The region containing live data in the old stack (generally the
     904              :  *    bytes between the current stack pointer and the top of the stack
     905              :  *    memory) must be flushed to underlying storage so a new CPU that
     906              :  *    runs the same thread sees the correct data.  This must happen
     907              :  *    before the assignment of the switch_handle field in the thread
     908              :  *    struct which signals the completion of context switch.
     909              :  *
     910              :  * 2. Any data areas to be read from the new stack (generally the same
     911              :  *    as the live region when it was saved) should be invalidated (and
     912              :  *    NOT flushed!) in the data cache.  This is because another CPU
     913              :  *    may have run or re-initialized the thread since this CPU
     914              :  *    suspended it, and any data present in cache will be stale.
     915              :  *
     916              :  * @note The kernel will call this function during interrupt exit when
     917              :  * a new thread has been chosen to run, and also immediately before
     918              :  * entering arch_switch() to effect a code-driven context switch.  In
     919              :  * the latter case, it is very likely that more data will be written
     920              :  * to the old_thread stack region after this function returns but
     921              :  * before the completion of the switch.  Simply flushing naively here
     922              :  * is not sufficient on many architectures and coordination with the
     923              :  * arch_switch() implementation is likely required.
     924              :  *
     925              :  * @param old_thread The old thread to be flushed before being allowed
     926              :  *                   to run on other CPUs.
     927              :  * @param old_switch_handle The switch handle to be stored into
     928              :  *                          old_thread (it will not be valid until the
     929              :  *                          cache is flushed so is not present yet).
     930              :  *                          This will be NULL if inside z_swap()
     931              :  *                          (because the arch_switch() has not saved it
     932              :  *                          yet).
     933              :  * @param new_thread The new thread to be invalidated before it runs locally.
     934              :  */
     935              : #ifndef CONFIG_KERNEL_COHERENCE
     936            1 : static inline void arch_cohere_stacks(struct k_thread *old_thread,
     937              :                                       void *old_switch_handle,
     938              :                                       struct k_thread *new_thread)
     939              : {
     940              :         ARG_UNUSED(old_thread);
     941              :         ARG_UNUSED(old_switch_handle);
     942              :         ARG_UNUSED(new_thread);
     943              : }
     944              : #endif
     945              : 
     946              : /** @} */
     947              : 
     948              : /**
     949              :  * @defgroup arch-gdbstub Architecture-specific gdbstub APIs
     950              :  * @ingroup arch-interface
     951              :  * @{
     952              :  */
     953              : 
     954              : #ifdef CONFIG_GDBSTUB
     955              : struct gdb_ctx;
     956              : 
     957              : /**
     958              :  * @brief Architecture layer debug start
     959              :  *
     960              :  * This function is called by @c gdb_init()
     961              :  */
     962            1 : void arch_gdb_init(void);
     963              : 
     964              : /**
     965              :  * @brief Continue running program
     966              :  *
     967              :  * Continue software execution.
     968              :  */
     969            1 : void arch_gdb_continue(void);
     970              : 
     971              : /**
     972              :  * @brief Continue with one step
     973              :  *
     974              :  * Continue software execution until reaches the next statement.
     975              :  */
     976            1 : void arch_gdb_step(void);
     977              : 
     978              : /**
     979              :  * @brief Read all registers, and outputs as hexadecimal string.
     980              :  *
     981              :  * This reads all CPU registers and outputs as hexadecimal string.
     982              :  * The output string must be parsable by GDB.
     983              :  *
     984              :  * @param ctx    GDB context
     985              :  * @param buf    Buffer to output hexadecimal string.
     986              :  * @param buflen Length of buffer.
     987              :  *
     988              :  * @return Length of hexadecimal string written.
     989              :  *         Return 0 if error or not supported.
     990              :  */
     991            1 : size_t arch_gdb_reg_readall(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen);
     992              : 
     993              : /**
     994              :  * @brief Take a hexadecimal string and update all registers.
     995              :  *
     996              :  * This takes in a hexadecimal string as presented from GDB,
     997              :  * and updates all CPU registers with new values.
     998              :  *
     999              :  * @param ctx    GDB context
    1000              :  * @param hex    Input hexadecimal string.
    1001              :  * @param hexlen Length of hexadecimal string.
    1002              :  *
    1003              :  * @return Length of hexadecimal string parsed.
    1004              :  *         Return 0 if error or not supported.
    1005              :  */
    1006            1 : size_t arch_gdb_reg_writeall(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen);
    1007              : 
    1008              : /**
    1009              :  * @brief Read one register, and outputs as hexadecimal string.
    1010              :  *
    1011              :  * This reads one CPU register and outputs as hexadecimal string.
    1012              :  * The output string must be parsable by GDB.
    1013              :  *
    1014              :  * @param ctx    GDB context
    1015              :  * @param buf    Buffer to output hexadecimal string.
    1016              :  * @param buflen Length of buffer.
    1017              :  * @param regno  Register number
    1018              :  *
    1019              :  * @return Length of hexadecimal string written.
    1020              :  *         Return 0 if error or not supported.
    1021              :  */
    1022            1 : size_t arch_gdb_reg_readone(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen,
    1023              :                             uint32_t regno);
    1024              : 
    1025              : /**
    1026              :  * @brief Take a hexadecimal string and update one register.
    1027              :  *
    1028              :  * This takes in a hexadecimal string as presented from GDB,
    1029              :  * and updates one CPU registers with new value.
    1030              :  *
    1031              :  * @param ctx    GDB context
    1032              :  * @param hex    Input hexadecimal string.
    1033              :  * @param hexlen Length of hexadecimal string.
    1034              :  * @param regno  Register number
    1035              :  *
    1036              :  * @return Length of hexadecimal string parsed.
    1037              :  *         Return 0 if error or not supported.
    1038              :  */
    1039            1 : size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen,
    1040              :                              uint32_t regno);
    1041              : 
    1042              : /**
    1043              :  * @brief Add breakpoint or watchpoint.
    1044              :  *
    1045              :  * @param ctx GDB context
    1046              :  * @param type Breakpoint or watchpoint type
    1047              :  * @param addr Address of breakpoint or watchpoint
    1048              :  * @param kind Size of breakpoint/watchpoint in bytes
    1049              :  *
    1050              :  * @retval 0  Operation successful
    1051              :  * @retval -1 Error encountered
    1052              :  * @retval -2 Not supported
    1053              :  */
    1054            1 : int arch_gdb_add_breakpoint(struct gdb_ctx *ctx, uint8_t type,
    1055              :                             uintptr_t addr, uint32_t kind);
    1056              : 
    1057              : /**
    1058              :  * @brief Remove breakpoint or watchpoint.
    1059              :  *
    1060              :  * @param ctx GDB context
    1061              :  * @param type Breakpoint or watchpoint type
    1062              :  * @param addr Address of breakpoint or watchpoint
    1063              :  * @param kind Size of breakpoint/watchpoint in bytes
    1064              :  *
    1065              :  * @retval 0  Operation successful
    1066              :  * @retval -1 Error encountered
    1067              :  * @retval -2 Not supported
    1068              :  */
    1069            1 : int arch_gdb_remove_breakpoint(struct gdb_ctx *ctx, uint8_t type,
    1070              :                                uintptr_t addr, uint32_t kind);
    1071              : 
    1072              : /**
    1073              :  * @brief Post processing after memory write.
    1074              :  *
    1075              :  * @param[in] addr  Starting address of the memory region
    1076              :  * @param[in] len   Size of the memory region
    1077              :  * @param[in] align Write alignment of memory region
    1078              :  */
    1079            1 : void arch_gdb_post_memory_write(uintptr_t addr, size_t len, uint8_t align);
    1080              : 
    1081              : #endif
    1082              : /** @} */
    1083              : 
    1084              : #ifdef CONFIG_TIMING_FUNCTIONS
    1085              : #include <zephyr/timing/types.h>
    1086              : 
    1087              : /**
    1088              :  * @brief Arch specific Timing Measurement APIs
    1089              :  * @defgroup timing_api_arch Arch specific Timing Measurement APIs
    1090              :  * @ingroup timing_api
    1091              :  *
    1092              :  * Implements the necessary bits to support timing measurement
    1093              :  * using architecture specific timing measurement mechanism.
    1094              :  *
    1095              :  * @{
    1096              :  */
    1097              : 
    1098              : /**
    1099              :  * @brief Initialize the timing subsystem.
    1100              :  *
    1101              :  * Perform the necessary steps to initialize the timing subsystem.
    1102              :  *
    1103              :  * @see timing_init()
    1104              :  */
    1105            1 : void arch_timing_init(void);
    1106              : 
    1107              : /**
    1108              :  * @brief Signal the start of the timing information gathering.
    1109              :  *
    1110              :  * Signal to the timing subsystem that timing information
    1111              :  * will be gathered from this point forward.
    1112              :  *
    1113              :  * @note Any call to arch_timing_counter_get() must be done between
    1114              :  * calls to arch_timing_start() and arch_timing_stop(), and on the
    1115              :  * same CPU core.
    1116              :  *
    1117              :  * @see timing_start()
    1118              :  */
    1119            1 : void arch_timing_start(void);
    1120              : 
    1121              : /**
    1122              :  * @brief Signal the end of the timing information gathering.
    1123              :  *
    1124              :  * Signal to the timing subsystem that timing information
    1125              :  * is no longer being gathered from this point forward.
    1126              :  *
    1127              :  * @note Any call to arch_timing_counter_get() must be done between
    1128              :  * calls to arch_timing_start() and arch_timing_stop(), and on the
    1129              :  * same CPU core.
    1130              :  *
    1131              :  * @see timing_stop()
    1132              :  */
    1133            1 : void arch_timing_stop(void);
    1134              : 
    1135              : /**
    1136              :  * @brief Return timing counter.
    1137              :  *
    1138              :  * @parblock
    1139              :  *
    1140              :  * @note Any call to arch_timing_counter_get() must be done between
    1141              :  * calls to arch_timing_start() and arch_timing_stop(), and on the
    1142              :  * same CPU core.
    1143              :  *
    1144              :  * @endparblock
    1145              :  *
    1146              :  * @parblock
    1147              :  *
    1148              :  * @note Not all architectures have a timing counter with 64 bit precision.
    1149              :  * It is possible to see this value "go backwards" due to internal
    1150              :  * rollover.  Timing code must be prepared to address the rollover
    1151              :  * (with platform-dependent code, e.g. by casting to a uint32_t before
    1152              :  * subtraction) or by using arch_timing_cycles_get() which is required
    1153              :  * to understand the distinction.
    1154              :  *
    1155              :  * @endparblock
    1156              :  *
    1157              :  * @return Timing counter.
    1158              :  *
    1159              :  * @see timing_counter_get()
    1160              :  */
    1161            1 : timing_t arch_timing_counter_get(void);
    1162              : 
    1163              : /**
    1164              :  * @brief Get number of cycles between @p start and @p end.
    1165              :  *
    1166              :  * @note For some architectures, the raw numbers from counter need
    1167              :  * to be scaled to obtain actual number of cycles, or may roll over
    1168              :  * internally.  This function computes a positive-definite interval
    1169              :  * between two returned cycle values.
    1170              :  *
    1171              :  * @param start Pointer to counter at start of a measured execution.
    1172              :  * @param end Pointer to counter at stop of a measured execution.
    1173              :  * @return Number of cycles between start and end.
    1174              :  *
    1175              :  * @see timing_cycles_get()
    1176              :  */
    1177            1 : uint64_t arch_timing_cycles_get(volatile timing_t *const start,
    1178              :                                 volatile timing_t *const end);
    1179              : 
    1180              : /**
    1181              :  * @brief Get frequency of counter used (in Hz).
    1182              :  *
    1183              :  * @return Frequency of counter used for timing in Hz.
    1184              :  *
    1185              :  * @see timing_freq_get()
    1186              :  */
    1187            1 : uint64_t arch_timing_freq_get(void);
    1188              : 
    1189              : /**
    1190              :  * @brief Convert number of @p cycles into nanoseconds.
    1191              :  *
    1192              :  * @param cycles Number of cycles
    1193              :  * @return Converted time value
    1194              :  *
    1195              :  * @see timing_cycles_to_ns()
    1196              :  */
    1197            1 : uint64_t arch_timing_cycles_to_ns(uint64_t cycles);
    1198              : 
    1199              : /**
    1200              :  * @brief Convert number of @p cycles into nanoseconds with averaging.
    1201              :  *
    1202              :  * @param cycles Number of cycles
    1203              :  * @param count Times of accumulated cycles to average over
    1204              :  * @return Converted time value
    1205              :  *
    1206              :  * @see timing_cycles_to_ns_avg()
    1207              :  */
    1208            1 : uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);
    1209              : 
    1210              : /**
    1211              :  * @brief Get frequency of counter used (in MHz).
    1212              :  *
    1213              :  * @return Frequency of counter used for timing in MHz.
    1214              :  *
    1215              :  * @see timing_freq_get_mhz()
    1216              :  */
    1217            1 : uint32_t arch_timing_freq_get_mhz(void);
    1218              : 
    1219              : /** @} */
    1220              : 
    1221              : #endif /* CONFIG_TIMING_FUNCTIONS */
    1222              : 
    1223              : #ifdef CONFIG_PCIE_MSI_MULTI_VECTOR
    1224              : 
    1225              : struct msi_vector;
    1226              : typedef struct msi_vector msi_vector_t;
    1227              : 
    1228              : /**
    1229              :  * @brief Allocate vector(s) for the endpoint MSI message(s).
    1230              :  *
    1231              :  * @param priority the MSI vectors base interrupt priority
    1232              :  * @param vectors an array to fill with allocated MSI vectors
    1233              :  * @param n_vector the size of MSI vectors array
    1234              :  *
    1235              :  * @return The number of allocated MSI vectors
    1236              :  */
    1237              : uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
    1238              :                                        msi_vector_t *vectors,
    1239              :                                        uint8_t n_vector);
    1240              : 
    1241              : /**
    1242              :  * @brief Connect an MSI vector to the given routine
    1243              :  *
    1244              :  * @param vector The MSI vector to connect to
    1245              :  * @param routine Interrupt service routine
    1246              :  * @param parameter ISR parameter
    1247              :  * @param flags Arch-specific IRQ configuration flag
    1248              :  *
    1249              :  * @return True on success, false otherwise
    1250              :  */
    1251              : bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
    1252              :                                   void (*routine)(const void *parameter),
    1253              :                                   const void *parameter,
    1254              :                                   uint32_t flags);
    1255              : 
    1256              : #endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */
    1257              : 
    1258              : /**
    1259              :  * @brief Perform architecture specific processing within spin loops
    1260              :  *
    1261              :  * This is invoked from busy loops with IRQs disabled such as the contended
    1262              :  * spinlock loop. The default implementation is a weak function that calls
    1263              :  * arch_nop(). Architectures may implement this function to perform extra
    1264              :  * checks or power management tricks if needed.
    1265              :  */
    1266            1 : void arch_spin_relax(void);
    1267              : 
    1268              : /**
    1269              :  * @defgroup arch-stackwalk Architecture-specific Stack Walk APIs
    1270              :  * @ingroup arch-interface
    1271              :  * @brief Architecture-specific Stack Walk APIs
    1272              :  *
    1273              :  * To add API support to an architecture, `arch_stack_walk()` should be implemented and a non-user
    1274              :  * configurable Kconfig `ARCH_HAS_STACKWALK` that is default to `y` should be created in the
    1275              :  * architecture's top level Kconfig, with all the relevant dependencies.
    1276              :  *
    1277              :  * @{
    1278              :  */
    1279              : 
    1280              : /**
    1281              :  * stack_trace_callback_fn - Callback for @ref arch_stack_walk
    1282              :  * @param cookie Caller supplied pointer handed back by @ref arch_stack_walk
    1283              :  * @param addr The stack entry address to consume
    1284              :  *
    1285              :  * @return True, if the entry was consumed or skipped. False, if there is no space left to store
    1286              :  */
    1287            1 : typedef bool (*stack_trace_callback_fn)(void *cookie, unsigned long addr);
    1288              : 
    1289              : /**
    1290              :  * @brief Architecture-specific function to walk the stack
    1291              :  *
    1292              :  * @param callback_fn Callback which is invoked by the architecture code for each entry.
    1293              :  * @param cookie Caller supplied pointer which is handed back to @a callback_fn
    1294              :  * @param thread Pointer to a k_thread struct, can be NULL
    1295              :  * @param esf Pointer to an arch_esf struct, can be NULL
    1296              :  *
    1297              :  * ============ ======= ============================================
    1298              :  * thread       esf
    1299              :  * ============ ======= ============================================
    1300              :  * thread       NULL    Stack trace from thread (can be _current)
    1301              :  * thread       esf     Stack trace starting on esf
    1302              :  * ============ ======= ============================================
    1303              :  */
    1304            1 : void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
    1305              :                      const struct k_thread *thread, const struct arch_esf *esf);
    1306              : 
    1307              : /**
    1308              :  * arch-stackwalk
    1309              :  * @}
    1310              :  */
    1311              : 
    1312              : #ifdef __cplusplus
    1313              : }
    1314              : #endif /* __cplusplus */
    1315              : 
    1316              : #include <zephyr/arch/arch_inlines.h>
    1317              : 
    1318              : #endif /* _ASMLANGUAGE */
    1319              : 
    1320              : #endif /* ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_ */
        

Generated by: LCOV version 2.0-1