LCOV - code coverage report
Current view: top level - zephyr/kernel/internal - mm.h Coverage Total Hit
Test: new.info Lines: 69.2 % 13 9
Test Date: 2025-09-05 16:43:28

            Line data    Source code
       1            0 : /*
       2              :  * Copyright (c) 2020 Intel Corporation
       3              :  *
       4              :  * SPDX-License-Identifier: Apache-2.0
       5              :  */
       6              : 
       7              : #ifndef ZEPHYR_INCLUDE_KERNEL_INTERNAL_MM_H
       8              : #define ZEPHYR_INCLUDE_KERNEL_INTERNAL_MM_H
       9              : 
      10              : #include <zephyr/sys/util.h>
      11              : #include <zephyr/toolchain.h>
      12              : 
/**
 * @def K_MEM_VIRT_OFFSET
 * @brief Address offset of permanent virtual mapping from physical address.
 *
 * This is the offset to subtract from a virtual address mapped in the
 * kernel's permanent mapping of RAM, to obtain its physical address.
 *
 *     virt_addr = phys_addr + K_MEM_VIRT_OFFSET
 *
 * This only works for virtual addresses within the interval
 * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
 *
 * These macros are intended for assembly, linker code, and static initializers.
 * Use with care.
 *
 * Note that when demand paging is active, these will only work with page
 * frames that are pinned to their virtual mapping at boot.
 *
 * TODO: This will likely need to move to an arch API or need additional
 * constraints defined.
 */
#ifdef CONFIG_MMU
#define K_MEM_VIRT_OFFSET	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
				 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
#else
/* No MMU: RAM is identity-mapped, so virtual and physical addresses coincide. */
#define K_MEM_VIRT_OFFSET	0
#endif /* CONFIG_MMU */
      46              : 
/* True if ADDR is at or above the start of SRAM.
 *
 * When SRAM starts at address 0 the "(ADDR) >= 0" comparison would be
 * tautologically true for an unsigned address (and can warn under
 * -Wtype-limits), so the check is compiled out in that case.
 */
#if CONFIG_SRAM_BASE_ADDRESS != 0
#define IS_SRAM_ADDRESS_LOWER(ADDR)  ((ADDR) >= CONFIG_SRAM_BASE_ADDRESS)
#else
#define IS_SRAM_ADDRESS_LOWER(ADDR)  true
#endif /* CONFIG_SRAM_BASE_ADDRESS != 0 */


/* True if ADDR is below the end of SRAM (base + size in bytes;
 * CONFIG_SRAM_SIZE is expressed in KiB, hence the * 1024UL).
 *
 * The guard compiles out the "(ADDR) < 0" comparison when the end-of-SRAM
 * constant evaluates to 0.
 *
 * NOTE(review): the constant is also 0 when base + size wraps around the
 * top of the address space (SRAM ending exactly at the address-space
 * limit); in that case every address is below the wrap point, so the
 * "false" fallback would reject valid addresses — confirm the intended
 * semantics for such configurations.
 */
#if (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0
#define IS_SRAM_ADDRESS_UPPER(ADDR)              \
	((ADDR) < (CONFIG_SRAM_BASE_ADDRESS +    \
		  (CONFIG_SRAM_SIZE * 1024UL)))
#else
#define IS_SRAM_ADDRESS_UPPER(ADDR)  false
#endif

/* True if ADDR lies within [SRAM base, SRAM base + SRAM size). */
#define IS_SRAM_ADDRESS(ADDR)            \
	(IS_SRAM_ADDRESS_LOWER(ADDR) &&  \
	 IS_SRAM_ADDRESS_UPPER(ADDR))
      65              : 
/**
 * @brief Get physical address from virtual address.
 *
 * This only works in the kernel's permanent mapping of RAM.
 *
 * Usable from assembly, linker code, and static initializers;
 * see k_mem_phys_addr() for the type-safe, asserting C variant.
 *
 * @param virt Virtual address
 *
 * @return Physical address.
 */
#define K_MEM_PHYS_ADDR(virt)	((virt) - K_MEM_VIRT_OFFSET)
      76              : 
/**
 * @brief Get virtual address from physical address.
 *
 * This only works in the kernel's permanent mapping of RAM.
 *
 * Usable from assembly, linker code, and static initializers;
 * see k_mem_virt_addr() for the type-safe, asserting C variant.
 *
 * @param phys Physical address
 *
 * @return Virtual address.
 */
#define K_MEM_VIRT_ADDR(phys)	((phys) + K_MEM_VIRT_OFFSET)
      87              : 
/* A non-zero offset means the kernel's permanent RAM mapping is relocated
 * in virtual memory relative to physical RAM.
 */
#if K_MEM_VIRT_OFFSET != 0
/**
 * @brief Kernel is mapped in virtual memory if defined.
 */
#define K_MEM_IS_VM_KERNEL 1
#ifdef CONFIG_XIP
/* XIP executes directly from (physical) flash addresses, which cannot
 * coexist with a kernel relocated into virtual memory.
 */
#error "XIP and a virtual memory kernel are not allowed"
#endif
#endif
      97              : 
      98              : #ifndef _ASMLANGUAGE
      99              : #include <stdint.h>
     100              : #include <stddef.h>
     101              : #include <inttypes.h>
     102              : #include <zephyr/sys/__assert.h>
     103              : #include <zephyr/sys/mem_manage.h>
     104              : 
/**
 * @brief Get physical address from virtual address.
 *
 * This only works in the kernel's permanent mapping of RAM.
 *
 * Just like K_MEM_PHYS_ADDR() but with type safety and assertions.
 *
 * @param virt Virtual address
 *
 * @return Physical address.
 */
static inline uintptr_t k_mem_phys_addr(void *virt)
{
	uintptr_t addr = (uintptr_t)virt;

#if defined(CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK)
	/* Platform supplies its own virtual address range predicate. */
	__ASSERT(sys_mm_is_virt_addr_in_range(virt),
		 "address %p not in permanent mappings", virt);
#elif defined(CONFIG_MMU)
	/* Check addr against [KERNEL_VM_BASE, KERNEL_VM_BASE + VM_SIZE).
	 * Each bound whose constant evaluates to 0 is compiled out, avoiding
	 * tautological comparisons on the unsigned addr; if the upper bound
	 * itself is 0 the assertion is unconditionally false.
	 */
	__ASSERT(
#if CONFIG_KERNEL_VM_BASE != 0
		 (addr >= CONFIG_KERNEL_VM_BASE) &&
#endif /* CONFIG_KERNEL_VM_BASE != 0 */
#if (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE) != 0
		 (addr < (CONFIG_KERNEL_VM_BASE +
			  (CONFIG_KERNEL_VM_SIZE))),
#else
		 false,
#endif /* CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE != 0 */
		 "address %p not in permanent mappings", virt);
#else
	/* Should be identity-mapped */
	__ASSERT(IS_SRAM_ADDRESS(addr),
		 "physical address 0x%lx not in RAM",
		 (unsigned long)addr);
#endif /* CONFIG_MMU */

	/* TODO add assertion that this page is pinned to boot mapping,
	 * the above checks won't be sufficient with demand paging
	 */

	return K_MEM_PHYS_ADDR(addr);
}
     148              : 
/**
 * @brief Get virtual address from physical address.
 *
 * This only works in the kernel's permanent mapping of RAM.
 *
 * Just like K_MEM_VIRT_ADDR() but with type safety and assertions.
 *
 * @param phys Physical address
 *
 * @return Virtual address.
 */
static inline void *k_mem_virt_addr(uintptr_t phys)
{
#if defined(CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK)
	/* Platform supplies its own physical address range predicate. */
	__ASSERT(sys_mm_is_phys_addr_in_range(phys),
		"physical address 0x%lx not in RAM", (unsigned long)phys);
#else
	/* Default check: the physical address must fall within SRAM. */
	__ASSERT(IS_SRAM_ADDRESS(phys),
		 "physical address 0x%lx not in RAM", (unsigned long)phys);
#endif /* CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK */

	/* TODO add assertion that this page frame is pinned to boot mapping,
	 * the above check won't be sufficient with demand paging
	 */

	return (void *)K_MEM_VIRT_ADDR(phys);
}
     176              : 
     177              : #ifdef __cplusplus
     178              : extern "C" {
     179              : #endif
     180              : 
/**
 * Map a physical memory region into the kernel's virtual address space
 *
 * This function is intended for mapping memory-mapped I/O regions into
 * the virtual address space. Given a physical address and a size, return a
 * linear address representing the base of where the physical region is mapped
 * in the virtual address space for the Zephyr kernel.
 *
 * The memory mapped via this function must be unmapped using
 * k_mem_unmap_phys_bare().
 *
 * This function alters the active page tables in the area reserved
 * for the kernel. This function will choose the virtual address
 * and return it to the caller.
 *
 * Portable code should never assume that phys_addr and linear_addr will
 * be equal.
 *
 * Caching and access properties are controlled by the 'flags' parameter.
 * Unused bits in 'flags' are reserved for future expansion.
 * A caching mode must be selected. By default, the region is read-only
 * with user access and code execution forbidden. This policy is changed
 * by passing K_MEM_CACHE_* and K_MEM_PERM_* macros into the 'flags' parameter.
 *
 * If there is insufficient virtual address space for the mapping this will
 * generate a kernel panic.
 *
 * This API is only available if CONFIG_MMU is enabled.
 *
 * It is highly discouraged to use this function to map system RAM page
 * frames. It may conflict with anonymous memory mappings and demand paging
 * and produce undefined behavior.  Do not use this for RAM unless you know
 * exactly what you are doing. If you need a chunk of memory, use k_mem_map().
 * If you need a contiguous buffer of physical memory, statically declare it
 * and pin it at build time, it will be mapped when the system boots.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param[out] virt_ptr Output virtual address storage location
 * @param[in]  phys Physical address base of the memory region
 * @param[in]  size Size of the memory region
 * @param[in]  flags Caching mode and access flags, see K_MAP_* macros
 */
void k_mem_map_phys_bare(uint8_t **virt_ptr, uintptr_t phys, size_t size,
			 uint32_t flags);
     227              : 
/**
 * Unmap a virtual memory region from kernel's virtual address space.
 *
 * This function is intended to be used by drivers and early boot routines
 * where temporary memory mappings need to be made. This allows these
 * memory mappings to be discarded once they are no longer needed.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel.
 *
 * This will align the input parameters to page boundaries so that
 * this can be used with the virtual address as returned by
 * k_mem_map_phys_bare().
 *
 * This API is only available if CONFIG_MMU is enabled.
 *
 * It is highly discouraged to use this function to unmap memory mappings.
 * It may conflict with anonymous memory mappings and demand paging and
 * produce undefined behavior. Do not use this unless you know exactly
 * what you are doing.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt Starting address of the virtual address region to be unmapped.
 * @param size Size of the virtual address region
 */
void k_mem_unmap_phys_bare(uint8_t *virt, size_t size);
     256              : 
/**
 * Map memory into virtual address space with guard pages.
 *
 * This maps memory into virtual address space with a preceding and
 * a succeeding guard pages. The memory mapped via this function must be
 * unmapped using k_mem_unmap_phys_guard().
 *
 * This function maps a contiguous physical memory region into kernel's
 * virtual address space with a preceding and a succeeding guard pages.
 * Given a physical address and a size, return a linear address representing
 * the base of where the physical region is mapped in the virtual address
 * space for the Zephyr kernel.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel. This function will choose the virtual address
 * and return it to the caller.
 *
 * If user thread access control needs to be managed in any way, do not enable
 * K_MEM_PERM_USER flags here; instead manage the region's permissions
 * with memory domain APIs after the mapping has been established. Setting
 * K_MEM_PERM_USER here will allow all user threads to access this memory
 * which is usually undesirable.
 *
 * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
 *
 * The returned virtual memory pointer will be page-aligned. The size
 * parameter, and any base address for re-mapping purposes must be page-
 * aligned.
 *
 * Note that the allocation includes two guard pages immediately before
 * and after the requested region. The total size of the allocation will be
 * the requested size plus the size of these two guard pages.
 *
 * Many K_MEM_MAP_* flags have been implemented to alter the behavior of this
 * function, with details in the documentation for these flags.
 *
 * @see k_mem_map() for additional information if called via that.
 *
 * @param phys Physical address base of the memory region if not requesting
 *             anonymous memory. Must be page-aligned.
 * @param size Size of the memory mapping. This must be page-aligned.
 * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
 * @param is_anon True if requesting mapping with anonymous memory.
 *
 * @return The mapped memory location, or NULL if insufficient virtual address
 *         space, insufficient physical memory to establish the mapping,
 *         or insufficient memory for paging structures.
 */
void *k_mem_map_phys_guard(uintptr_t phys, size_t size, uint32_t flags, bool is_anon);
     306              : 
/**
 * Un-map memory mapped via k_mem_map_phys_guard().
 *
 * This removes the memory mappings for the provided page-aligned region,
 * and the two guard pages surrounding the region.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel.
 *
 * @see k_mem_unmap() for additional information if called via that.
 *
 * @note Calling this function on a region which was not mapped via
 *       k_mem_map_phys_guard() to begin with is undefined behavior.
 *
 * @param addr Page-aligned memory region base virtual address
 * @param size Page-aligned memory region size
 * @param is_anon True if the mapped memory is from anonymous memory.
 */
void k_mem_unmap_phys_guard(void *addr, size_t size, bool is_anon);
     326              : 
     327              : #ifdef __cplusplus
     328              : }
     329              : #endif
     330              : 
     331              : /** @} */
     332              : 
     333              : #endif /* !_ASMLANGUAGE */
     334              : #endif /* ZEPHYR_INCLUDE_KERNEL_INTERNAL_MM_H */
        

Generated by: LCOV version 2.0-1