LCOV - code coverage report
Current view: top level - zephyr/kernel/internal - mm.h Hit Total Coverage
Test: new.info Lines: 9 10 90.0 %
Date: 2024-12-22 00:14:23

          Line data    Source code
       1           0 : /*
       2             :  * Copyright (c) 2020 Intel Corporation
       3             :  *
       4             :  * SPDX-License-Identifier: Apache-2.0
       5             :  */
       6             : 
       7             : #ifndef ZEPHYR_INCLUDE_KERNEL_INTERNAL_MM_H
       8             : #define ZEPHYR_INCLUDE_KERNEL_INTERNAL_MM_H
       9             : 
      10             : #include <zephyr/sys/util.h>
      11             : #include <zephyr/toolchain.h>
      12             : 
      13             : /**
      14             :  * @defgroup kernel_mm_internal_apis Kernel Memory Management Internal APIs
      15             :  * @ingroup internal_api
      16             :  * @{
      17             :  */
      18             : 
      19             : /**
      20             :  * @def K_MEM_VIRT_OFFSET
      21             :  * @brief Address offset of permanent virtual mapping from physical address.
      22             :  *
      23             :  * This is the offset to subtract from a virtual address mapped in the
      24             :  * kernel's permanent mapping of RAM, to obtain its physical address.
      25             :  *
      26             :  *     virt_addr = phys_addr + K_MEM_VIRT_OFFSET
      27             :  *
      28             :  * This only works for virtual addresses within the interval
      29             :  * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
      30             :  *
      31             :  * These macros are intended for assembly, linker code, and static initializers.
      32             :  * Use with care.
      33             :  *
      34             :  * Note that when demand paging is active, these will only work with page
      35             :  * frames that are pinned to their virtual mapping at boot.
      36             :  *
      37             :  * TODO: This will likely need to move to an arch API or need additional
      38             :  * constraints defined.
      39             :  */
       40             : #ifdef CONFIG_MMU
                      : /* With an MMU, the kernel's permanent RAM mapping may be relocated:
                      :  * offset = (virtual base + virtual offset) - (physical base + physical offset).
                      :  * A zero result is possible if the virtual and physical windows coincide.
                      :  */
       41           1 : #define K_MEM_VIRT_OFFSET       ((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
       42             :                                  (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
       43             : #else
                      : /* No MMU: RAM is identity-mapped, so virtual == physical. */
       44             : #define K_MEM_VIRT_OFFSET       0
       45             : #endif /* CONFIG_MMU */
      46             : 
      47             : /**
      48             :  * @brief Get physical address from virtual address.
      49             :  *
      50             :  * This only works in the kernel's permanent mapping of RAM.
      51             :  *
      52             :  * @param virt Virtual address
      53             :  *
      54             :  * @return Physical address.
      55             :  */
      56           1 : #define K_MEM_PHYS_ADDR(virt)   ((virt) - K_MEM_VIRT_OFFSET)
      57             : 
      58             : /**
      59             :  * @brief Get virtual address from physical address.
      60             :  *
      61             :  * This only works in the kernel's permanent mapping of RAM.
      62             :  *
      63             :  * @param phys Physical address
      64             :  *
      65             :  * @return Virtual address.
      66             :  */
      67           1 : #define K_MEM_VIRT_ADDR(phys)   ((phys) + K_MEM_VIRT_OFFSET)
      68             : 
                      : /* Evaluated by the preprocessor: K_MEM_VIRT_OFFSET expands to pure
                      :  * Kconfig arithmetic, so this works in #if context.
                      :  */
       69             : #if K_MEM_VIRT_OFFSET != 0
       70             : /**
       71             :  * @brief Kernel is mapped in virtual memory if defined.
       72             :  */
       73             : #define K_MEM_IS_VM_KERNEL 1
       74             : #ifdef CONFIG_XIP
                      : /* NOTE(review): presumably XIP's fixed execute-from-flash addresses
                      :  * cannot coexist with a relocated (virtual) kernel image -- the build
                      :  * is rejected outright here rather than failing at runtime.
                      :  */
       75             : #error "XIP and a virtual memory kernel are not allowed"
       76             : #endif
       77             : #endif
      78             : 
      79             : #ifndef _ASMLANGUAGE
      80             : #include <stdint.h>
      81             : #include <stddef.h>
      82             : #include <inttypes.h>
      83             : #include <zephyr/sys/__assert.h>
      84             : #include <zephyr/sys/mem_manage.h>
      85             : 
      86             : /**
      87             :  * @brief Get physical address from virtual address.
      88             :  *
      89             :  * This only works in the kernel's permanent mapping of RAM.
      90             :  *
      91             :  * Just like K_MEM_PHYS_ADDR() but with type safety and assertions.
      92             :  *
      93             :  * @param virt Virtual address
      94             :  *
      95             :  * @return Physical address.
      96             :  */
       97           1 : static inline uintptr_t k_mem_phys_addr(void *virt)
       98             : {
                      :         /* Work on the integer representation; all range checks and the
                      :          * final K_MEM_PHYS_ADDR() subtraction are integer arithmetic.
                      :          */
       99             :         uintptr_t addr = (uintptr_t)virt;
      100             : 
      101             : #if defined(CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK)
                      :         /* Platform supplies its own virtual-range predicate. */
      102             :         __ASSERT(sys_mm_is_virt_addr_in_range(virt),
      103             :                  "address %p not in permanent mappings", virt);
      104             : #elif defined(CONFIG_MMU)
                      :         /* Generic MMU check: addr must lie in
                      :          * [KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE).
                      :          * The #if guards elide comparisons that would be vacuously true
                      :          * (lower bound when base == 0) or that trigger compiler warnings
                      :          * when the bound constant-folds to 0.
                      :          */
      105             :         __ASSERT(
      106             : #if CONFIG_KERNEL_VM_BASE != 0
      107             :                  (addr >= CONFIG_KERNEL_VM_BASE) &&
      108             : #endif /* CONFIG_KERNEL_VM_BASE != 0 */
      109             : #if (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE) != 0
      110             :                  (addr < (CONFIG_KERNEL_VM_BASE +
      111             :                           (CONFIG_KERNEL_VM_SIZE))),
      112             : #else
                      :                  /* NOTE(review): when (VM_BASE + VM_SIZE) wraps to 0 the
                      :                   * assert condition collapses to `... && false`, i.e. it
                      :                   * always fires. If a wrapped window is meant to accept
                      :                   * every address >= base, this looks like it should be
                      :                   * `true,` -- confirm against upstream intent.
                      :                   */
      113             :                  false,
      114             : #endif /* CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE != 0 */
      115             :                  "address %p not in permanent mappings", virt);
      116             : #else
      117             :         /* Should be identity-mapped */
                      :         /* No MMU: the "virtual" address is the physical one, so check it
                      :          * against the physical RAM window
                      :          * [SRAM_BASE, SRAM_BASE + SRAM_SIZE KiB) instead.
                      :          */
      118             :         __ASSERT(
      119             : #if CONFIG_SRAM_BASE_ADDRESS != 0
      120             :                  (addr >= CONFIG_SRAM_BASE_ADDRESS) &&
      121             : #endif /* CONFIG_SRAM_BASE_ADDRESS != 0 */
      122             : #if (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0
      123             :                  (addr < (CONFIG_SRAM_BASE_ADDRESS +
      124             :                           (CONFIG_SRAM_SIZE * 1024UL))),
      125             : #else
      126             :                  false,
      127             : #endif /* (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0 */
      128             :                  "physical address 0x%lx not in RAM",
      129             :                  (unsigned long)addr);
      130             : #endif /* CONFIG_MMU */
      131             : 
      132             :         /* TODO add assertion that this page is pinned to boot mapping,
      133             :          * the above checks won't be sufficient with demand paging
      134             :          */
      135             : 
                      :         /* virt -> phys: subtract the permanent-mapping offset. */
      136             :         return K_MEM_PHYS_ADDR(addr);
      137             : }
     138             : 
     139             : /**
     140             :  * @brief Get virtual address from physical address.
     141             :  *
     142             :  * This only works in the kernel's permanent mapping of RAM.
     143             :  *
     144             :  * Just like K_MEM_VIRT_ADDR() but with type safety and assertions.
     145             :  *
     146             :  * @param phys Physical address
     147             :  *
     148             :  * @return Virtual address.
     149             :  */
      150           1 : static inline void *k_mem_virt_addr(uintptr_t phys)
      151             : {
      152             : #if defined(CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK)
                      :         /* Platform supplies its own physical-range predicate. */
      153             :         __ASSERT(sys_mm_is_phys_addr_in_range(phys),
      154             :                 "physical address 0x%lx not in RAM", (unsigned long)phys);
      155             : #else
                      :         /* Generic check: phys must lie in the physical RAM window
                      :          * [SRAM_BASE, SRAM_BASE + SRAM_SIZE KiB). As in k_mem_phys_addr(),
                      :          * the #if guards elide comparisons that would be vacuously true or
                      :          * whose bound constant-folds to 0; the `false,` arm makes the
                      :          * assert always fire when (base + size) wraps to 0 -- NOTE(review):
                      :          * confirm that is the intended behavior for a wrapped window.
                      :          */
      156             :         __ASSERT(
      157             : #if CONFIG_SRAM_BASE_ADDRESS != 0
      158             :                  (phys >= CONFIG_SRAM_BASE_ADDRESS) &&
      159             : #endif /* CONFIG_SRAM_BASE_ADDRESS != 0 */
      160             : #if (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0
      161             :                  (phys < (CONFIG_SRAM_BASE_ADDRESS +
      162             :                           (CONFIG_SRAM_SIZE * 1024UL))),
      163             : #else
      164             :                  false,
      165             : #endif /* (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0 */
      166             :                  "physical address 0x%lx not in RAM", (unsigned long)phys);
      167             : #endif /* CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK */
      168             : 
      169             :         /* TODO add assertion that this page frame is pinned to boot mapping,
      170             :          * the above check won't be sufficient with demand paging
      171             :          */
      172             : 
                      :         /* phys -> virt: add the permanent-mapping offset. */
      173             :         return (void *)K_MEM_VIRT_ADDR(phys);
      174             : }
     175             : 
     176             : #ifdef __cplusplus
     177             : extern "C" {
     178             : #endif
     179             : 
     180             : /**
     181             :  * Map a physical memory region into the kernel's virtual address space
     182             :  *
     183             :  * This function is intended for mapping memory-mapped I/O regions into
     184             :  * the virtual address space. Given a physical address and a size, return a
     185             :  * linear address representing the base of where the physical region is mapped
     186             :  * in the virtual address space for the Zephyr kernel.
     187             :  *
     188             :  * The memory mapped via this function must be unmapped using
     189             :  * k_mem_unmap_phys_bare().
     190             :  *
     191             :  * This function alters the active page tables in the area reserved
     192             :  * for the kernel. This function will choose the virtual address
     193             :  * and return it to the caller.
     194             :  *
     195             :  * Portable code should never assume that phys_addr and linear_addr will
     196             :  * be equal.
     197             :  *
     198             :  * Caching and access properties are controlled by the 'flags' parameter.
     199             :  * Unused bits in 'flags' are reserved for future expansion.
     200             :  * A caching mode must be selected. By default, the region is read-only
     201             :  * with user access and code execution forbidden. This policy is changed
     202             :  * by passing K_MEM_CACHE_* and K_MEM_PERM_* macros into the 'flags' parameter.
     203             :  *
     204             :  * If there is insufficient virtual address space for the mapping this will
     205             :  * generate a kernel panic.
     206             :  *
     207             :  * This API is only available if CONFIG_MMU is enabled.
     208             :  *
     209             :  * It is highly discouraged to use this function to map system RAM page
     210             :  * frames. It may conflict with anonymous memory mappings and demand paging
     211             :  * and produce undefined behavior.  Do not use this for RAM unless you know
     212             :  * exactly what you are doing. If you need a chunk of memory, use k_mem_map().
     213             :  * If you need a contiguous buffer of physical memory, statically declare it
     214             :  * and pin it at build time, it will be mapped when the system boots.
     215             :  *
     216             :  * This API is part of infrastructure still under development and may
     217             :  * change.
     218             :  *
     219             :  * @param[out] virt_ptr Output virtual address storage location
     220             :  * @param[in]  phys Physical address base of the memory region
     221             :  * @param[in]  size Size of the memory region
     222             :  * @param[in]  flags Caching mode and access flags, see K_MAP_* macros
     223             :  */
     224           1 : void k_mem_map_phys_bare(uint8_t **virt_ptr, uintptr_t phys, size_t size,
     225             :                          uint32_t flags);
     226             : 
     227             : /**
     228             :  * Unmap a virtual memory region from kernel's virtual address space.
     229             :  *
     230             :  * This function is intended to be used by drivers and early boot routines
     231             :  * where temporary memory mappings need to be made. This allows these
     232             :  * memory mappings to be discarded once they are no longer needed.
     233             :  *
     234             :  * This function alters the active page tables in the area reserved
     235             :  * for the kernel.
     236             :  *
     237             :  * This will align the input parameters to page boundaries so that
     238             :  * this can be used with the virtual address as returned by
     239             :  * k_mem_map_phys_bare().
     240             :  *
     241             :  * This API is only available if CONFIG_MMU is enabled.
     242             :  *
     243             :  * It is highly discouraged to use this function to unmap memory mappings.
     244             :  * It may conflict with anonymous memory mappings and demand paging and
     245             :  * produce undefined behavior. Do not use this unless you know exactly
     246             :  * what you are doing.
     247             :  *
     248             :  * This API is part of infrastructure still under development and may
     249             :  * change.
     250             :  *
     251             :  * @param virt Starting address of the virtual address region to be unmapped.
     252             :  * @param size Size of the virtual address region
     253             :  */
     254           1 : void k_mem_unmap_phys_bare(uint8_t *virt, size_t size);
     255             : 
     256             : /**
     257             :  * Map memory into virtual address space with guard pages.
     258             :  *
     259             :  * This maps memory into virtual address space with a preceding and
     260             :  * a succeeding guard pages. The memory mapped via this function must be
     261             :  * unmapped using k_mem_unmap_phys_guard().
     262             :  *
     263             :  * This function maps a contiguous physical memory region into kernel's
     264             :  * virtual address space with a preceding and a succeeding guard pages.
     265             :  * Given a physical address and a size, return a linear address representing
     266             :  * the base of where the physical region is mapped in the virtual address
     267             :  * space for the Zephyr kernel.
     268             :  *
     269             :  * This function alters the active page tables in the area reserved
     270             :  * for the kernel. This function will choose the virtual address
     271             :  * and return it to the caller.
     272             :  *
     273             :  * If user thread access control needs to be managed in any way, do not enable
     274             :  * K_MEM_PERM_USER flags here; instead manage the region's permissions
     275             :  * with memory domain APIs after the mapping has been established. Setting
     276             :  * K_MEM_PERM_USER here will allow all user threads to access this memory
     277             :  * which is usually undesirable.
     278             :  *
     279             :  * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
     280             :  *
     281             :  * The returned virtual memory pointer will be page-aligned. The size
     282             :  * parameter, and any base address for re-mapping purposes must be page-
     283             :  * aligned.
     284             :  *
     285             :  * Note that the allocation includes two guard pages immediately before
     286             :  * and after the requested region. The total size of the allocation will be
     287             :  * the requested size plus the size of these two guard pages.
     288             :  *
     289             :  * Many K_MEM_MAP_* flags have been implemented to alter the behavior of this
     290             :  * function, with details in the documentation for these flags.
     291             :  *
     292             :  * @see k_mem_map() for additional information if called via that.
     293             :  *
     294             :  * @param phys Physical address base of the memory region if not requesting
     295             :  *             anonymous memory. Must be page-aligned.
     296             :  * @param size Size of the memory mapping. This must be page-aligned.
     297             :  * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
      298             :  * @param is_anon True if requesting mapping with anonymous memory.
     299             :  *
     300             :  * @return The mapped memory location, or NULL if insufficient virtual address
     301             :  *         space, insufficient physical memory to establish the mapping,
     302             :  *         or insufficient memory for paging structures.
     303             :  */
     304           1 : void *k_mem_map_phys_guard(uintptr_t phys, size_t size, uint32_t flags, bool is_anon);
     305             : 
     306             : /**
     307             :  * Un-map memory mapped via k_mem_map_phys_guard().
     308             :  *
     309             :  * This removes the memory mappings for the provided page-aligned region,
     310             :  * and the two guard pages surrounding the region.
     311             :  *
     312             :  * This function alters the active page tables in the area reserved
     313             :  * for the kernel.
     314             :  *
     315             :  * @see k_mem_unmap() for additional information if called via that.
     316             :  *
     317             :  * @note Calling this function on a region which was not mapped via
     318             :  *       k_mem_map_phys_guard() to begin with is undefined behavior.
     319             :  *
     320             :  * @param addr Page-aligned memory region base virtual address
     321             :  * @param size Page-aligned memory region size
     322             :  * @param is_anon True if the mapped memory is from anonymous memory.
     323             :  */
     324           1 : void k_mem_unmap_phys_guard(void *addr, size_t size, bool is_anon);
     325             : 
     326             : #ifdef __cplusplus
     327             : }
     328             : #endif
     329             : 
     330             : /** @} */
     331             : 
     332             : #endif /* !_ASMLANGUAGE */
     333             : #endif /* ZEPHYR_INCLUDE_KERNEL_INTERNAL_MM_H */

Generated by: LCOV version 1.14