LCOV - code coverage report
Current view: top level - zephyr/kernel - mm.h
Test: new.info
Lines: 16 of 17 hit (94.1 % coverage)
Date: 2024-12-22 00:14:23

          Line data    Source code
       1           0 : /*
       2             :  * Copyright (c) 2020 Intel Corporation
       3             :  *
       4             :  * SPDX-License-Identifier: Apache-2.0
       5             :  */
       6             : 
       7             : #ifndef ZEPHYR_INCLUDE_KERNEL_MM_H
       8             : #define ZEPHYR_INCLUDE_KERNEL_MM_H
       9             : 
      10             : #include <zephyr/sys/util.h>
      11             : #include <zephyr/toolchain.h>
      12             : #if defined(CONFIG_ARM_MMU) && defined(CONFIG_ARM64)
      13             : #include <zephyr/arch/arm64/arm_mem.h>
      14             : #endif /* CONFIG_ARM_MMU && CONFIG_ARM64 */
      15             : 
      16             : #include <zephyr/kernel/internal/mm.h>
      17             : 
      18             : /**
      19             :  * @brief Kernel Memory Management
      20             :  * @defgroup kernel_memory_management Kernel Memory Management
      21             :  * @ingroup kernel_apis
      22             :  * @{
      23             :  */
      24             : 
      25             : /**
      26             :  * @name Caching mode definitions.
      27             :  *
      28             :  * These are mutually exclusive.
      29             :  *
      30             :  * @{
      31             :  */
      32             : 
      33             : /** No caching. Most drivers want this. */
      34           1 : #define K_MEM_CACHE_NONE        2
      35             : 
      36             : /** Write-through caching. Used by certain drivers. */
      37           1 : #define K_MEM_CACHE_WT          1
      38             : 
       39             : /** Full write-back caching. Any mapped RAM wants this. */
      40           1 : #define K_MEM_CACHE_WB          0
      41             : 
      42             : /*
       43             :  * ARM64-specific flags are defined in arch/arm64/arm_mem.h;
       44             :  * take care to avoid conflicts when updating these flags.
      45             :  */
      46             : 
      47             : /** Reserved bits for cache modes in k_map() flags argument */
      48           1 : #define K_MEM_CACHE_MASK        (BIT(3) - 1)
      49             : 
      50             : /** @} */
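
/*
 * Editorial example (not part of the original header): a minimal sketch of
 * how a cache mode is carried in the low bits of a flags word and recovered
 * with K_MEM_CACHE_MASK.
 */
#if 0 /* illustration only, not compiled */
static inline uint32_t example_cache_mode(uint32_t flags)
{
	/* The bits reserved by K_MEM_CACHE_MASK hold the cache mode */
	return flags & K_MEM_CACHE_MASK;
}
#endif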
      51             : 
      52             : /**
      53             :  * @name Region permission attributes.
      54             :  *
      55             :  * Default is read-only, no user, no exec
      56             :  *
      57             :  * @{
      58             :  */
      59             : 
      60             : /** Region will have read/write access (and not read-only) */
      61           1 : #define K_MEM_PERM_RW           BIT(3)
      62             : 
      63             : /** Region will be executable (normally forbidden) */
      64           1 : #define K_MEM_PERM_EXEC         BIT(4)
      65             : 
      66             : /** Region will be accessible to user mode (normally supervisor-only) */
      67           1 : #define K_MEM_PERM_USER         BIT(5)
      68             : 
      69             : /** @} */
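
/*
 * Editorial example: permission bits are OR'ed together with a cache mode to
 * build the flags argument of the mapping APIs declared later in this header.
 */
#if 0 /* illustration only, not compiled */
/* Writable, supervisor-only, non-executable, write-back cached */
uint32_t example_flags = K_MEM_CACHE_WB | K_MEM_PERM_RW;
#endif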
      70             : 
      71             : /**
      72             :  * @name Region mapping behaviour attributes
      73             :  *
      74             :  * @{
      75             :  */
      76             : 
       77             : /** Region will be mapped 1:1, i.e. with identical virtual and physical addresses */
      78           1 : #define K_MEM_DIRECT_MAP        BIT(6)
      79             : 
      80             : /** @} */
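
/*
 * Editorial example: K_MEM_DIRECT_MAP requests a mapping whose virtual
 * address equals its physical address. This sketch assumes the internal
 * helper k_mem_map_phys_bare() from <zephyr/kernel/internal/mm.h> (included
 * above); verify its availability and signature against your tree.
 */
#if 0 /* illustration only, not compiled */
static inline void example_direct_map(uintptr_t phys /* hypothetical address */)
{
	uint8_t *virt;

	k_mem_map_phys_bare(&virt, phys, CONFIG_MMU_PAGE_SIZE,
			    K_MEM_CACHE_NONE | K_MEM_PERM_RW | K_MEM_DIRECT_MAP);
	/* (uintptr_t)virt == phys on success */
}
#endif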
      81             : 
      82             : #ifndef _ASMLANGUAGE
      83             : #include <stdint.h>
      84             : #include <stddef.h>
      85             : #include <inttypes.h>
      86             : 
      87             : #ifdef __cplusplus
      88             : extern "C" {
      89             : #endif
      90             : 
      91             : /**
      92             :  * @name k_mem_map() control flags
      93             :  *
      94             :  * @{
      95             :  */
      96             : 
      97             : /**
      98             :  * @brief The mapped region is not guaranteed to be zeroed.
      99             :  *
     100             :  * This may improve performance. The associated page frames may contain
     101             :  * indeterminate data, zeroes, or even sensitive information.
     102             :  *
     103             :  * This may not be used with K_MEM_PERM_USER as there are no circumstances
     104             :  * where this is safe.
     105             :  */
     106           1 : #define K_MEM_MAP_UNINIT        BIT(16)
     107             : 
     108             : /**
     109             :  * Region will be pinned in memory and never paged
     110             :  *
     111             :  * Such memory is guaranteed to never produce a page fault due to page-outs
     112             :  * or copy-on-write once the mapping call has returned. Physical page frames
     113             :  * will be pre-fetched as necessary and pinned.
     114             :  */
     115           1 : #define K_MEM_MAP_LOCK          BIT(17)
     116             : 
     117             : /**
     118             :  * Region will be unpaged i.e. not mapped into memory
     119             :  *
     120             :  * This is meant to be used by kernel code and not by application code.
     121             :  *
      122             :  * The corresponding memory address range will be set up so that no actual
      123             :  * memory is allocated initially. Allocation will happen through demand
      124             :  * paging when addresses in that range are accessed. This is incompatible
      125             :  * with K_MEM_MAP_LOCK.
     126             :  *
     127             :  * When this flag is specified, the phys argument to arch_mem_map()
      128             :  * is interpreted as a backing store location value, not a physical address.
     129             :  * This is very similar to arch_mem_page_out() in that regard.
     130             :  * Two special location values are defined: ARCH_UNPAGED_ANON_ZERO and
     131             :  * ARCH_UNPAGED_ANON_UNINIT. Those are to be used with anonymous memory
     132             :  * mappings for zeroed and uninitialized pages respectively.
     133             :  */
     134           1 : #define K_MEM_MAP_UNPAGED       BIT(18)
     135             : 
     136             : /** @} */
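
/*
 * Editorial example: combining control flags. This sketch maps one page of
 * pinned, writable, possibly non-zeroed anonymous memory; CONFIG_MMU_PAGE_SIZE
 * is the page size on MMU targets.
 */
#if 0 /* illustration only, not compiled */
void *pinned = k_mem_map(CONFIG_MMU_PAGE_SIZE,
			 K_MEM_PERM_RW | K_MEM_MAP_LOCK | K_MEM_MAP_UNINIT);
#endif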
     137             : 
     138             : /**
     139             :  * Return the amount of free memory available
     140             :  *
     141             :  * The returned value will reflect how many free RAM page frames are available.
     142             :  * If demand paging is enabled, it may still be possible to allocate more.
     143             :  *
     144             :  * The information reported by this function may go stale immediately if
     145             :  * concurrent memory mappings or page-ins take place.
     146             :  *
     147             :  * @return Free physical RAM, in bytes
     148             :  */
     149           1 : size_t k_mem_free_get(void);
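
/*
 * Editorial example: a best-effort check of free page frames before a large
 * mapping. As documented above, the result may be stale by the time it is used.
 */
#if 0 /* illustration only, not compiled */
if (k_mem_free_get() >= 4 * CONFIG_MMU_PAGE_SIZE) {
	/* likely enough free frames for a 4-page mapping, at this instant */
}
#endif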
     150             : 
     151             : /**
     152             :  * Map anonymous memory into Zephyr's address space
     153             :  *
     154             :  * This function effectively increases the data space available to Zephyr.
     155             :  * The kernel will choose a base virtual address and return it to the caller.
     156             :  * The memory will have access permissions for all contexts set per the
     157             :  * provided flags argument.
     158             :  *
     159             :  * If user thread access control needs to be managed in any way, do not enable
      160             :  * the K_MEM_PERM_USER flag here; instead manage the region's permissions
     161             :  * with memory domain APIs after the mapping has been established. Setting
     162             :  * K_MEM_PERM_USER here will allow all user threads to access this memory
     163             :  * which is usually undesirable.
     164             :  *
     165             :  * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
     166             :  *
     167             :  * The mapped region is not guaranteed to be physically contiguous in memory.
     168             :  * Physically contiguous buffers should be allocated statically and pinned
     169             :  * at build time.
     170             :  *
     171             :  * Pages mapped in this way have write-back cache settings.
     172             :  *
      173             :  * The returned virtual memory pointer will be page-aligned. The size
      174             :  * parameter, and any base address for re-mapping purposes, must be
      175             :  * page-aligned.
     176             :  *
     177             :  * Note that the allocation includes two guard pages immediately before
     178             :  * and after the requested region. The total size of the allocation will be
     179             :  * the requested size plus the size of these two guard pages.
     180             :  *
      181             :  * Several K_MEM_MAP_* flags alter the behavior of this function; see the
      182             :  * documentation of each flag for details.
     183             :  *
     184             :  * @param size Size of the memory mapping. This must be page-aligned.
     185             :  * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
     186             :  * @return The mapped memory location, or NULL if insufficient virtual address
     187             :  *         space, insufficient physical memory to establish the mapping,
     188             :  *         or insufficient memory for paging structures.
     189             :  */
     190           1 : static inline void *k_mem_map(size_t size, uint32_t flags)
     191             : {
     192             :         return k_mem_map_phys_guard((uintptr_t)NULL, size, flags, true);
     193             : }
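
/*
 * Editorial example: mapping one page of zeroed anonymous memory and
 * checking for failure, per the contract documented above.
 */
#if 0 /* illustration only, not compiled */
void *buf = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

if (buf == NULL) {
	/* out of virtual address space, physical memory, or page tables */
}
#endif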
     194             : 
     195             : #ifdef CONFIG_DEMAND_MAPPING
     196             : /**
     197             :  * Create an unpaged mapping
     198             :  *
     199             :  * This maps backing-store "location" tokens into Zephyr's address space.
     200             :  * Corresponding memory address range will be set so no actual memory will
     201             :  * be allocated initially. Allocation will happen through demand paging when
     202             :  * addresses in the mapped range are accessed.
     203             :  *
     204             :  * The kernel will choose a base virtual address and return it to the caller.
     205             :  * The memory access permissions for all contexts will be set per the
     206             :  * provided flags argument.
     207             :  *
     208             :  * If user thread access control needs to be managed in any way, do not enable
      209             :  * the K_MEM_PERM_USER flag here; instead manage the region's permissions
     210             :  * with memory domain APIs after the mapping has been established. Setting
     211             :  * K_MEM_PERM_USER here will allow all user threads to access this memory
     212             :  * which is usually undesirable.
     213             :  *
     214             :  * This is incompatible with K_MEM_MAP_LOCK.
     215             :  *
     216             :  * The provided backing-store "location" token must be linearly incrementable
     217             :  * by a page size across the entire mapping.
     218             :  *
     219             :  * Allocated pages will have write-back cache settings.
     220             :  *
      221             :  * The returned virtual memory pointer will be page-aligned. The size
      222             :  * parameter, and any base address for re-mapping purposes, must be
      223             :  * page-aligned.
     224             :  *
     225             :  * Note that the allocation includes two guard pages immediately before
     226             :  * and after the requested region. The total size of the allocation will be
     227             :  * the requested size plus the size of these two guard pages.
     228             :  *
     229             :  * @param location Backing store initial location token
     230             :  * @param size Size of the memory mapping. This must be page-aligned.
     231             :  * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
     232             :  * @return The mapping location, or NULL if insufficient virtual address
     233             :  *         space to establish the mapping, or insufficient memory for paging
     234             :  *         structures.
     235             :  */
     236             : static inline void *k_mem_map_unpaged(uintptr_t location, size_t size, uint32_t flags)
     237             : {
     238             :         flags |= K_MEM_MAP_UNPAGED;
     239             :         return k_mem_map_phys_guard(location, size, flags, false);
     240             : }
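
/*
 * Editorial example: an unpaged anonymous mapping of zero-filled pages,
 * populated on first access through demand paging. ARCH_UNPAGED_ANON_ZERO is
 * the location token named in the K_MEM_MAP_UNPAGED description above.
 */
#if 0 /* illustration only, not compiled */
void *lazy = k_mem_map_unpaged(ARCH_UNPAGED_ANON_ZERO,
			       4 * CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
#endif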
     241             : #endif
     242             : 
     243             : /**
     244             :  * Un-map mapped memory
     245             :  *
     246             :  * This removes a memory mapping for the provided page-aligned region.
      247             :  * Associated page frames will be freed and the kernel may re-use the associated
     248             :  * virtual address region. Any paged out data pages may be discarded.
     249             :  *
     250             :  * Calling this function on a region which was not mapped to begin with is
     251             :  * undefined behavior.
     252             :  *
     253             :  * @param addr Page-aligned memory region base virtual address
     254             :  * @param size Page-aligned memory region size
     255             :  */
     256           1 : static inline void k_mem_unmap(void *addr, size_t size)
     257             : {
     258             :         k_mem_unmap_phys_guard(addr, size, true);
     259             : }
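
/*
 * Editorial example: a map/unmap pair. The address and size given to
 * k_mem_unmap() must match the region established by k_mem_map().
 */
#if 0 /* illustration only, not compiled */
void *p = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

if (p != NULL) {
	/* ... use the page ... */
	k_mem_unmap(p, CONFIG_MMU_PAGE_SIZE);
}
#endif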
     260             : 
     261             : /**
     262             :  * Modify memory mapping attribute flags
     263             :  *
     264             :  * This updates caching, access and control flags for the provided
     265             :  * page-aligned memory region.
     266             :  *
     267             :  * Calling this function on a region which was not mapped to begin with is
      268             :  * undefined behavior. However, system memory implicitly mapped at boot time
     269             :  * is supported.
     270             :  *
     271             :  * @param addr Page-aligned memory region base virtual address
     272             :  * @param size Page-aligned memory region size
     273             :  * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
     274             :  * @return 0 for success, negative error code otherwise.
     275             :  */
     276           1 : int k_mem_update_flags(void *addr, size_t size, uint32_t flags);
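
/*
 * Editorial example: making an already-mapped page writable with write-back
 * caching; 'p' is a hypothetical page-aligned address mapped earlier.
 */
#if 0 /* illustration only, not compiled */
int ret = k_mem_update_flags(p, CONFIG_MMU_PAGE_SIZE,
			     K_MEM_PERM_RW | K_MEM_CACHE_WB);

if (ret != 0) {
	/* negative error code: the flags were not updated */
}
#endif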
     277             : 
     278             : /**
      279             :  * Given an arbitrary region, provide an aligned region that covers it
     280             :  *
     281             :  * The returned region will have both its base address and size aligned
     282             :  * to the provided alignment value.
     283             :  *
     284             :  * @param[out] aligned_addr Aligned address
     285             :  * @param[out] aligned_size Aligned region size
     286             :  * @param[in]  addr Region base address
     287             :  * @param[in]  size Region size
     288             :  * @param[in]  align What to align the address and size to
      289             :  * @return The offset between aligned_addr and addr
     290             :  */
     291           1 : size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
     292             :                           uintptr_t addr, size_t size, size_t align);
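
/*
 * Editorial example: widening an arbitrary buffer to page boundaries before
 * a page-granular operation; 'buf' and 'len' are hypothetical.
 */
#if 0 /* illustration only, not compiled */
uintptr_t aligned_addr;
size_t aligned_size;
size_t offset = k_mem_region_align(&aligned_addr, &aligned_size,
				   (uintptr_t)buf, len, CONFIG_MMU_PAGE_SIZE);
/* aligned_addr <= (uintptr_t)buf, and offset == (uintptr_t)buf - aligned_addr */
#endif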
     293             : 
     294             : #ifdef __cplusplus
     295             : }
     296             : #endif
     297             : 
     298             : /** @} */
     299             : 
     300             : #endif /* !_ASMLANGUAGE */
     301             : #endif /* ZEPHYR_INCLUDE_KERNEL_MM_H */

Generated by: LCOV version 1.14