/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Public interface for spinlocks
 */

#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <errno.h>
#include <stdbool.h>

#include <zephyr/arch/cpu.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/time_units.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Spinlock APIs
 * @defgroup spinlock_apis Spinlock APIs
 * @ingroup kernel_apis
 * @{
 */

struct z_spinlock_key {
	int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock().  Any number of spinlocks may be defined in
 * application code.
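 *
 * Example (an illustrative sketch; the names are hypothetical
 * application variables, not part of this API):
 *
 * @code{.c}
 * // A lock protecting a shared counter
 * static struct k_spinlock counter_lock;
 * static int counter;
 * @endcode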
 */
struct k_spinlock {
/**
 * @cond INTERNAL_HIDDEN
 */
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/*
	 * Ticket spinlocks are conceptually two atomic variables,
	 * one indicating the current FIFO head (spinlock owner),
	 * and the other indicating the current FIFO tail.
	 * The spinlock is acquired in the following manner:
	 * - the current FIFO tail value is atomically incremented while
	 *   its original value is saved as a "ticket"
	 * - we spin until the FIFO head becomes equal to the ticket value
	 *
	 * The spinlock is released by an atomic increment of the FIFO head
	 */
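	/*
	 * Illustrative sequence with three CPUs contending (hypothetical):
	 *   CPU0: atomic_inc(&tail) returns ticket 0; owner == 0, lock held
	 *   CPU1: atomic_inc(&tail) returns ticket 1; spins until owner == 1
	 *   CPU2: atomic_inc(&tail) returns ticket 2; spins until owner == 2
	 *   CPU0: unlock does atomic_inc(&owner); owner == 1, CPU1 proceeds
	 */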
	atomic_t owner;
	atomic_t tail;
#else
	atomic_t locked;
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#ifdef CONFIG_SPIN_LOCK_TIME_LIMIT
	/* Stores the time (in cycles) when a lock was taken
	 */
	uint32_t lock_time;
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#if defined(CONFIG_CPP) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
/**
 * INTERNAL_HIDDEN @endcond
 */
};

/* There's a spinlock validation framework available when asserts are
 * enabled.  It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock().  It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

static ALWAYS_INLINE void z_spinlock_validate_pre(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Invalid spinlock %p", l);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
#endif
#endif
}

static ALWAYS_INLINE void z_spinlock_validate_post(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	l->lock_time = sys_clock_cycle_get_32();
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */
}

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time.  Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock().  The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time.  Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested.  It is legal to lock an
 * (unlocked) spin lock while holding a different lock.  Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place.  Implementations are free to optimize
 * in uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
 *
 * @warning
 * Holding a spinlock when a context switch occurs is illegal.
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
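 *
 * Example usage (an illustrative sketch; `mylock` and `counter` are
 * hypothetical application variables):
 *
 * @code{.c}
 * static struct k_spinlock mylock;
 * static int counter;
 *
 * void increment_counter(void)
 * {
 *     k_spinlock_key_t key = k_spin_lock(&mylock);
 *
 *     counter++;
 *     k_spin_unlock(&mylock, key);
 * }
 * @endcode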
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation.  The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

	z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/*
	 * Enqueue ourselves at the end of the spinlock waiters queue,
	 * receiving a ticket
	 */
	atomic_val_t ticket = atomic_inc(&l->tail);
	/* Spin until our ticket is served */
	while (atomic_get(&l->owner) != ticket) {
		arch_spin_relax();
	}
#else
	while (!atomic_cas(&l->locked, 0, 1)) {
		arch_spin_relax();
	}
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
	z_spinlock_validate_post(l);

	return k;
}

/**
 * @brief Attempt to lock a spinlock
 *
 * This routine makes one attempt to lock @p l.  If successful, it
 * stores the key in @p k.
 *
 * @param[in] l A pointer to the spinlock to lock
 * @param[out] k A pointer to the spinlock key
 * @retval 0 on success
 * @retval -EBUSY if another thread holds the lock
 *
 * @see k_spin_lock
 * @see k_spin_unlock
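 *
 * Example usage (an illustrative sketch; `mylock` is a hypothetical
 * application variable):
 *
 * @code{.c}
 * k_spinlock_key_t key;
 *
 * if (k_spin_trylock(&mylock, &key) == 0) {
 *     ...critical section...
 *     k_spin_unlock(&mylock, key);
 * } else {
 *     ...the lock is busy; do other work instead of spinning...
 * }
 * @endcode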
 */
static ALWAYS_INLINE int k_spin_trylock(struct k_spinlock *l, k_spinlock_key_t *k)
{
	int key = arch_irq_lock();

	z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/*
	 * The atomic_get() and atomic_cas() operations below are not
	 * executed as a single atomic unit, so in theory k_spin_trylock()
	 * could lock an already locked spinlock.  To reproduce this, one
	 * of the following conditions would have to be met after we
	 * executed atomic_get() and before we executed atomic_cas():
	 *
	 * - the spinlock is taken 0xffff_..._ffff + 1 times (which
	 *   requires 0xffff_..._ffff CPUs, as the k_spin_lock() call
	 *   is blocking), or
	 * - the spinlock is taken and released 0xffff_..._ffff times
	 *   and then taken again
	 *
	 * In real-life systems this is considered non-reproducible, given
	 * that the required actions would need to happen during a tiny
	 * window of several CPU instructions (which execute with
	 * interrupts locked, so no preemption can happen here)
	 */
	atomic_val_t ticket_val = atomic_get(&l->owner);

	if (!atomic_cas(&l->tail, ticket_val, ticket_val + 1)) {
		goto busy;
	}
#else
	if (!atomic_cas(&l->locked, 0, 1)) {
		goto busy;
	}
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
	z_spinlock_validate_post(l);

	k->key = key;

	return 0;

#ifdef CONFIG_SMP
busy:
	arch_irq_unlock(key);
	return -EBUSY;
#endif /* CONFIG_SMP */
}

/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock().  After this
 * function is called, any CPU will be able to acquire the lock.  If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested.  A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned.  Attempts to
 * unlock mis-nested locks, to unlock locks that are not held, or to
 * pass a key parameter other than the one returned from
 * k_spin_lock(), are illegal.  When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *        acquired
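 *
 * Example of proper lock nesting (an illustrative sketch; `lock_a`
 * and `lock_b` are hypothetical application variables):
 *
 * @code{.c}
 * k_spinlock_key_t key_a = k_spin_lock(&lock_a);
 * k_spinlock_key_t key_b = k_spin_lock(&lock_b);
 *
 * ...critical section with both locks held...
 *
 * k_spin_unlock(&lock_b, key_b);
 * k_spin_unlock(&lock_a, key_a);
 * @endcode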
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);

#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	uint32_t delta = sys_clock_cycle_get_32() - l->lock_time;

	__ASSERT(delta < CONFIG_SPIN_LOCK_TIME_LIMIT,
		 "Spin lock %p held %u cycles, longer than limit of %u cycles",
		 l, delta, CONFIG_SPIN_LOCK_TIME_LIMIT);
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/* Give the spinlock to the next CPU in the FIFO */
	(void)atomic_inc(&l->owner);
#else
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value).  We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race.  But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	(void)atomic_clear(&l->locked);
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
	arch_irq_unlock(key.key);
}

/**
 * @cond INTERNAL_HIDDEN
 */

#if defined(CONFIG_SMP) && defined(CONFIG_TEST)
/*
 * @brief Checks whether the spinlock is held by some CPU, including the
 *        local CPU.  This API should not be used outside of spinlock tests.
 *
 * @param l A pointer to the spinlock
 * @retval true if the spinlock is held by some CPU; false otherwise
 */
static ALWAYS_INLINE bool z_spin_is_locked(struct k_spinlock *l)
{
#ifdef CONFIG_TICKET_SPINLOCKS
	atomic_val_t ticket_val = atomic_get(&l->owner);

	/* The lock is free only when the next ticket to be served (owner)
	 * equals the next ticket to be handed out (tail); a CAS with
	 * identical old and new values is an atomic equality test.
	 */
	return !atomic_cas(&l->tail, ticket_val, ticket_val);
#else
	return l->locked;
#endif /* CONFIG_TICKET_SPINLOCKS */
}
#endif /* defined(CONFIG_SMP) && defined(CONFIG_TEST) */

/* Internal function: releases the lock, but leaves local interrupts disabled */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	(void)atomic_inc(&l->owner);
#else
	(void)atomic_clear(&l->locked);
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
}

#if defined(CONFIG_SPIN_VALIDATE) && defined(__GNUC__)
static ALWAYS_INLINE void z_spin_onexit(__maybe_unused k_spinlock_key_t *k)
{
	__ASSERT(k->key, "K_SPINLOCK exited with goto, break or return, "
			 "use K_SPINLOCK_BREAK instead.");
}
#define K_SPINLOCK_ONEXIT __attribute__((__cleanup__(z_spin_onexit)))
#else
#define K_SPINLOCK_ONEXIT
#endif

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Leaves a code block guarded with @ref K_SPINLOCK after releasing the
 * lock.
 *
 * See @ref K_SPINLOCK for details.
 */
#define K_SPINLOCK_BREAK continue

/**
 * @brief Guards a code block with the given spinlock, automatically acquiring
 * the lock before executing the code block. The lock will be released either
 * when reaching the end of the code block or when leaving the block with
 * @ref K_SPINLOCK_BREAK.
 *
 * @details Example usage:
 *
 * @code{.c}
 * K_SPINLOCK(&mylock) {
 *
 *   ...execute statements with the lock held...
 *
 *   if (some_condition) {
 *     ...release the lock and leave the guarded section prematurely:
 *     K_SPINLOCK_BREAK;
 *   }
 *
 *   ...execute statements with the lock held...
 *
 * }
 * @endcode
 *
 * Behind the scenes this pattern expands to a for-loop whose body is executed
 * exactly once:
 *
 * @code{.c}
 * for (k_spinlock_key_t key = k_spin_lock(&mylock); ...; k_spin_unlock(&mylock, key)) {
 *     ...
 * }
 * @endcode
 *
 * @warning The code block must execute to its end or be left by calling
 * @ref K_SPINLOCK_BREAK. Otherwise, e.g. when exiting the block with a break,
 * goto or return statement, the spinlock will not be released on exit.
 *
 * @note In user mode the spinlock must be placed in memory accessible to the
 * application; see the @ref K_APP_DMEM and @ref K_APP_BMEM macros for details.
 *
 * @param lck Spinlock used to guard the enclosed code block.
 */
#define K_SPINLOCK(lck)                                                                            \
	for (k_spinlock_key_t __i K_SPINLOCK_ONEXIT = {}, __key = k_spin_lock(lck); !__i.key;      \
	     k_spin_unlock((lck), __key), __i.key = 1)

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */
        
