LCOV - code coverage report

Current view: top level - zephyr - spinlock.h
Test:         new.info
Date:         2024-12-22 00:14:23
Coverage:     Lines: 8 of 8 (100.0 %)

/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Public interface for spinlocks
 */

#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <errno.h>
#include <stdbool.h>

#include <zephyr/arch/cpu.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/time_units.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Spinlock APIs
 * @defgroup spinlock_apis Spinlock APIs
 * @ingroup kernel_apis
 * @{
 */

struct z_spinlock_key {
        int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock().  Any number of spinlocks may be defined in
 * application code.
 */
struct k_spinlock {
/**
 * @cond INTERNAL_HIDDEN
 */
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
        /*
         * Ticket spinlocks are conceptually two atomic variables,
         * one indicating the current FIFO head (spinlock owner),
         * and the other indicating the current FIFO tail.
         * The spinlock is acquired in the following manner:
         * - the current FIFO tail value is atomically incremented while its
         *   original value is saved as a "ticket"
         * - we spin until the FIFO head becomes equal to the ticket value
         *
         * The spinlock is released by an atomic increment of the FIFO head.
         * A standalone sketch of this sequence follows the struct definition.
         */
        atomic_t owner;
        atomic_t tail;
#else
        atomic_t locked;
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */

#ifdef CONFIG_SPIN_VALIDATE
        /* Stores the thread that holds the lock with the locking CPU
         * ID in the bottom two bits.
         */
        uintptr_t thread_cpu;
#ifdef CONFIG_SPIN_LOCK_TIME_LIMIT
        /* Stores the time (in cycles) when a lock was taken
         */
        uint32_t lock_time;
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#if defined(CONFIG_CPP) && !defined(CONFIG_SMP) && \
        !defined(CONFIG_SPIN_VALIDATE)
        /* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
         * the k_spinlock struct will have no members. The result
         * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
         *
         * This size difference causes problems when the k_spinlock
         * is embedded into another struct like k_msgq, because C and
         * C++ will have different ideas on the offsets of the members
         * that come after the k_spinlock member.
         *
         * To prevent this we add a 1 byte dummy member to k_spinlock
         * when the user selects C++ support and k_spinlock would
         * otherwise be empty.
         */
        char dummy;
#endif
/**
 * INTERNAL_HIDDEN @endcond
 */
};
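
/*
 * For illustration, the ticket scheme described in the struct above can be
 * sketched in isolation with two bare atomics.  This is a simplified sketch
 * with illustrative helper names, not the actual implementation; see
 * k_spin_lock() and k_spin_unlock() below for the real code paths, which
 * also handle IRQ masking and validation.
 *
 * @code{.c}
 * atomic_t owner = ATOMIC_INIT(0);
 * atomic_t tail = ATOMIC_INIT(0);
 *
 * void ticket_acquire(void)
 * {
 *         atomic_val_t ticket = atomic_inc(&tail);
 *
 *         while (atomic_get(&owner) != ticket) {
 *                 arch_spin_relax();
 *         }
 * }
 *
 * void ticket_release(void)
 * {
 *         (void)atomic_inc(&owner);
 * }
 * @endcode
 */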

/* There's a spinlock validation framework available when asserts are
 * enabled.  It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock().  It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

static ALWAYS_INLINE void z_spinlock_validate_pre(struct k_spinlock *l)
{
        ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
        __ASSERT(z_spin_lock_valid(l), "Invalid spinlock %p", l);
#ifdef CONFIG_KERNEL_COHERENCE
        __ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
#endif
#endif
}

static ALWAYS_INLINE void z_spinlock_validate_post(struct k_spinlock *l)
{
        ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
        z_spin_lock_set_owner(l);
#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
        l->lock_time = sys_clock_cycle_get_32();
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */
}

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time.  Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock().  The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time.  Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested. It is legal to lock an
 * (unlocked) spin lock while holding a different lock.  Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place.  Implementations are free to optimize
 * in uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
        ARG_UNUSED(l);
        k_spinlock_key_t k;

        /* Note that we need to use the underlying arch-specific lock
         * implementation.  The "irq_lock()" API in SMP context is
         * actually a wrapper for a global spinlock!
         */
        k.key = arch_irq_lock();

        z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
        /*
         * Enqueue ourselves to the end of a spinlock waiters queue
         * receiving a ticket
         */
        atomic_val_t ticket = atomic_inc(&l->tail);
        /* Spin until our ticket is served */
        while (atomic_get(&l->owner) != ticket) {
                arch_spin_relax();
        }
#else
        while (!atomic_cas(&l->locked, 0, 1)) {
                arch_spin_relax();
        }
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
        z_spinlock_validate_post(l);

        return k;
}
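
/*
 * A minimal usage sketch: a hypothetical my_lock guarding a shared counter.
 * The key returned by k_spin_lock() is passed back to the matching
 * k_spin_unlock().
 *
 * @code{.c}
 * static struct k_spinlock my_lock;
 * static unsigned int shared_counter;
 *
 * void counter_increment(void)
 * {
 *         k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *         shared_counter++;
 *         k_spin_unlock(&my_lock, key);
 * }
 * @endcode
 */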

/**
 * @brief Attempt to lock a spinlock
 *
 * This routine makes one attempt to lock @p l. If it is successful, then
 * it will store the key into @p k.
 *
 * @param[in] l A pointer to the spinlock to lock
 * @param[out] k A pointer to the spinlock key
 * @retval 0 on success
 * @retval -EBUSY if another thread holds the lock
 *
 * @see k_spin_lock
 * @see k_spin_unlock
 */
static ALWAYS_INLINE int k_spin_trylock(struct k_spinlock *l, k_spinlock_key_t *k)
{
        int key = arch_irq_lock();

        z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
        /*
         * The atomic_get and atomic_cas operations below are not executed
         * atomically as a pair, so in theory k_spin_trylock can lock an
         * already locked spinlock.  To reproduce this, one of the following
         * conditions would have to be met between our atomic_get and our
         * atomic_cas:
         *
         * - the spinlock is taken 0xffff_..._ffff + 1 more times (which
         *   requires 0xffff_..._ffff CPUs, as the k_spin_lock() call is
         *   blocking), or
         * - the spinlock is taken and released 0xffff_..._ffff times and
         *   then taken again
         *
         * In real-life systems this is considered non-reproducible, since the
         * required actions would have to happen within a tiny window of a few
         * CPU instructions (which execute with interrupts locked, so no
         * preemption can happen here).
         */
        atomic_val_t ticket_val = atomic_get(&l->owner);

        if (!atomic_cas(&l->tail, ticket_val, ticket_val + 1)) {
                goto busy;
        }
#else
        if (!atomic_cas(&l->locked, 0, 1)) {
                goto busy;
        }
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
        z_spinlock_validate_post(l);

        k->key = key;

        return 0;

#ifdef CONFIG_SMP
busy:
        arch_irq_unlock(key);
        return -EBUSY;
#endif /* CONFIG_SMP */
}
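
/*
 * A sketch of the non-blocking pattern with a hypothetical my_lock.  On
 * -EBUSY the caller does not hold the lock and must not call
 * k_spin_unlock().
 *
 * @code{.c}
 * static struct k_spinlock my_lock;
 *
 * int shared_data_try_update(void)
 * {
 *         k_spinlock_key_t key;
 *
 *         if (k_spin_trylock(&my_lock, &key) != 0) {
 *                 return -EBUSY;
 *         }
 *
 *         ...update the shared data with the lock held...
 *
 *         k_spin_unlock(&my_lock, key);
 *         return 0;
 * }
 * @endcode
 */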

/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock().  After this
 * function is called, any CPU will be able to acquire the lock.  If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested.  A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned.  Attempts to
 * unlock mis-nested locks, to unlock locks that are not held, or to
 * pass a key parameter other than the one returned from
 * k_spin_lock() are illegal.  When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *        acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
                                        k_spinlock_key_t key)
{
        ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
        __ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);

#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
        uint32_t delta = sys_clock_cycle_get_32() - l->lock_time;

        __ASSERT(delta < CONFIG_SPIN_LOCK_TIME_LIMIT,
                 "Spin lock %p held %u cycles, longer than limit of %u cycles",
                 l, delta, CONFIG_SPIN_LOCK_TIME_LIMIT);
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
        /* Give the spinlock to the next CPU in a FIFO */
        (void)atomic_inc(&l->owner);
#else
        /* Strictly we don't need atomic_clear() here (which is an
         * exchange operation that returns the old value).  We are always
         * setting a zero and (because we hold the lock) know the existing
         * state won't change due to a race.  But some architectures need
         * a memory barrier when used like this, and we don't have a
         * Zephyr framework for that.
         */
        (void)atomic_clear(&l->locked);
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
        arch_irq_unlock(key.key);
}
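
/*
 * A nesting sketch with hypothetical lock_a and lock_b: distinct locks may be
 * held at the same time, but each must be released with its own key, and the
 * most recently taken lock must be released first.
 *
 * @code{.c}
 * static struct k_spinlock lock_a;
 * static struct k_spinlock lock_b;
 * static int count_a;
 * static int count_b;
 *
 * void update_both(void)
 * {
 *         k_spinlock_key_t key_a = k_spin_lock(&lock_a);
 *         k_spinlock_key_t key_b = k_spin_lock(&lock_b);
 *
 *         count_a++;
 *         count_b++;
 *
 *         k_spin_unlock(&lock_b, key_b);
 *         k_spin_unlock(&lock_a, key_a);
 * }
 * @endcode
 */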

/**
 * @cond INTERNAL_HIDDEN
 */

#if defined(CONFIG_SMP) && defined(CONFIG_TEST)
/*
 * @brief Checks if the spinlock is held by some CPU, including the local CPU.
 *        This API should not be used outside of spinlock tests.
 *
 * @param l A pointer to the spinlock
 * @retval true if the spinlock is held by some CPU, false otherwise
 */
static ALWAYS_INLINE bool z_spin_is_locked(struct k_spinlock *l)
{
#ifdef CONFIG_TICKET_SPINLOCKS
        atomic_val_t ticket_val = atomic_get(&l->owner);

        return !atomic_cas(&l->tail, ticket_val, ticket_val);
#else
        return l->locked;
#endif /* CONFIG_TICKET_SPINLOCKS */
}
#endif /* defined(CONFIG_SMP) && defined(CONFIG_TEST) */

/* Internal function: releases the lock, but leaves local interrupts disabled */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
        ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
        __ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
        (void)atomic_inc(&l->owner);
#else
        (void)atomic_clear(&l->locked);
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
}

#if defined(CONFIG_SPIN_VALIDATE) && defined(__GNUC__)
static ALWAYS_INLINE void z_spin_onexit(__maybe_unused k_spinlock_key_t *k)
{
        __ASSERT(k->key, "K_SPINLOCK exited with goto, break or return, "
                         "use K_SPINLOCK_BREAK instead.");
}
#define K_SPINLOCK_ONEXIT __attribute__((__cleanup__(z_spin_onexit)))
#else
#define K_SPINLOCK_ONEXIT
#endif

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Leaves a code block guarded with @ref K_SPINLOCK after releasing the
 * lock.
 *
 * See @ref K_SPINLOCK for details.
 */
#define K_SPINLOCK_BREAK continue

/**
 * @brief Guards a code block with the given spinlock, automatically acquiring
 * the lock before executing the code block. The lock will be released either
 * when reaching the end of the code block or when leaving the block with
 * @ref K_SPINLOCK_BREAK.
 *
 * @details Example usage:
 *
 * @code{.c}
 * K_SPINLOCK(&mylock) {
 *
 *   ...execute statements with the lock held...
 *
 *   if (some_condition) {
 *     ...release the lock and leave the guarded section prematurely:
 *     K_SPINLOCK_BREAK;
 *   }
 *
 *   ...execute statements with the lock held...
 *
 * }
 * @endcode
 *
 * Behind the scenes this pattern expands to a for-loop whose body is executed
 * exactly once:
 *
 * @code{.c}
 * for (k_spinlock_key_t key = k_spin_lock(&mylock); ...; k_spin_unlock(&mylock, key)) {
 *     ...
 * }
 * @endcode
 *
 * @warning The code block must execute to its end or be left by calling
 * @ref K_SPINLOCK_BREAK. Otherwise, e.g. if exiting the block with a break,
 * goto or return statement, the spinlock will not be released on exit.
 *
 * @note In user mode the spinlock must be placed in memory accessible to the
 * application, see @ref K_APP_DMEM and @ref K_APP_BMEM macros for details.
 *
 * @param lck Spinlock used to guard the enclosed code block.
 */
#define K_SPINLOCK(lck)                                                                            \
        for (k_spinlock_key_t __i K_SPINLOCK_ONEXIT = {}, __key = k_spin_lock(lck); !__i.key;      \
             k_spin_unlock((lck), __key), __i.key = 1)
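
/*
 * A concrete sketch of the guarded-block form, assuming a hypothetical budget
 * counter.  K_SPINLOCK_BREAK releases the lock and leaves the block early;
 * the lock is likewise released when the block runs to its end.
 *
 * @code{.c}
 * static struct k_spinlock my_lock;
 * static int budget;
 *
 * bool budget_try_consume(void)
 * {
 *         bool consumed = false;
 *
 *         K_SPINLOCK(&my_lock) {
 *                 if (budget == 0) {
 *                         K_SPINLOCK_BREAK;
 *                 }
 *                 budget--;
 *                 consumed = true;
 *         }
 *
 *         return consumed;
 * }
 * @endcode
 */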

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */

Generated by: LCOV version 1.14