#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_
struct z_spinlock_key {
	int key;
};
#ifdef CONFIG_TICKET_SPINLOCKS
#ifdef CONFIG_SPIN_VALIDATE
#ifdef CONFIG_SPIN_LOCK_TIME_LIMIT
#if defined(CONFIG_NONZERO_SPINLOCK_SIZE) && !defined(CONFIG_SMP) && !defined(CONFIG_SPIN_VALIDATE)
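/* These guards select the members of struct k_spinlock: with
 * CONFIG_TICKET_SPINLOCKS the lock state appears to be a pair of
 * atomics, the currently served FIFO position (l->owner) and the next
 * free position (l->tail), as used by the functions below;
 * CONFIG_SPIN_VALIDATE adds owner-tracking bookkeeping, and the
 * CONFIG_NONZERO_SPINLOCK_SIZE case pads the otherwise empty struct so
 * it has a nonzero size.
 */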
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 4, "Too many CPUs for mask");
#ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
#endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */
static ALWAYS_INLINE void z_spinlock_validate_pre(struct k_spinlock *l)
{
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Invalid spinlock %p", l);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
#endif
#endif
}
static ALWAYS_INLINE void z_spinlock_validate_post(struct k_spinlock *l)
{
#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	/* Record the acquisition time for the hold-time check at unlock. */
	l->lock_time = sys_clock_cycle_get_32();
#endif
#endif
}
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	k_spinlock_key_t k;

	/* Mask interrupts on this CPU first, then take the lock. */
	k.key = arch_irq_lock();
	z_spinlock_validate_pre(l);
#ifdef CONFIG_TICKET_SPINLOCKS
	/* Draw a ticket, then spin until the owner position reaches it. */
	atomic_val_t ticket = atomic_inc(&l->tail);

	while (atomic_get(&l->owner) != ticket) {
		arch_spin_relax();
	}
#endif
	z_spinlock_validate_post(l);
	return k;
}
static ALWAYS_INLINE int k_spin_trylock(struct k_spinlock *l, k_spinlock_key_t *k)
{
	int key = arch_irq_lock();

	z_spinlock_validate_pre(l);
#ifdef CONFIG_TICKET_SPINLOCKS
	/* Only draw a ticket if the lock is uncontended: the tail may be
	 * advanced only while it still equals the owner position.
	 */
	atomic_val_t ticket_val = atomic_get(&l->owner);

	if (!atomic_cas(&l->tail, ticket_val, ticket_val + 1)) {
		arch_irq_unlock(key);
		return -EBUSY;
	}
#endif
	z_spinlock_validate_post(l);

	k->key = key;

	return 0;
}
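/* A minimal usage sketch; the lock object and the work shown are
 * placeholders, not part of this header:
 *
 *	struct k_spinlock my_lock;
 *	k_spinlock_key_t key;
 *
 *	if (k_spin_trylock(&my_lock, &key) == 0) {
 *		... critical section ...
 *		k_spin_unlock(&my_lock, key);
 *	} else {
 *		... got -EBUSY, the lock was contended ...
 *	}
 */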
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l, k_spinlock_key_t key)
{
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);

#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	uint32_t delta = sys_clock_cycle_get_32() - l->lock_time;

	__ASSERT(delta < CONFIG_SPIN_LOCK_TIME_LIMIT,
		 "Spin lock %p held %u cycles, longer than limit of %u cycles",
		 l, delta, CONFIG_SPIN_LOCK_TIME_LIMIT);
#endif
#endif

#ifdef CONFIG_TICKET_SPINLOCKS
	/* Hand the lock to the next ticket holder. */
	atomic_inc(&l->owner);
#endif
	arch_irq_unlock(key.key);
}
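/* The canonical pairing, sketched with a placeholder lock:
 *
 *	struct k_spinlock my_lock;
 *
 *	k_spinlock_key_t key = k_spin_lock(&my_lock);
 *	... critical section, interrupts masked on this CPU ...
 *	k_spin_unlock(&my_lock, key);
 *
 * The key returned by k_spin_lock() must be passed back to
 * k_spin_unlock() on the same CPU so that the saved interrupt state can
 * be restored.
 */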
#if defined(CONFIG_TEST) || defined(CONFIG_ASSERT)
static ALWAYS_INLINE bool z_spin_is_locked(struct k_spinlock *l)
{
#ifdef CONFIG_TICKET_SPINLOCKS
	atomic_val_t ticket_val = atomic_get(&l->owner);

	/* A compare-and-set with old == new writes nothing; it serves
	 * here as an atomic equality test. The lock is held exactly when
	 * the tail has moved past the owner position.
	 */
	return !atomic_cas(&l->tail, ticket_val, ticket_val);
#endif
}
#endif /* CONFIG_TEST || CONFIG_ASSERT */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_TICKET_SPINLOCKS
	atomic_inc(&l->owner);
#endif
}
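/* Note that k_spin_release() takes no key: it drops the lock without
 * restoring the interrupt state saved at lock time, presumably for
 * internal paths such as context switch where the key is consumed by
 * other means.
 */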
#if defined(CONFIG_SPIN_VALIDATE) && defined(__GNUC__)
static ALWAYS_INLINE void z_spin_onexit(k_spinlock_key_t *k)
{
	__ASSERT(k->key, "K_SPINLOCK exited with goto, break or return, "
			 "use K_SPINLOCK_BREAK instead.");
}
#define K_SPINLOCK_ONEXIT __attribute__((__cleanup__(z_spin_onexit)))
#else
#define K_SPINLOCK_ONEXIT
#endif
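/* How the guard works: K_SPINLOCK declares a loop variable that carries
 * this attribute, so GCC calls z_spin_onexit() whenever that variable
 * goes out of scope. On a normal exit the unlock step has already set
 * __i.key to 1 and the assertion passes; leaving the block with goto,
 * break or return skips that step, __i.key is still 0, and the
 * assertion fires.
 */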
#define K_SPINLOCK_BREAK continue
#define K_SPINLOCK(lck) \
	for (k_spinlock_key_t __i K_SPINLOCK_ONEXIT = {}, __key = k_spin_lock(lck); !__i.key; \
	     k_spin_unlock((lck), __key), __i.key = 1)
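/* Usage sketch for the scoped form, again with a placeholder lock:
 *
 *	struct k_spinlock my_lock;
 *
 *	K_SPINLOCK(&my_lock) {
 *		if (no_more_work) {
 *			K_SPINLOCK_BREAK;
 *		}
 *		... critical section ...
 *	}
 *
 * K_SPINLOCK_BREAK expands to `continue`, which still reaches the for
 * loop's increment clause, so the lock is released; a plain break, goto
 * or return would skip the unlock, which is exactly what the ONEXIT
 * assertion above catches.
 */

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */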