Zephyr API Documentation 3.5.0
A Scalable Open Source RTOS
spinlock.h
/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <errno.h>
#include <stdbool.h>

#include <zephyr/arch/cpu.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/time_units.h>

#ifdef __cplusplus
extern "C" {
#endif

struct z_spinlock_key {
	int key;
};

/** Kernel Spin Lock */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#ifdef CONFIG_SPIN_LOCK_TIME_LIMIT
	/* Stores the time (in cycles) when the lock was taken. */
	uint32_t lock_time;
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#if defined(CONFIG_CPP) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};
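
/* Illustrative usage sketch, not part of this header: embedding a k_spinlock
 * next to the data it protects, the scenario the dummy-member comment above
 * refers to. The struct and field names are hypothetical.
 */
struct my_shared_counter {
	struct k_spinlock lock;
	uint32_t count;
};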

/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't enable it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

/** Spinlock key type */
typedef struct z_spinlock_key k_spinlock_key_t;

static ALWAYS_INLINE void z_spinlock_validate_pre(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Invalid spinlock %p", l);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
#endif
#endif
}

static ALWAYS_INLINE void z_spinlock_validate_post(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	l->lock_time = sys_clock_cycle_get_32();
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */
}

/** Lock a spinlock */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

	z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
		arch_spin_relax();
	}
#endif
	z_spinlock_validate_post(l);

	return k;
}
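
/* Illustrative usage sketch, not part of this header: the basic
 * critical-section pattern. k_spin_unlock() is defined further down in this
 * file; the lock and variable names are hypothetical.
 */
static struct k_spinlock example_lock;
static uint32_t example_value;

static inline void example_increment(void)
{
	/* Masks interrupts on this CPU and, under CONFIG_SMP, spins until
	 * the lock is free.
	 */
	k_spinlock_key_t key = k_spin_lock(&example_lock);

	example_value++;

	/* Must be released with the key returned by k_spin_lock(). */
	k_spin_unlock(&example_lock, key);
}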

/** Attempt to lock a spinlock; returns 0 on success or -EBUSY if held. */
static ALWAYS_INLINE int k_spin_trylock(struct k_spinlock *l, k_spinlock_key_t *k)
{
	int key = arch_irq_lock();

	z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
	if (!atomic_cas(&l->locked, 0, 1)) {
		arch_irq_unlock(key);
		return -EBUSY;
	}
#endif
	z_spinlock_validate_post(l);

	k->key = key;

	return 0;
}
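
/* Illustrative usage sketch, not part of this header: k_spin_trylock() lets
 * a caller back off instead of spinning when the lock is contended. Reuses
 * the hypothetical example_lock/example_value names from the sketch above.
 */
static inline void example_try_increment(void)
{
	k_spinlock_key_t key;

	if (k_spin_trylock(&example_lock, &key) == 0) {
		example_value++;
		k_spin_unlock(&example_lock, key);
	} else {
		/* -EBUSY: another CPU holds the lock; do other work rather
		 * than spinning here.
		 */
	}
}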

/** Unlock a spin lock */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l, k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);

#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	uint32_t delta = sys_clock_cycle_get_32() - l->lock_time;

	__ASSERT(delta < CONFIG_SPIN_LOCK_TIME_LIMIT,
		 "Spin lock %p held %u cycles, longer than limit of %u cycles",
		 l, delta, CONFIG_SPIN_LOCK_TIME_LIMIT);
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}

#if defined(CONFIG_SMP) && defined(CONFIG_TEST)
/*
 * @brief Checks if the spinlock is held by some CPU, including the local CPU.
 * This API shouldn't be used outside of spinlock tests.
 *
 * @param l A pointer to the spinlock
 * @retval true if the spinlock is held by some CPU; false otherwise
 */
static ALWAYS_INLINE bool z_spin_is_locked(struct k_spinlock *l)
{
	return l->locked;
}
#endif

/* Internal function: releases the lock, but leaves local interrupts disabled */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}

#if defined(CONFIG_SPIN_VALIDATE) && defined(__GNUC__)
static ALWAYS_INLINE void z_spin_onexit(__maybe_unused k_spinlock_key_t *k)
{
	__ASSERT(k->key, "K_SPINLOCK exited with goto, break or return, "
			 "use K_SPINLOCK_BREAK instead.");
}
#define K_SPINLOCK_ONEXIT __attribute__((__cleanup__(z_spin_onexit)))
#else
#define K_SPINLOCK_ONEXIT
#endif

/* Leave a K_SPINLOCK() critical section early; the lock is still released. */
#define K_SPINLOCK_BREAK continue

/* Scoped critical section: takes lck, runs the following block, and releases
 * lck when the block exits (normally or via K_SPINLOCK_BREAK).
 */
#define K_SPINLOCK(lck) \
	for (k_spinlock_key_t __i K_SPINLOCK_ONEXIT = {}, __key = k_spin_lock(lck); !__i.key; \
	     k_spin_unlock(lck, __key), __i.key = 1)
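
/* Illustrative usage sketch, not part of this header: K_SPINLOCK() scopes a
 * critical section so the lock is released when the block is left, including
 * an early exit via K_SPINLOCK_BREAK. Reuses the hypothetical names above.
 */
static inline void example_scoped_update(void)
{
	K_SPINLOCK(&example_lock) {
		example_value++;

		if (example_value > 100U) {
			/* Leaves the block early; the lock is still released. */
			K_SPINLOCK_BREAK;
		}

		example_value *= 2U;
	}
}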

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */