Zephyr API Documentation 3.0.0
A Scalable Open Source RTOS
spinlock.h
/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <sys/atomic.h>
#include <sys/__assert.h>
#include <stdbool.h>
#include <arch/cpu.h>

#ifdef __cplusplus
extern "C" {
#endif

struct z_spinlock_key {
	int key;
};

/* Kernel Spin Lock. */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#endif

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined,
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas of the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1-byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};
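
/* Illustrative sketch (not part of spinlock.h): with a hypothetical
 * container struct, an empty k_spinlock would give C and C++ different
 * member offsets, which is exactly what the 'dummy' member prevents:
 *
 *     struct my_container {
 *             struct k_spinlock lock; // sizeof() == 0 in C, 1 in C++ if empty
 *             uint32_t data;          // offsetof(struct my_container, data)
 *                                     // would then differ across the
 *                                     // language boundary
 *     };
 */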

/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

/* Spinlock key type. */
typedef struct z_spinlock_key k_spinlock_key_t;

/* Lock a spinlock. */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#endif
	return k;
}

/* Unlock a spin lock. */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l, k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}

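/* Usage sketch (not part of spinlock.h): a hypothetical critical section
 * protecting a counter shared between contexts. The key returned by
 * k_spin_lock() must be passed back to k_spin_unlock() on the same
 * code path:
 *
 *     struct k_spinlock my_lock;
 *     unsigned int counter;
 *
 *     void increment(void)
 *     {
 *             k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *             counter++;
 *             k_spin_unlock(&my_lock, key);
 *     }
 */
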
/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}

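/* Usage sketch (not part of spinlock.h): a hypothetical hand-off where the
 * lock is dropped with k_spin_release() while local interrupts stay masked,
 * and the saved key is only restored later via arch_irq_unlock():
 *
 *     k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *     update_shared_state();      // protected against other CPUs and ISRs
 *     k_spin_release(&my_lock);   // other CPUs may take the lock again
 *     finish_cpu_local_work();    // still runs with local IRQs masked
 *     arch_irq_unlock(key.key);   // finally re-enable local interrupts
 */
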
#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */