/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw.h:
 *                        The raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types_raw.h:
 *                        The raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
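/*
 * A minimal usage sketch of the final spin_*() APIs this header builds
 * (illustrative only, not part of the header; the lock, counter and
 * function names below are hypothetical):
 */
#if 0	/* example only */
static DEFINE_SPINLOCK(example_lock);
static unsigned long example_counter;

static void example_increment(void)
{
	unsigned long flags;

	/* Disable local interrupts and take the lock. */
	spin_lock_irqsave(&example_lock, flags);
	example_counter++;
	spin_unlock_irqrestore(&example_lock, flags);
}
#endif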

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>


/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"
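/*
 * Illustrative sketch (example only; hypothetical x86 assembly): the
 * LOCK_SECTION_* helpers are meant to be spliced into inline asm so that
 * a contention slow path is emitted out of line, in subsection 1 of the
 * text section, under the ".text..lock.<file>" label:
 */
#if 0	/* example only */
static void example_spin_until_zero(int *var)
{
	asm volatile("1:	cmpl	$0, %0\n\t"
		     "	jne	3f\n"
		     "2:\n\t"
		     LOCK_SECTION_START("")
		     "3:	rep; nop\n\t"	/* pause, then retry the test */
		     "	jmp	1b\n\t"
		     LOCK_SECTION_END
		     : : "m" (*var) : "memory");
}
#endif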

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)					\
do {									\
	static struct lock_class_key __key;				\
									\
	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
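/*
 * A minimal sketch of dynamic initialization (example only; the structure
 * and function names are hypothetical). Note that under
 * CONFIG_DEBUG_SPINLOCK the macro above registers one static lockdep
 * class key per raw_spin_lock_init() call site:
 */
#if 0	/* example only */
struct example_device {
	raw_spinlock_t lock;
	unsigned int state;
};

static void example_device_init(struct example_device *dev)
{
	raw_spin_lock_init(&dev->lock);
	dev->state = 0;
}
#endif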

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
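/*
 * Sketch (example only; hypothetical function name): raw_spin_is_locked()
 * is mainly useful for debug assertions; it cannot tell whether the
 * *current* CPU is the holder:
 */
#if 0	/* example only */
static void example_assert_locked(raw_spinlock_t *lock)
{
	WARN_ON_ONCE(!raw_spin_is_locked(lock));
}
#endif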

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /* arch_spin_is_contended */
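/*
 * Sketch of a lock-break pattern (example only; the example_*() helpers
 * are hypothetical): raw_spin_is_contended() lets a long-running critical
 * section yield the lock when another CPU is spinning on it. Where the
 * architecture provides no arch_spin_is_contended(), the fallback above
 * constant-folds to 0 and the branch vanishes:
 */
#if 0	/* example only */
static void example_long_scan(raw_spinlock_t *lock)
{
	raw_spin_lock(lock);
	while (example_more_work()) {		/* hypothetical helper */
		example_do_one_step();		/* hypothetical helper */
		if (raw_spin_is_contended(lock)) {
			raw_spin_unlock(lock);
			cpu_relax();		/* give the waiter a chance */
			raw_spin_lock(lock);
		}
	}
	raw_spin_unlock(lock);
}
#endif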

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0			CPU1				CPU2
 *
 *	  spin_lock(S);		spin_lock(S);			r1 = READ_ONCE(Y);
 *	  WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *	  spin_unlock(S);	r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *				WRITE_ONCE(Y, 1);
 *				spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	kcsan_mb()
#endif
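/*
 * Sketch of the wakeup-side pattern described above (example only; the
 * structure and helper names are hypothetical, modelled loosely on the
 * try_to_wake_up() usage). The full barrier orders the lock acquisition
 * against the later condition load, so the waker cannot miss a sleeper's
 * program-order earlier store:
 */
#if 0	/* example only */
struct example_waitqueue {
	spinlock_t lock;
	int condition;
};

static void example_wake(struct example_waitqueue *wq)
{
	spin_lock(&wq->lock);
	/* Full barrier: lock acquire vs. the condition load below. */
	smp_mb__after_spinlock();
	if (READ_ONCE(wq->condition))
		example_do_wakeup(wq);	/* hypothetical helper */
	spin_unlock(&wq->lock);
}
#endif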

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}