/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
#define __LINUX_INSIDE_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * Here is the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)                                       \
do {                                                                    \
        static struct lock_class_key __key;                             \
                                                                        \
        __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);      \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
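/*
 * Example (illustrative sketch; "my_dev" and "my_dev_setup" are made-up
 * names): a raw_spinlock_t embedded in a dynamically allocated object must
 * be initialized with raw_spin_lock_init() before first use, while a
 * static one can use DEFINE_RAW_SPINLOCK() from linux/spinlock_types.h:
 *
 *      static DEFINE_RAW_SPINLOCK(global_raw_lock);
 *
 *      struct my_dev {
 *              raw_spinlock_t  lock;
 *              int             counter;
 *      };
 *
 *      static int my_dev_setup(struct my_dev *dev)
 *      {
 *              raw_spin_lock_init(&dev->lock);
 *              dev->counter = 0;
 *              return 0;
 *      }
 */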
/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                            CPU1
 *
 *        WRITE_ONCE(X, 1);               WRITE_ONCE(Y, 1);
 *        spin_lock(S);                   smp_mb();
 *        smp_mb__after_spinlock();       r1 = READ_ONCE(X);
 *        r0 = READ_ONCE(Y);
 *        spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *  { X = 0;  Y = 0; }
 *
 *  CPU0                CPU1                            CPU2
 *
 *  spin_lock(S);       spin_lock(S);                   r1 = READ_ONCE(Y);
 *  WRITE_ONCE(X, 1);   smp_mb__after_spinlock();       smp_rmb();
 *  spin_unlock(S);     r0 = READ_ONCE(X);              r2 = READ_ONCE(X);
 *                      WRITE_ONCE(Y, 1);
 *                      spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()        kcsan_mb()
#endif
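/*
 * Illustrative sketch (hypothetical flags "cond" and "sleeping" and a
 * made-up wake_it_up() helper; modeled on property (1) above, not an API
 * defined in this file): a waker can rely on smp_mb__after_spinlock() to
 * order a store preceding the lock against a load inside the critical
 * section, pairing with a full barrier on the sleeper's side:
 *
 *      // waker                                // sleeper
 *      WRITE_ONCE(cond, 1);                    WRITE_ONCE(sleeping, 1);
 *      spin_lock(&s);                          smp_mb();
 *      smp_mb__after_spinlock();               if (!READ_ONCE(cond))
 *      if (READ_ONCE(sleeping))                        schedule();
 *              wake_it_up();
 *      spin_unlock(&s);
 *
 * Per property (1), r0 = 0 and r1 = 0 cannot both happen: at least one side
 * observes the other's store, so the sleeper cannot block after the waker
 * has decided no wakeup is needed.
 */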
#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
        mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&(lock)->raw_lock);

        if (ret)
                mmiowb_spin_lock();

        return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        mmiowb_spin_unlock();
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods. Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION is set. The
 * various methods are defined as NOPs when they are not required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                flags = _raw_spin_lock_irqsave(lock);           \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
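/*
 * Example (illustrative sketch; "my_lock", "my_count" and "bump_count" are
 * made-up names): the irqsave variants must be used when the lock can also
 * be taken from hard interrupt context; "flags" preserves the caller's
 * interrupt state so nesting inside already-disabled regions is safe:
 *
 *      static DEFINE_RAW_SPINLOCK(my_lock);
 *      static unsigned long my_count;
 *
 *      static void bump_count(void)
 *      {
 *              unsigned long flags;
 *
 *              raw_spin_lock_irqsave(&my_lock, flags);
 *              my_count++;
 *              raw_spin_unlock_irqrestore(&my_lock, flags);
 *      }
 */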
#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
#endif

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/* Non PREEMPT_RT kernel, map to raw spinlocks: */
#ifndef CONFIG_PREEMPT_RT

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)                                   \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init(spinlock_check(lock),              \
                             #lock, &__key, LD_WAIT_CONFIG);    \
} while (0)

#else

# define spin_lock_init(_lock)                  \
do {                                            \
        spinlock_check(_lock);                  \
        *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
} while (0)

#endif

static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                       \
do {                                                                          \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied by other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

#else  /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Does a critical section need to be broken because another task is
 * waiting? (Technically this does not depend on CONFIG_PREEMPTION,
 * but on a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
        return spin_is_contended(lock);
#else
        return 0;
#endif
}

/*
 * Check if a rwlock is contended.
 * Returns non-zero if there is another task waiting on the rwlock.
 * Returns zero if the lock is not contended or the system / underlying
 * rwlock implementation does not support contention detection.
 * (Technically this does not depend on CONFIG_PREEMPTION, but on a
 * general need for low latency.)
 */
static inline int rwlock_needbreak(rwlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
        return rwlock_is_contended(lock);
#else
        return 0;
#endif
}
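/*
 * Illustrative sketch (hypothetical "my_ctx" structure and process_one()
 * helper; not an API defined here): a long-running critical section can
 * poll spin_needbreak() and drop the lock when another task is spinning
 * on it, bounding the latency it imposes:
 *
 *      static void process_all(struct my_ctx *ctx)
 *      {
 *              spin_lock(&ctx->lock);
 *              while (process_one(ctx)) {
 *                      if (spin_needbreak(&ctx->lock)) {
 *                              spin_unlock(&ctx->lock);
 *                              cond_resched();
 *                              spin_lock(&ctx->lock);
 *                      }
 *              }
 *              spin_unlock(&ctx->lock);
 *      }
 */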
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1. If the result is 0, returns true and locks
 * @lock. Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
                __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
#define atomic_dec_and_raw_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))

extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
                                            unsigned long *flags);
#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
                __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
                             gfp_t gfp, const char *name,
                             struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
        ({                                                                   \
                static struct lock_class_key key;                            \
                int ret;                                                     \
                                                                             \
                ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
                                               cpu_mult, gfp, #locks, &key); \
                ret;                                                         \
        })

void free_bucket_spinlocks(spinlock_t *locks);

DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
                    raw_spin_lock(_T->lock),
                    raw_spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
                    raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
                    raw_spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
                    raw_spin_lock_irq(_T->lock),
                    raw_spin_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
                    raw_spin_lock_irqsave(_T->lock, _T->flags),
                    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
                         raw_spin_trylock_irqsave(_T->lock, _T->flags))

DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
                    spin_lock(_T->lock),
                    spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
                    spin_lock_irq(_T->lock),
                    spin_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
                         spin_trylock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
                    spin_lock_irqsave(_T->lock, _T->flags),
                    spin_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)

DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
                         spin_trylock_irqsave(_T->lock, _T->flags))
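/*
 * Example (illustrative sketch; "my_obj" and "update_obj" are made-up
 * names): the guard classes above pair with guard()/scoped_guard() from
 * <linux/cleanup.h>, so the lock is dropped automatically on every
 * return path, including early error returns:
 *
 *      static int update_obj(struct my_obj *obj, int val)
 *      {
 *              guard(spinlock_irqsave)(&obj->lock);
 *
 *              if (!obj->active)
 *                      return -ENODEV;         // unlocks on return
 *              obj->val = val;
 *              return 0;                       // unlocks on return
 *      }
 */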
DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
                    read_lock(_T->lock),
                    read_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
                    read_lock_irq(_T->lock),
                    read_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
                    read_lock_irqsave(_T->lock, _T->flags),
                    read_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)

DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
                    write_lock(_T->lock),
                    write_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
                    write_lock_irq(_T->lock),
                    write_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
                    write_lock_irqsave(_T->lock, _T->flags),
                    write_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)

#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */