#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

/*
 *  bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * If the lock is uncontended, the first test_and_set_bit()
	 * succeeds and the body of the outer loop is never entered.
	 * Under contention, the inner loop busywaits on a non-atomic
	 * test_bit(), which causes less bus traffic than an atomic
	 * operation, until the bit looks clear and it is worth
	 * retrying the atomic test_and_set_bit().
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (test_and_set_bit(bitnum, addr)) {
		while (test_bit(bitnum, addr)) {
			preempt_enable();	/* give the lock holder a chance to run */
			cpu_relax();
			preempt_disable();
		}
	}
#endif
	__acquire(bitlock);
}

/*
 * Return non-zero if the lock was acquired.  On success the caller
 * holds the bit lock with preemption disabled; on failure preemption
 * is re-enabled and the lock is left untouched.
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (test_and_set_bit(bitnum, addr)) {
		preempt_enable();	/* already held by someone else */
		return 0;
	}
#endif
	__acquire(bitlock);
	return 1;
}
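
/*
 * A sketch of the trylock pattern (illustrative only; "bucket" is an
 * assumed caller-owned object whose flags word spares bit 0 for use as
 * a lock, and is not defined by this header):
 *
 *	if (bit_spin_trylock(0, &bucket->flags)) {
 *		... short critical section ...
 *		bit_spin_unlock(0, &bucket->flags);
 *	} else {
 *		... contended: retry later or take a slower path ...
 *	}
 */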

/*
 *  bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	BUG_ON(!test_bit(bitnum, addr));
	/*
	 * Ensure all memory accesses in the critical section complete
	 * before the lock bit is cleared (release semantics).
	 */
	smp_mb__before_clear_bit();
	clear_bit(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}
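
/*
 * A minimal usage sketch, not part of this header: the structure, the
 * choice of bit 0, and the function names below are illustrative
 * assumptions only.  Bit 0 of ->flags doubles as the lock protecting
 * ->count, in the style of the hashed and journalled users of bit locks.
 */
struct example_counter {
	unsigned long flags;	/* bit 0 is the lock bit */
	int count;		/* protected by bit 0 of ->flags */
};

static inline void example_counter_inc(struct example_counter *c)
{
	bit_spin_lock(0, &c->flags);	/* spin until we own bit 0 */
	c->count++;			/* critical section */
	bit_spin_unlock(0, &c->flags);	/* clear bit 0 so spinners can proceed */
}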

/*
 * Return true if the lock is held.  When the lock compiles away
 * (non-SMP, non-debug builds), fall back to weaker approximations.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT
	return preempt_count();	/* holders run with preemption disabled */
#else
	return 1;		/* uniprocessor, non-preemptible: trivially held */
#endif
}
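
/*
 * A sketch of the intended use of bit_spin_is_locked(): asserting lock
 * ownership in a function that requires the caller to hold the bit
 * lock.  example_counter_read_locked() is a hypothetical name built on
 * the illustrative struct above, not kernel API.
 */
static inline int example_counter_read_locked(struct example_counter *c)
{
	BUG_ON(!bit_spin_is_locked(0, &c->flags));	/* caller must hold bit 0 */
	return c->count;
}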

#endif /* __LINUX_BIT_SPINLOCK_H */