/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_LOCAL_LOCK_H
3# error "Do not include directly, include linux/local_lock.h"
4#endif
5
6#include <linux/percpu-defs.h>
7#include <linux/lockdep.h>
8
9#ifndef CONFIG_PREEMPT_RT
10
/*
 * local_lock_t (non-PREEMPT_RT flavour): protection for per-CPU data.
 *
 * Without CONFIG_DEBUG_LOCK_ALLOC the type is empty — the real
 * protection comes from disabling preemption/interrupts in the
 * __local_lock*() macros below.  With debugging enabled it carries a
 * lockdep map and a record of the owning task for sanity checks.
 */
typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
	struct task_struct	*owner;
#endif
} local_lock_t;
17
18#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Static initializer for the debug fields: names the lockdep map after
 * the lock variable, classifies it as a per-CPU lock with inner wait
 * type LD_WAIT_CONFIG, and starts with no owner recorded.
 */
# define LOCAL_LOCK_DEBUG_INIT(lockname)		\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_CONFIG,	\
		.lock_type = LD_LOCK_PERCPU,		\
	},						\
	.owner = NULL,
26
/*
 * Debug-mode acquire hook: inform lockdep of the acquisition, warn if
 * the lock already has an owner (it must not — local locks do not
 * nest on themselves), then record the current task as owner.
 */
static inline void local_lock_acquire(local_lock_t *l)
{
	lock_map_acquire(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}
33
/*
 * Debug-mode release hook: warn if the releasing task is not the
 * recorded owner, clear the owner while the lock is still held, then
 * inform lockdep of the release.
 */
static inline void local_lock_release(local_lock_t *l)
{
	DEBUG_LOCKS_WARN_ON(l->owner != current);
	l->owner = NULL;
	lock_map_release(&l->dep_map);
}
40
/*
 * Runtime (re)initialization of the debug state: clear the recorded
 * owner.  The lockdep map itself is set up by __local_lock_init().
 */
static inline void local_lock_debug_init(local_lock_t *l)
{
	l->owner = NULL;
}
45#else /* CONFIG_DEBUG_LOCK_ALLOC */
/* No-op stubs when lock debugging is disabled. */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
50#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
51
/* Static initializer; expands to an empty initializer without debug. */
#define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
53
/*
 * Runtime initializer.  A static lock_class_key gives each init site
 * its own lockdep class; the map is registered as a per-CPU lock with
 * inner wait type LD_WAIT_CONFIG, then the debug owner is cleared.
 */
#define __local_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_PERCPU);			\
	local_lock_debug_init(lock);				\
} while (0)
64
/*
 * Acquire: disable preemption to pin the task to this CPU, then run
 * the debug acquire hook on this CPU's instance of the lock.
 */
#define __local_lock(lock)					\
	do {							\
		preempt_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)
70
/* Acquire with local interrupts disabled (also implies no preemption). */
#define __local_lock_irq(lock)					\
	do {							\
		local_irq_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)
76
/* Acquire with interrupts disabled, saving the previous irq state in @flags. */
#define __local_lock_irqsave(lock, flags)			\
	do {							\
		local_irq_save(flags);				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)
82
/* Release counterpart of __local_lock(): debug release, then re-enable preemption. */
#define __local_unlock(lock)					\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		preempt_enable();				\
	} while (0)
88
/* Release counterpart of __local_lock_irq(): debug release, then re-enable irqs. */
#define __local_unlock_irq(lock)				\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_enable();				\
	} while (0)
94
/* Release counterpart of __local_lock_irqsave(): restore the saved irq state. */
#define __local_unlock_irqrestore(lock, flags)			\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_restore(flags);			\
	} while (0)
100
101#else /* !CONFIG_PREEMPT_RT */
102
/*
 * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
 * critical section while staying preemptible.  Consequently the _irq and
 * _irqsave variants below do not actually disable interrupts.
 */
typedef spinlock_t local_lock_t;
108
/* Static initializer: an unlocked per-CPU ("local") spinlock. */
#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
110
/* Runtime initializer: delegate to the RT per-CPU spinlock init. */
#define __local_lock_init(l)					\
	do {							\
		local_spin_lock_init((l));			\
	} while (0)
115
/*
 * Acquire: disable migration (not preemption) to stay on this CPU,
 * then take this CPU's spinlock — the section remains preemptible.
 */
#define __local_lock(__lock)					\
	do {							\
		migrate_disable();				\
		spin_lock(this_cpu_ptr((__lock)));		\
	} while (0)
121
/* On RT interrupts are left enabled; the irq variant is the plain lock. */
#define __local_lock_irq(lock)			__local_lock(lock)
123
/*
 * On RT no irq state is saved; @flags is type-checked and zeroed so
 * the matching __local_unlock_irqrestore() call site stays valid.
 */
#define __local_lock_irqsave(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__local_lock(lock);				\
	} while (0)
130
/* Release: drop this CPU's spinlock, then allow migration again. */
#define __local_unlock(__lock)					\
	do {							\
		spin_unlock(this_cpu_ptr((__lock)));		\
		migrate_enable();				\
	} while (0)
136
/* Counterpart of the RT __local_lock_irq(); no interrupt state to restore. */
#define __local_unlock_irq(lock)		__local_unlock(lock)
138
/* @flags is ignored on RT — nothing was saved by __local_lock_irqsave(). */
#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)
140
141#endif /* CONFIG_PREEMPT_RT */
142