/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve mainly as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Architectures that want generic instrumentation can define __ prefixed
 * variants of all barriers.
 */
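/*
 * For example, an architecture might supply only the uninstrumented
 * primitive and let this header add the KCSAN hooks. A hypothetical
 * sketch (the "dmb ish" instruction is illustrative, not something
 * this header requires):
 *
 *	// arch/foo/include/asm/barrier.h
 *	#define __mb()	asm volatile ("dmb ish" ::: "memory")
 *
 * With that definition in place, mb() below expands to
 * kcsan_mb(); __mb();, so KCSAN observes the barrier without the
 * architecture instrumenting anything itself.
 */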

#ifdef __mb
#define mb()	do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
#endif

#ifdef __wmb
#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
#endif

#ifdef __dma_mb
#define dma_mb()	do { kcsan_mb(); __dma_mb(); } while (0)
#endif

#ifdef __dma_rmb
#define dma_rmb()	do { kcsan_rmb(); __dma_rmb(); } while (0)
#endif

#ifdef __dma_wmb
#define dma_wmb()	do { kcsan_wmb(); __dma_wmb(); } while (0)
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_mb
#define dma_mb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif
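
/*
 * A typical use of the dma_*mb() barriers is ordering CPU writes to a
 * coherent DMA descriptor against the write that hands it to the
 * device. A minimal sketch, assuming a hypothetical descriptor layout
 * and ownership flag:
 *
 *	desc->addr = buf_dma;
 *	desc->len = buf_len;
 *	dma_wmb();			// payload visible before ownership
 *	desc->owner = OWNER_DEVICE;
 *
 * Symmetrically, a consumer would check desc->owner, issue dma_rmb(),
 * and only then read the descriptor payload.
 */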

#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	do { kcsan_mb(); __smp_mb(); } while (0)
#endif

#ifndef smp_rmb
#define smp_rmb()	do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif

#ifndef smp_wmb
#define smp_wmb()	do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#endif	/* CONFIG_SMP */
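
/*
 * smp_mb() is what makes the classic store-buffering test work: each
 * CPU must order its own store before its load of the other CPU's
 * flag. A minimal sketch, with flag0/flag1/r0/r1 purely illustrative:
 *
 *	// CPU 0			// CPU 1
 *	WRITE_ONCE(flag0, 1);		WRITE_ONCE(flag1, 1);
 *	smp_mb();			smp_mb();
 *	r0 = READ_ONCE(flag1);		r1 = READ_ONCE(flag0);
 *
 * With both barriers, the outcome r0 == 0 && r1 == 0 is forbidden;
 * remove either one and it becomes possible.
 */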

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif
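
/*
 * The *_mb__before_atomic()/*_mb__after_atomic() pairs exist because
 * RMW atomics that do not return a value (atomic_inc(), atomic_dec(),
 * set_bit(), ...) are unordered. A minimal sketch, with obj->refs and
 * obj->published purely illustrative:
 *
 *	atomic_inc(&obj->refs);
 *	smp_mb__after_atomic();		// order the inc before the store
 *	WRITE_ONCE(obj->published, 1);
 *
 * Architectures whose RMW atomics are already fully ordered can
 * override these to barrier().
 */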

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	barrier();							\
	(typeof(*p))___p1;						\
})
#endif

#endif	/* CONFIG_SMP */
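
/*
 * smp_store_release()/smp_load_acquire() are the preferred primitives
 * for publishing data between CPUs. A minimal message-passing sketch,
 * with data/ready purely illustrative:
 *
 *	// producer			// consumer
 *	WRITE_ONCE(data, 42);		if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		r = READ_ONCE(data);
 *
 * The release store orders the write to data before the write to
 * ready; the acquire load orders the read of ready before the read of
 * data, so the consumer cannot see ready == 1 with stale data.
 */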

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#define virt_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)
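
/*
 * The virt_*() variants are for guest code that shares memory with an
 * SMP host or hypervisor even when the guest itself is !CONFIG_SMP.
 * A minimal sketch, with a purely illustrative shared ring:
 *
 *	ring[idx] = req;
 *	virt_wmb();			// entry visible before index update
 *	WRITE_ONCE(shared->idx, idx + 1);
 *
 * A plain smp_wmb() would compile down to barrier() on !CONFIG_SMP and
 * provide no ordering against the host.
 */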

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE ordering; the additional RMB
 * provides LOAD->LOAD ordering. Together they provide LOAD->{LOAD,STORE}
 * ordering, i.e. (load-)ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()		smp_rmb()
#endif
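
/*
 * A minimal sketch of upgrading a control dependency to ACQUIRE, with
 * obj->done and obj->result purely illustrative:
 *
 *	while (!READ_ONCE(obj->done))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();	// LOAD->LOAD now ordered too
 *	r = obj->result;
 *
 * The control dependency from the loop orders the load of obj->done
 * only against later stores; the barrier supplies the missing
 * LOAD->LOAD half.
 */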

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Because C lacks lambda expressions, the value of *ptr is loaded into a
 * pre-named variable @VAL for use in @cond.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif
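
/*
 * Example usage, spinning until a hypothetical lock word is released;
 * note how @VAL names the freshly loaded value inside the condition:
 *
 *	val = smp_cond_load_relaxed(&lock->val, !(VAL & LOCKED));
 *
 * Use smp_cond_load_acquire() below instead when the code after the
 * wait must observe writes made before the condition became true.
 */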

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif
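
/*
 * Example usage, waiting for a hypothetical per-node handoff flag
 * before entering a critical section:
 *
 *	smp_cond_load_acquire(&node->locked, VAL);
 *
 * All writes the previous owner made before setting node->locked are
 * then guaranteed visible; this mirrors what MCS-style lock slowpaths
 * do with their per-node locked field.
 */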

/*
 * pmem_wmb() ensures that all stores to persistent storage issued by
 * preceding instructions have updated persistent storage before any data
 * access or data transfer caused by subsequent instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()	wmb()
#endif
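
/*
 * A minimal sketch, assuming a buffer that is mapped to persistent
 * memory and a purely illustrative doorbell register:
 *
 *	memcpy_flushcache(pmem_buf, src, len);	// stores head to pmem
 *	pmem_wmb();				// ...and land before the kick
 *	writel(1, ioaddr + DOORBELL);
 *
 * Without the barrier the device could be kicked while earlier stores
 * are still in flight to the persistence domain.
 */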

/*
 * ioremap_wc() maps I/O memory as memory with write-combining attributes.
 * For this kind of memory access, the CPU may wait for prior accesses to
 * be merged with subsequent ones, which in some situations hurts
 * performance. io_stop_wc() prevents the write-combining memory accesses
 * before this macro from being merged with those after it.
 */
#ifndef io_stop_wc
#define io_stop_wc() do { } while (0)
#endif
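
/*
 * A minimal sketch, assuming a write-combining mapping where the first
 * burst of writes must not be merged with the second (wc_base and the
 * lengths are purely illustrative):
 *
 *	wc_base = ioremap_wc(phys, size);
 *	memcpy_toio(wc_base, hdr, hdr_len);
 *	io_stop_wc();			// split the write-combine buffer here
 *	memcpy_toio(wc_base + hdr_len, payload, payload_len);
 *
 * On most architectures this is a no-op; arm64, for instance, maps it
 * to a DGH hint.
 */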

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */