/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

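/*
 * Pick the native-word-width LL/SC and AM* mnemonics: the .w encodings on
 * 32-bit kernels (sizeof(long) == 4) and the .d encodings on 64-bit
 * kernels. The *_DB names select the _db ("with barrier") forms of the
 * AM* atomic memory-access instructions.
 */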
#if __SIZEOF_LONG__ == 4
#define __LL		"ll.w	"
#define __SC		"sc.w	"
#define __AMADD		"amadd.w	"
#define __AMAND_DB	"amand_db.w	"
#define __AMOR_DB	"amor_db.w	"
#define __AMXOR_DB	"amxor_db.w	"
#elif __SIZEOF_LONG__ == 8
#define __LL		"ll.d	"
#define __SC		"sc.d	"
#define __AMADD		"amadd.d	"
#define __AMAND_DB	"amand_db.d	"
#define __AMOR_DB	"amor_db.d	"
#define __AMXOR_DB	"amxor_db.d	"
#endif

#define ATOMIC_INIT(i)	  { (i) }

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))

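/*
 * ATOMIC_OP() generates a void arch_atomic_<op>() that applies the update
 * with a single AM* atomic memory-access instruction. The old value is not
 * needed, so $zero is used as the destination register.
 */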
#define ATOMIC_OP(op, I, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op".w" " $zero, %1, %0	\n"				\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

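/*
 * ATOMIC_OP_RETURN() generates arch_atomic_<op>_return<suffix>(): the AM*
 * instruction deposits the old counter value in "result" and the new value
 * is recomputed in C as "result c_op I". Passing _db for "mb" selects the
 * fully ordered _db form of the instruction; the bare form is used for the
 * _relaxed variant.
 */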
#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix)		\
static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v)	\
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op#mb".w" " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result c_op I;						\
}

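/*
 * ATOMIC_FETCH_OP() generates arch_atomic_fetch_<op><suffix>(), which
 * returns the value the counter held before the operation, again in a
 * fully ordered (_db) and a _relaxed flavour.
 */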
#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix)			\
static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v)	\
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op#mb".w" " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result;							\
}

#define ATOMIC_OPS(op, I, asm_op, c_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db,         )		\
	ATOMIC_OP_RETURN(op, I, asm_op, c_op,    , _relaxed)		\
	ATOMIC_FETCH_OP(op, I, asm_op, _db,         )			\
	ATOMIC_FETCH_OP(op, I, asm_op,    , _relaxed)

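/*
 * Instantiate the add/sub families. Subtraction is implemented by adding
 * the negated operand (-i) rather than by a dedicated sub instruction.
 *
 * As a rough sketch, ATOMIC_OP(add, i, add) above expands to:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__(
 *		"amadd.w $zero, %1, %0	\n"
 *		: "+ZB" (v->counter)
 *		: "r" (i)
 *		: "memory");
 *	}
 */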
ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)

#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_add_return_acquire	arch_atomic_add_return
#define arch_atomic_add_return_release	arch_atomic_add_return
#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return		arch_atomic_sub_return
#define arch_atomic_sub_return_acquire	arch_atomic_sub_return
#define arch_atomic_sub_return_release	arch_atomic_sub_return
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_add_acquire	arch_atomic_fetch_add
#define arch_atomic_fetch_add_release	arch_atomic_fetch_add
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_acquire	arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_release	arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS

#define ATOMIC_OPS(op, I, asm_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op, _db,         )			\
	ATOMIC_FETCH_OP(op, I, asm_op,    , _relaxed)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_and_acquire	arch_atomic_fetch_and
#define arch_atomic_fetch_and_release	arch_atomic_fetch_and
#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_or_acquire	arch_atomic_fetch_or
#define arch_atomic_fetch_or_release	arch_atomic_fetch_or
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_acquire	arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_release	arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

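/*
 * Add @a to @v unless @v was equal to @u; return the value @v held before
 * the operation. This is an LL/SC retry loop: the loop restarts when sc.w
 * fails, and __WEAK_LLSC_MB is executed on the early-exit path where the
 * comparison matched and no store was performed.
 */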
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	ll.w	%[p],  %[c]\n"
		"	beq	%[p],  %[u], 1f\n"
		"	add.w	%[rc], %[p], %[a]\n"
		"	sc.w	%[rc], %[c]\n"
		"	beqz	%[rc], 0b\n"
		"	b	2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c]"=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

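/*
 * Subtract @i from @v and return the result, storing the new value only
 * when it is not negative. A compile-time-constant @i uses the immediate
 * form (addi.w with -i); otherwise @i is subtracted from a register. The
 * return value may be negative, indicating that no store was performed.
 */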
static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
{
	int result;
	int temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
		"	addi.w	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 2f					\n"
		"	sc.w	%1, %2					\n"
		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
		"	sub.w	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 2f					\n"
		"	sc.w	%1, %2					\n"
		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)    { (i) }

#define arch_atomic64_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

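/*
 * The 64-bit (atomic64_t) implementation mirrors the 32-bit one above,
 * using the .d forms of the LL/SC and AM* instructions and long operands.
 */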
#define ATOMIC64_OP(op, I, asm_op)					\
static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op".d " " $zero, %1, %0	\n"			\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix)			\
static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v)	\
{										\
	long result;								\
	__asm__ __volatile__(							\
	"am"#asm_op#mb".d " " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)					\
	: "r" (I)								\
	: "memory");								\
										\
	return result c_op I;							\
}

#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix)				\
static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v)	\
{										\
	long result;								\
										\
	__asm__ __volatile__(							\
	"am"#asm_op#mb".d " " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)					\
	: "r" (I)								\
	: "memory");								\
										\
	return result;								\
}

#define ATOMIC64_OPS(op, I, asm_op, c_op)				      \
	ATOMIC64_OP(op, I, asm_op)					      \
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db,         )		      \
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op,    , _relaxed)		      \
	ATOMIC64_FETCH_OP(op, I, asm_op, _db,         )			      \
	ATOMIC64_FETCH_OP(op, I, asm_op,    , _relaxed)

ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)

#define arch_atomic64_add_return		arch_atomic64_add_return
#define arch_atomic64_add_return_acquire	arch_atomic64_add_return
#define arch_atomic64_add_return_release	arch_atomic64_add_return
#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return		arch_atomic64_sub_return
#define arch_atomic64_sub_return_acquire	arch_atomic64_sub_return
#define arch_atomic64_sub_return_release	arch_atomic64_sub_return
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add			arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_acquire		arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_release		arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub			arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_acquire		arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_release		arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS

#define ATOMIC64_OPS(op, I, asm_op)					      \
	ATOMIC64_OP(op, I, asm_op)					      \
	ATOMIC64_FETCH_OP(op, I, asm_op, _db,         )			      \
	ATOMIC64_FETCH_OP(op, I, asm_op,    , _relaxed)

ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)

#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_acquire	arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_release	arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_acquire	arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_release	arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_acquire	arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_release	arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

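/*
 * 64-bit counterpart of arch_atomic_fetch_add_unless(): add @a to @v
 * unless @v was equal to @u, returning the old value, with the same LL/SC
 * retry loop and __WEAK_LLSC_MB on the no-store exit path.
 */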
static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	ll.d	%[p],  %[c]\n"
		"	beq	%[p],  %[u], 1f\n"
		"	add.d	%[rc], %[p], %[a]\n"
		"	sc.d	%[rc], %[c]\n"
		"	beqz	%[rc], 0b\n"
		"	b	2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c]"=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

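/*
 * 64-bit counterpart of arch_atomic_sub_if_positive(): subtract @i and
 * store the result only when it is not negative, returning the result.
 */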
static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
{
	long result;
	long temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
		"	addi.d	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 2f					\n"
		"	sc.d	%1, %2					\n"
		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
		"	sub.d	%0, %1, %3				\n"
		"	move	%1, %0					\n"
		"	bltz	%0, 2f					\n"
		"	sc.d	%1, %2					\n"
		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */