/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

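/*
 * ST* forms of the LSE atomics: read-modify-write with no return value
 * and no ordering guarantees, hence no "memory" clobber.
 */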
#define ATOMIC_OP(op, asm_op)						\
static __always_inline void						\
__lse_atomic_##op(int i, atomic_t *v)					\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%w[i], %[v]\n"				\
	: [v] "+Q" (v->counter)						\
	: [i] "r" (i));							\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
{
	__lse_atomic_add(-i, v);
}

#undef ATOMIC_OP

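/*
 * LD* forms return the old value. 'mb' selects the ordering suffix on
 * the instruction (none/a/l/al) and 'cl' carries the clobber list
 * ("memory" for the ordered variants).
 */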
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static __always_inline int						\
__lse_atomic_fetch_##op##name(int i, atomic_t *v)			\
{									\
	int old;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%w[i], %w[old], %[v]"			\
	: [v] "+Q" (v->counter),					\
	  [old] "=r" (old)						\
	: [i] "r" (i)							\
	: cl);								\
									\
	return old;							\
}

#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

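/* There is no subtract instruction in LSE; subtraction adds -i instead. */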
#define ATOMIC_FETCH_OP_SUB(name)					\
static __always_inline int						\
__lse_atomic_fetch_sub##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_add##name(-i, v);			\
}

ATOMIC_FETCH_OP_SUB(_relaxed)
ATOMIC_FETCH_OP_SUB(_acquire)
ATOMIC_FETCH_OP_SUB(_release)
ATOMIC_FETCH_OP_SUB(        )

#undef ATOMIC_FETCH_OP_SUB

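/* The *_return forms are derived from the corresponding fetch forms. */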
#define ATOMIC_OP_ADD_SUB_RETURN(name)					\
static __always_inline int						\
__lse_atomic_add_return##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_add##name(i, v) + i;			\
}									\
									\
static __always_inline int						\
__lse_atomic_sub_return##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_sub##name(i, v) - i;			\
}

ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC_OP_ADD_SUB_RETURN(_acquire)
ATOMIC_OP_ADD_SUB_RETURN(_release)
ATOMIC_OP_ADD_SUB_RETURN(        )

#undef ATOMIC_OP_ADD_SUB_RETURN

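/* There is no AND instruction in LSE; AND of i is ANDNOT (clear) of ~i. */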
static __always_inline void __lse_atomic_and(int i, atomic_t *v)
{
	return __lse_atomic_andnot(~i, v);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static __always_inline int						\
__lse_atomic_fetch_and##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_andnot##name(~i, v);			\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

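/* 64-bit counterparts of the 32-bit operations above. */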
#define ATOMIC64_OP(op, asm_op)						\
static __always_inline void						\
__lse_atomic64_##op(s64 i, atomic64_t *v)				\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%[i], %[v]\n"				\
	: [v] "+Q" (v->counter)						\
	: [i] "r" (i));							\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	__lse_atomic64_add(-i, v);
}

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static __always_inline long						\
__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
{									\
	s64 old;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%[i], %[old], %[v]"			\
	: [v] "+Q" (v->counter),					\
	  [old] "=r" (old)						\
	: [i] "r" (i)							\
	: cl);								\
									\
	return old;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_FETCH_OP_SUB(name)					\
static __always_inline long						\
__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_add##name(-i, v);			\
}

ATOMIC64_FETCH_OP_SUB(_relaxed)
ATOMIC64_FETCH_OP_SUB(_acquire)
ATOMIC64_FETCH_OP_SUB(_release)
ATOMIC64_FETCH_OP_SUB(        )

#undef ATOMIC64_FETCH_OP_SUB

#define ATOMIC64_OP_ADD_SUB_RETURN(name)				\
static __always_inline long						\
__lse_atomic64_add_return##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_add##name(i, v) + i;		\
}									\
									\
static __always_inline long						\
__lse_atomic64_sub_return##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_sub##name(i, v) - i;		\
}

ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
ATOMIC64_OP_ADD_SUB_RETURN(_release)
ATOMIC64_OP_ADD_SUB_RETURN(        )

#undef ATOMIC64_OP_ADD_SUB_RETURN

static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	return __lse_atomic64_andnot(~i, v);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static __always_inline long						\
__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_andnot##name(~i, v);		\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

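/*
 * dec_if_positive has no single LSE instruction, so it is built from a
 * CASAL loop: load the counter, bail out before going negative, then
 * CAS in the decremented value and retry if another CPU raced with us.
 * The pointer register doubles as the return value register.
 */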
static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"1:	ldr	%x[tmp], %[v]\n"
	"	subs	%[ret], %x[tmp], #1\n"
	"	b.lt	2f\n"
	"	casal	%x[tmp], %[ret], %[v]\n"
	"	sub	%x[tmp], %x[tmp], #1\n"
	"	sub	%x[tmp], %x[tmp], %[ret]\n"
	"	cbnz	%x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}

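/*
 * cmpxchg via the CAS instruction, with one variant per operand size
 * (8/16/32/64 bits) and ordering (relaxed/acquire/release/full).
 */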
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
static __always_inline u##sz						\
__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
					      u##sz old,		\
					      u##sz new)		\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	cas" #mb #sfx "	%" #w "[old], %" #w "[new], %[v]\n"	\
	: [v] "+Q" (*(u##sz *)ptr),					\
	  [old] "+r" (old)						\
	: [new] "rZ" (new)						\
	: cl);								\
									\
	return old;							\
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

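/*
 * 128-bit cmpxchg via CASP, which expects the old and new values in
 * consecutive even/odd register pairs (x0/x1 and x2/x3 here).
 */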
#define __CMPXCHG128(name, mb, cl...)					\
static __always_inline u128						\
__lse__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new)		\
{									\
	union __u128_halves r, o = { .full = (old) },			\
			       n = { .full = (new) };			\
	register unsigned long x0 asm ("x0") = o.low;			\
	register unsigned long x1 asm ("x1") = o.high;			\
	register unsigned long x2 asm ("x2") = n.low;			\
	register unsigned long x3 asm ("x3") = n.high;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(u128 *)ptr)					\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (o.low), [oldval2] "r" (o.high)			\
	: cl);								\
									\
	r.low = x0; r.high = x1;					\
									\
	return r.full;							\
}

__CMPXCHG128(   ,   )
__CMPXCHG128(_mb, al, "memory")

#undef __CMPXCHG128

#endif	/* __ASM_ATOMIC_LSE_H */