/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

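/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */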
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return __READ_ONCE((v)->counter);
}

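/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */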
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__WRITE_ONCE(v->counter, i);
}

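/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */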
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

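/**
 * arch_atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */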
static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

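/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */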
static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

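/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */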
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

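/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */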
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

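/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */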
static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

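/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */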
static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

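/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */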
static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

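/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */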
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return

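/**
 * arch_atomic64_sub_return - subtract and return
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */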
static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

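/**
 * arch_atomic64_fetch_add - add and return old value
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns the old value of @v.
 */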
static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

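/**
 * arch_atomic64_fetch_sub - subtract and return old value
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns the old value of @v.
 */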
static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub

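/**
 * arch_atomic64_cmpxchg - compare and exchange
 * @v: pointer to type atomic64_t
 * @old: expected value
 * @new: new value
 *
 * Atomically sets @v to @new if it was equal to @old and
 * returns the old value of @v.
 */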
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

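/**
 * arch_atomic64_try_cmpxchg - compare and exchange, with result
 * @v: pointer to type atomic64_t
 * @old: pointer to expected value, updated to the actual value on failure
 * @new: new value
 *
 * Atomically sets @v to @new if it was equal to *@old and
 * returns true; on failure, writes the current value of @v
 * back to *@old and returns false.
 */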
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

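/**
 * arch_atomic64_xchg - exchange
 * @v: pointer to type atomic64_t
 * @new: new value
 *
 * Atomically sets @v to @new and returns the old value of @v.
 */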
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg

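/**
 * arch_atomic64_and - bitwise AND with atomic64 variable
 * @i: integer value to AND with
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v & @i.
 */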
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

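/*
 * x86 has no fetching form of the bitwise instructions: unlike
 * "xadd", "andq" does not return the old value. The fetch_{and,or,xor}
 * variants are therefore built from a try_cmpxchg() loop, retrying with
 * the re-read old value until the update lands without @v having
 * changed underneath us.
 */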
static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

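/**
 * arch_atomic64_or - bitwise OR with atomic64 variable
 * @i: integer value to OR with
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v | @i.
 */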
static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

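/* OR and return the old value; see the loop comment above arch_atomic64_fetch_and(). */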
static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

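/**
 * arch_atomic64_xor - bitwise XOR with atomic64 variable
 * @i: integer value to XOR with
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v ^ @i.
 */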
static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

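/* XOR and return the old value; see the loop comment above arch_atomic64_fetch_and(). */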
static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

#endif /* _ASM_X86_ATOMIC64_64_H */