/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different spinlock.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
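
/*
 * Illustration only: ATOMIC_HASH() folds the address of an atomic_t onto
 * one of the ATOMIC_HASH_SIZE locks, so (assuming L1_CACHE_BYTES == 64,
 * for example) atomics that share a cacheline share a lock, while atomics
 * on different cachelines can usually be updated in parallel:
 *
 *	atomic_t a, b;				// hypothetical counters
 *	arch_spinlock_t *la = ATOMIC_HASH(&a);	// slot for &a's cacheline
 *	arch_spinlock_t *lb = ATOMIC_HASH(&b);	// may or may not equal la
 */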

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute.
 */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while (0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while (0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif /* CONFIG_SMP */

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}
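
/*
 * Illustration only (hypothetical caller): stores go through the hashed
 * lock, so a concurrent lockless reader sees either the old or the new
 * word, never a torn value:
 *
 *	arch_atomic_set(&v, 42);	// CPU 0, under the hashed lock
 *	x = arch_atomic_read(&v);	// CPU 1, no lock: old value or 42
 *
 * READ_ONCE() additionally keeps the compiler from tearing or caching
 * the load.
 */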

#define ATOMIC_OP(op, c_op)						\
static __inline__ void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
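
/*
 * For illustration only: ATOMIC_OPS(add, +=) above expands to roughly the
 * following helpers (shown here as a comment, not real code):
 *
 *	static __inline__ void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		_atomic_spin_lock_irqsave(v, flags);
 *		v->counter += i;
 *		_atomic_spin_unlock_irqrestore(v, flags);
 *	}
 *
 * arch_atomic_add_return() does the same but returns the new value, and
 * arch_atomic_fetch_add() returns the value seen before the update.
 * ATOMIC_OPS(sub, -=) generates the matching sub helpers.
 */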

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
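
/*
 * Note: ATOMIC_OPS() was redefined above without ATOMIC_OP_RETURN(), so
 * the bitwise instantiations only generate arch_atomic_and()/or()/xor()
 * and their arch_atomic_fetch_*() forms; no
 * arch_atomic_{and,or,xor}_return() helpers exist.
 */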

#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
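
/*
 * Illustration only: these mirror the 32-bit expansion with s64 operands,
 * e.g. ATOMIC64_OPS(add, +=) generates arch_atomic64_add(),
 * arch_atomic64_add_return() and arch_atomic64_fetch_add(), each taking
 * the same hashed spinlock as the 32-bit helpers.
 */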

#define arch_atomic64_add_return	arch_atomic64_add_return
#define arch_atomic64_sub_return	arch_atomic64_sub_return
#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic64_set_release(v, i)	arch_atomic64_set((v), (i))

static __inline__ s64
arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}
#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */