/* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */

/*-
 * Copyright (C) 2003-2004 Olivier Houchard
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/arm/include/atomic-v4.h 331643 2018-03-27 18:52:27Z dim $
 */
#ifndef _MACHINE_ATOMIC_V4_H_
#define	_MACHINE_ATOMIC_V4_H_

#ifndef _MACHINE_ATOMIC_H_
#error Do not include this file directly, use <machine/atomic.h>
#endif

#if __ARM_ARCH <= 5
#define isb()  __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define dsb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define dmb()  dsb()
#else
#error Only use this file with ARMv5 and earlier
#endif

#define mb()   dmb()
#define wmb()  dmb()
#define rmb()  dmb()

#define __with_interrupts_disabled(expr) \
	do {						\
		u_int cpsr_save, tmp;			\
							\
		__asm __volatile(			\
			"mrs  %0, cpsr;"		\
			"orr  %1, %0, %2;"		\
			"msr  cpsr_fsxc, %1;"		\
			: "=r" (cpsr_save), "=r" (tmp)	\
			: "I" (PSR_I | PSR_F)		\
			: "cc" );			\
		(expr);					\
		__asm __volatile(			\
			"msr  cpsr_fsxc, %0"		\
			: /* no output */		\
			: "r" (cpsr_save)		\
			: "cc" );			\
	} while (0)
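
/*
 * Illustrative sketch (not part of the original header): on a uniprocessor
 * ARMv4/v5 kernel, wrapping a read-modify-write in __with_interrupts_disabled()
 * is what makes it atomic.  The expression runs with IRQ (PSR_I) and FIQ
 * (PSR_F) masked and the saved CPSR is restored afterwards.  A hypothetical
 * two-word update could look like:
 *
 *	static __inline void
 *	example_add_pair(volatile uint32_t *a, volatile uint32_t *b, uint32_t v)
 *	{
 *		__with_interrupts_disabled({
 *			*a += v;
 *			*b += v;
 *		});
 *	}
 */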

static __inline uint32_t
__swp(uint32_t val, volatile uint32_t *ptr)
{
	__asm __volatile("swp	%0, %2, [%3]"
	    : "=&r" (val), "=m" (*ptr)
	    : "r" (val), "r" (ptr), "m" (*ptr)
	    : "memory");
	return (val);
}
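
/*
 * Usage sketch (illustrative only): __swp() is the classic ARM SWP-based
 * register/memory exchange; it backs atomic_swap_32() and
 * atomic_readandclear_32() below.  A minimal test-and-set style lock built
 * on it (example_lock is a hypothetical uint32_t) could be spelled as:
 *
 *	while (__swp(1, &example_lock) != 0)
 *		;			(spin until the old value was 0)
 *	... critical section ...
 *	example_lock = 0;		(release)
 *
 * SWP is deprecated on later ARM architectures in favour of LDREX/STREX,
 * which is why this file is only used for __ARM_ARCH <= 5.
 */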

#ifdef _KERNEL
#define	ARM_HAVE_ATOMIC64

static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{
	__with_interrupts_disabled(*p += val);
}

static __inline void
atomic_add_64(volatile u_int64_t *p, u_int64_t val)
{
	__with_interrupts_disabled(*p += val);
}

static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
{
	__with_interrupts_disabled(*address &= ~clearmask);
}

static __inline void
atomic_clear_64(volatile uint64_t *address, uint64_t clearmask)
{
	__with_interrupts_disabled(*address &= ~clearmask);
}

static __inline int
atomic_fcmpset_32(volatile u_int32_t *p, volatile u_int32_t *cmpval, volatile u_int32_t newval)
{
	u_int32_t ret;

	__with_interrupts_disabled(
	{
		if (*p == *cmpval) {
			*p = newval;
			ret = 1;
		} else {
			*cmpval = *p;
			ret = 0;
		}
	});
	return (ret);
}
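
/*
 * Typical fcmpset usage (illustrative sketch, counter is a hypothetical
 * uint32_t *): the caller keeps the last observed value in a local and
 * retries until the update succeeds.  On failure *cmpval is refreshed with
 * the current contents of *p, so the loop never re-reads the target itself:
 *
 *	uint32_t old, newval;
 *
 *	old = *counter;
 *	do {
 *		newval = old + 1;
 *	} while (atomic_fcmpset_32(counter, &old, newval) == 0);
 */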

static __inline int
atomic_fcmpset_64(volatile u_int64_t *p, volatile u_int64_t *cmpval, volatile u_int64_t newval)
{
	u_int64_t ret;

	__with_interrupts_disabled(
	{
		if (*p == *cmpval) {
			*p = newval;
			ret = 1;
		} else {
			*cmpval = *p;
			ret = 0;
		}
	});
	return (ret);
}

static __inline u_int32_t
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
	int ret;

	__with_interrupts_disabled(
	{
		if (*p == cmpval) {
			*p = newval;
			ret = 1;
		} else {
			ret = 0;
		}
	});
	return (ret);
}

static __inline u_int64_t
atomic_cmpset_64(volatile u_int64_t *p, volatile u_int64_t cmpval, volatile u_int64_t newval)
{
	int ret;

	__with_interrupts_disabled(
	{
		if (*p == cmpval) {
			*p = newval;
			ret = 1;
		} else {
			ret = 0;
		}
	});
	return (ret);
}

static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
	uint32_t value;

	__with_interrupts_disabled(
	{
		value = *p;
		*p += v;
	});
	return (value);
}
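
/*
 * atomic_fetchadd_32() returns the value the target held before the
 * addition, e.g. handing out ticket numbers (illustrative, next_ticket is
 * a hypothetical shared uint32_t):
 *
 *	uint32_t my_ticket = atomic_fetchadd_32(&next_ticket, 1);
 */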

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t value;

	__with_interrupts_disabled(
	{
		value = *p;
		*p += v;
	});
	return (value);
}

static __inline uint64_t
atomic_load_64(volatile uint64_t *p)
{
	uint64_t value;

	__with_interrupts_disabled(value = *p);
	return (value);
}

static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
	__with_interrupts_disabled(*address |= setmask);
}

static __inline void
atomic_set_64(volatile uint64_t *address, uint64_t setmask)
{
	__with_interrupts_disabled(*address |= setmask);
}

static __inline void
atomic_store_64(volatile uint64_t *p, uint64_t value)
{
	__with_interrupts_disabled(*p = value);
}

static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{
	__with_interrupts_disabled(*p -= val);
}

static __inline void
atomic_subtract_64(volatile u_int64_t *p, u_int64_t val)
{
	__with_interrupts_disabled(*p -= val);
}

#else /* !_KERNEL */

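/*
 * Explanatory note (added by the editor): userland on ARMv4/v5 cannot mask
 * interrupts and has no LDREX/STREX, so the routines below rely on
 * restartable atomic sequences (RAS).  Roughly: before the load/modify/store
 * the code publishes the address of label 1 (the start of the sequence) at
 * ARM_RAS_START and the address of label 2 (its end) in the following word;
 * if the thread is preempted while its PC lies inside that window, the
 * kernel restarts the sequence from its beginning.  The trailing stores of
 * 0 and 0xffffffff close the window again.  This only provides atomicity
 * against preemption on a single CPU, which is all ARMv4/v5 needs.
 */
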
static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{
	int start, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "add	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    "mov	%1, #0\n"
	    "str	%1, [%0]\n"
	    "mov	%1, #0xffffffff\n"
	    "str	%1, [%0, #4]\n"
	    : "+r" (ras_start), "=r" (start), "+r" (p), "+r" (val)
	    : : "memory");
}

static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
{
	int start, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "bic	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    "mov	%1, #0\n"
	    "str	%1, [%0]\n"
	    "mov	%1, #0xffffffff\n"
	    "str	%1, [%0, #4]\n"
	    : "+r" (ras_start), "=r" (start), "+r" (address), "+r" (clearmask)
	    : : "memory");
}

static __inline u_int32_t
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
	int done, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "cmp	%1, %3\n"
	    "streq	%4, [%2]\n"
	    "2:\n"
	    "mov	%1, #0\n"
	    "str	%1, [%0]\n"
	    "mov	%1, #0xffffffff\n"
	    "str	%1, [%0, #4]\n"
	    "moveq	%1, #1\n"
	    "movne	%1, #0\n"
	    : "+r" (ras_start), "=r" (done),
	      "+r" (p), "+r" (cmpval), "+r" (newval)
	    : : "cc", "memory");
	return (done);
}

static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
	uint32_t start, tmp, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%3]\n"
	    "mov	%2, %1\n"
	    "add	%2, %2, %4\n"
	    "str	%2, [%3]\n"
	    "2:\n"
	    "mov	%2, #0\n"
	    "str	%2, [%0]\n"
	    "mov	%2, #0xffffffff\n"
	    "str	%2, [%0, #4]\n"
	    : "+r" (ras_start), "=r" (start), "=r" (tmp), "+r" (p), "+r" (v)
	    : : "memory");
	return (start);
}

static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
	int start, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "orr	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    "mov	%1, #0\n"
	    "str	%1, [%0]\n"
	    "mov	%1, #0xffffffff\n"
	    "str	%1, [%0, #4]\n"
	    : "+r" (ras_start), "=r" (start), "+r" (address), "+r" (setmask)
	    : : "memory");
}

static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{
	int start, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "sub	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    "mov	%1, #0\n"
	    "str	%1, [%0]\n"
	    "mov	%1, #0xffffffff\n"
	    "str	%1, [%0, #4]\n"
	    : "+r" (ras_start), "=r" (start), "+r" (p), "+r" (val)
	    : : "memory");
}

#endif /* _KERNEL */

static __inline uint32_t
atomic_readandclear_32(volatile u_int32_t *p)
{

	return (__swp(0, p));
}

static __inline uint32_t
atomic_swap_32(volatile u_int32_t *p, u_int32_t v)
{

	return (__swp(v, p));
}

#define atomic_fcmpset_rel_32	atomic_fcmpset_32
#define atomic_fcmpset_acq_32	atomic_fcmpset_32
#define atomic_fcmpset_rel_64	atomic_fcmpset_64
#define atomic_fcmpset_acq_64	atomic_fcmpset_64
#define atomic_fcmpset_acq_long	atomic_fcmpset_long
#define atomic_fcmpset_rel_long	atomic_fcmpset_long
#define atomic_cmpset_rel_32	atomic_cmpset_32
#define atomic_cmpset_acq_32	atomic_cmpset_32
#define atomic_cmpset_rel_64	atomic_cmpset_64
#define atomic_cmpset_acq_64	atomic_cmpset_64
#define atomic_set_rel_32	atomic_set_32
#define atomic_set_acq_32	atomic_set_32
#define atomic_clear_rel_32	atomic_clear_32
#define atomic_clear_acq_32	atomic_clear_32
#define atomic_add_rel_32	atomic_add_32
#define atomic_add_acq_32	atomic_add_32
#define atomic_subtract_rel_32	atomic_subtract_32
#define atomic_subtract_acq_32	atomic_subtract_32
#define atomic_store_rel_32	atomic_store_32
#define atomic_store_rel_long	atomic_store_long
#define atomic_load_acq_32	atomic_load_32
#define atomic_load_acq_long	atomic_load_long
#define atomic_add_acq_long		atomic_add_long
#define atomic_add_rel_long		atomic_add_long
#define atomic_subtract_acq_long	atomic_subtract_long
#define atomic_subtract_rel_long	atomic_subtract_long
#define atomic_clear_acq_long		atomic_clear_long
#define atomic_clear_rel_long		atomic_clear_long
#define atomic_set_acq_long		atomic_set_long
#define atomic_set_rel_long		atomic_set_long
#define atomic_cmpset_acq_long		atomic_cmpset_long
#define atomic_cmpset_rel_long		atomic_cmpset_long
#undef __with_interrupts_disabled

static __inline void
atomic_add_long(volatile u_long *p, u_long v)
{

	atomic_add_32((volatile uint32_t *)p, v);
}

static __inline void
atomic_clear_long(volatile u_long *p, u_long v)
{

	atomic_clear_32((volatile uint32_t *)p, v);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long old, u_long newe)
{

	return (atomic_cmpset_32((volatile uint32_t *)dst, old, newe));
}

#ifdef _KERNEL
/* atomic_fcmpset_32() is only implemented for the kernel above, so this is kernel-only too. */
static __inline u_long
atomic_fcmpset_long(volatile u_long *dst, u_long *old, u_long newe)
{

	return (atomic_fcmpset_32((volatile uint32_t *)dst,
	    (uint32_t *)old, newe));
}
#endif

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_32((volatile uint32_t *)p, v));
}

static __inline u_long
atomic_readandclear_long(volatile u_long *p)
{

	return (atomic_readandclear_32((volatile uint32_t *)p));
}

static __inline void
atomic_set_long(volatile u_long *p, u_long v)
{

	atomic_set_32((volatile uint32_t *)p, v);
}

static __inline void
atomic_subtract_long(volatile u_long *p, u_long v)
{

	atomic_subtract_32((volatile uint32_t *)p, v);
}

/*
 * ARMv5 does not support SMP.  For both kernel and user modes, only a
 * compiler barrier is needed for fences, since a single CPU always
 * observes its own memory accesses in program order.
 */
static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__compiler_membar();
}
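
/*
 * Illustrative example of the acquire/release fences above (hypothetical
 * producer/consumer pair; data and flag are shared variables):
 *
 *	producer:
 *		data = 42;
 *		atomic_thread_fence_rel();	(order data store before flag store)
 *		flag = 1;
 *
 *	consumer:
 *		while (flag == 0)
 *			;
 *		atomic_thread_fence_acq();	(order flag load before data load)
 *		use(data);
 *
 * On ARMv5 these compile to nothing more than a compiler barrier, which is
 * sufficient on a uniprocessor.
 */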

#endif /* _MACHINE_ATOMIC_V4_H_ */