/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: src/sys/alpha/include/atomic.h,v 1.21.2.3 2005/10/06 18:12:05 jhb
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#include <sys/atomic_common.h>

/*
 * Note: All the 64-bit atomic operations are only atomic when running
 * in 64-bit mode.  It is assumed that code compiled for n32 and n64
 * fits into this definition and no further safeties are needed.
 *
 * It is also assumed that the add, subtract and other arithmetic is
 * done on numbers, not pointers.  Pointers under n32 follow special
 * rules and have no atomic operations defined for them, but they
 * generally should not need any.
 */
#ifndef __MIPS_PLATFORM_SYNC_NOPS
#define __MIPS_PLATFORM_SYNC_NOPS ""
#endif

static __inline  void
mips_sync(void)
{
	__asm __volatile (".set noreorder\n"
			"\tsync\n"
			__MIPS_PLATFORM_SYNC_NOPS
			".set reorder\n"
			: : : "memory");
}

#define mb()	mips_sync()
#define wmb()	mips_sync()
#define rmb()	mips_sync()
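
/*
 * All three barriers above map to a full SYNC via mips_sync().
 * MIPS32r2 and later also define lighter SYNC variants (a nonzero
 * stype operand), but this header conservatively uses the full
 * barrier for mb(), wmb() and rmb() alike.  In C11 terms the effect
 * is roughly that of (illustrative only, not used here):
 *
 *	atomic_thread_fence(memory_order_seq_cst);
 */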

/*
 * Various simple arithmetic operations on memory which are atomic in
 * the presence of interrupts and SMP safe.
 */

void atomic_set_8(__volatile uint8_t *, uint8_t);
void atomic_clear_8(__volatile uint8_t *, uint8_t);
void atomic_add_8(__volatile uint8_t *, uint8_t);
void atomic_subtract_8(__volatile uint8_t *, uint8_t);

void atomic_set_16(__volatile uint16_t *, uint16_t);
void atomic_clear_16(__volatile uint16_t *, uint16_t);
void atomic_add_16(__volatile uint16_t *, uint16_t);
void atomic_subtract_16(__volatile uint16_t *, uint16_t);
static __inline void
atomic_set_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;

	__asm __volatile (
		"1:\tll	%0, %3\n\t"		/* load old value */
		"or	%0, %2, %0\n\t"		/* calculate new value */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}
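
/*
 * The pattern above is the load-linked/store-conditional retry loop
 * used throughout this file.  As a rough C-level sketch (illustrative
 * only; ll_32() and sc_32() are hypothetical stand-ins for the ll/sc
 * instruction pair, which has no direct C equivalent):
 *
 *	do {
 *		temp = ll_32(p);		// ll: load, open link
 *		temp |= v;			// compute new value
 *	} while (sc_32(p, temp) == 0);		// sc: store iff link held
 */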

static __inline void
atomic_clear_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;
	v = ~v;

	__asm __volatile (
		"1:\tll	%0, %3\n\t"		/* load old value */
		"and	%0, %2, %0\n\t"		/* calculate new value */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_add_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;

	__asm __volatile (
		"1:\tll	%0, %3\n\t"		/* load old value */
		"addu	%0, %2, %0\n\t"		/* calculate new value */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_subtract_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;

	__asm __volatile (
		"1:\tll	%0, %3\n\t"		/* load old value */
		"subu	%0, %2\n\t"		/* calculate new value */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline uint32_t
atomic_readandclear_32(__volatile uint32_t *addr)
{
	uint32_t result, temp;

	__asm __volatile (
		"1:\tll	 %0, %3\n\t"		/* load current value, asserting lock */
		"li	 %1, 0\n\t"		/* value to store */
		"sc	 %1, %2\n\t"		/* attempt to store */
		"beqz	 %1, 1b\n\t"		/* if the store failed, spin */
		: "=&r"(result), "=&r"(temp), "=m" (*addr)
		: "m" (*addr)
		: "memory");

	return (result);
}

static __inline uint32_t
atomic_readandset_32(__volatile uint32_t *addr, uint32_t value)
{
	uint32_t result, temp;

	__asm __volatile (
		"1:\tll	 %0, %3\n\t"		/* load current value, asserting lock */
		"or	 %1, $0, %4\n\t"	/* value to store */
		"sc	 %1, %2\n\t"		/* attempt to store */
		"beqz	 %1, 1b\n\t"		/* if the store failed, spin */
		: "=&r"(result), "=&r"(temp), "=m" (*addr)
		: "m" (*addr), "r" (value)
		: "memory");

	return (result);
}
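
/*
 * Illustrative use of atomic_readandset_32 (a sketch only; "lock" is
 * a hypothetical variable, not part of this interface): a minimal
 * test-and-set spin loop.  Note that atomic_readandset_32 itself has
 * no barrier semantics, so the sketch adds mips_sync() explicitly.
 *
 *	while (atomic_readandset_32(&lock, 1) != 0)
 *		;			// spin until we flip 0 -> 1
 *	mips_sync();			// acquire barrier
 *	// ... critical section ...
 *	mips_sync();			// release barrier
 *	lock = 0;
 */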

#if defined(__mips_n64) || defined(__mips_n32)
static __inline void
atomic_set_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %3\n\t"		/* load old value */
		"or	%0, %2, %0\n\t"		/* calculate new value */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_clear_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;
	v = ~v;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %3\n\t"		/* load old value */
		"and	%0, %2, %0\n\t"		/* calculate new value */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_add_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %3\n\t"		/* load old value */
		"daddu	%0, %2, %0\n\t"		/* calculate new value */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_subtract_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %3\n\t"		/* load old value */
		"dsubu	%0, %2\n\t"		/* calculate new value */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline uint64_t
atomic_readandclear_64(__volatile uint64_t *addr)
{
	uint64_t result, temp;

	__asm __volatile (
		"1:\n\t"
		"lld	 %0, %3\n\t"		/* load old value */
		"li	 %1, 0\n\t"		/* value to store */
		"scd	 %1, %2\n\t"		/* attempt to store */
		"beqz	 %1, 1b\n\t"		/* if the store failed, spin */
		: "=&r"(result), "=&r"(temp), "=m" (*addr)
		: "m" (*addr)
		: "memory");

	return (result);
}

static __inline uint64_t
atomic_readandset_64(__volatile uint64_t *addr, uint64_t value)
{
	uint64_t result, temp;

	__asm __volatile (
		"1:\n\t"
		"lld	 %0, %3\n\t"		/* load old value */
		"or	 %1, $0, %4\n\t"	/* value to store */
		"scd	 %1, %2\n\t"		/* attempt to store */
		"beqz	 %1, 1b\n\t"		/* if the store failed, spin */
		: "=&r"(result), "=&r"(temp), "=m" (*addr)
		: "m" (*addr), "r" (value)
		: "memory");

	return (result);
}
#endif

#define	ATOMIC_ACQ_REL(NAME, WIDTH)					\
static __inline  void							\
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	atomic_##NAME##_##WIDTH(p, v);					\
	mips_sync();							\
}									\
									\
static __inline  void							\
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	mips_sync();							\
	atomic_##NAME##_##WIDTH(p, v);					\
}
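
/*
 * For example, ATOMIC_ACQ_REL(set, 32) below expands (modulo
 * whitespace) to:
 *
 *	static __inline void
 *	atomic_set_acq_32(__volatile uint32_t *p, uint32_t v)
 *	{
 *		atomic_set_32(p, v);
 *		mips_sync();
 *	}
 *
 * plus the matching atomic_set_rel_32(), which issues the barrier
 * before the operation instead of after it.
 */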

/* Variants of simple arithmetic with memory barriers. */
ATOMIC_ACQ_REL(set, 8)
ATOMIC_ACQ_REL(clear, 8)
ATOMIC_ACQ_REL(add, 8)
ATOMIC_ACQ_REL(subtract, 8)
ATOMIC_ACQ_REL(set, 16)
ATOMIC_ACQ_REL(clear, 16)
ATOMIC_ACQ_REL(add, 16)
ATOMIC_ACQ_REL(subtract, 16)
ATOMIC_ACQ_REL(set, 32)
ATOMIC_ACQ_REL(clear, 32)
ATOMIC_ACQ_REL(add, 32)
ATOMIC_ACQ_REL(subtract, 32)
#if defined(__mips_n64) || defined(__mips_n32)
ATOMIC_ACQ_REL(set, 64)
ATOMIC_ACQ_REL(clear, 64)
ATOMIC_ACQ_REL(add, 64)
ATOMIC_ACQ_REL(subtract, 64)
#endif

#undef ATOMIC_ACQ_REL

/*
 * We assume that a = b will do atomic loads and stores.
 */
#define	ATOMIC_STORE_LOAD(WIDTH)			\
static __inline  uint##WIDTH##_t			\
atomic_load_acq_##WIDTH(__volatile uint##WIDTH##_t *p)	\
{							\
	uint##WIDTH##_t v;				\
							\
	v = *p;						\
	mips_sync();					\
	return (v);					\
}							\
							\
static __inline  void					\
atomic_store_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{							\
	mips_sync();					\
	*p = v;						\
}

ATOMIC_STORE_LOAD(32)
ATOMIC_STORE_LOAD(64)
#undef ATOMIC_STORE_LOAD
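
/*
 * The intended acquire/release pairing (a sketch; "data" and "ready"
 * are hypothetical variables):
 *
 *	// producer
 *	data = 42;
 *	atomic_store_rel_32(&ready, 1);		// barrier, then store
 *
 *	// consumer
 *	while (atomic_load_acq_32(&ready) == 0)
 *		;				// load, then barrier
 *	// "data" is guaranteed to read as 42 here
 */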

#ifdef __mips_n32
#define	atomic_load_64	atomic_load_acq_64
#endif

/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline uint32_t
atomic_cmpset_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;

	__asm __volatile (
		"1:\tll	%0, %4\n\t"		/* load old value */
		"bne	%0, %2, 2f\n\t"		/* compare */
		"move	%0, %3\n\t"		/* value to store */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* if it failed, spin */
		"j	3f\n\t"
		"2:\n\t"
		"li	%0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "=m" (*p)
		: "r" (cmpval), "r" (newval), "m" (*p)
		: "memory");

	return (ret);
}
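
/*
 * A typical retry loop built on the primitive above (a sketch; "bits"
 * is a hypothetical operand):
 *
 *	uint32_t old;
 *
 *	do {
 *		old = *p;
 *	} while (atomic_cmpset_32(p, old, old | bits) == 0);
 */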

/*
 * The _acq and _rel variants below wrap the plain compare-and-set
 * with acquire and release memory barriers, respectively.
 */
static __inline uint32_t
atomic_cmpset_acq_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	int retval;

	retval = atomic_cmpset_32(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline uint32_t
atomic_cmpset_rel_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	mips_sync();
	return (atomic_cmpset_32(p, cmpval, newval));
}

static __inline uint32_t
atomic_fcmpset_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
	uint32_t ret;

	__asm __volatile (
		"1:\n\t"
		"ll	%0, %1\n\t"		/* load old value */
		"bne	%0, %4, 2f\n\t"		/* compare */
		"move	%0, %3\n\t"		/* value to store */
		"sc	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* if it failed, spin */
		"j	3f\n\t"
		"2:\n\t"
		"sw	%0, %2\n\t"		/* save old value */
		"li	%0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "+m" (*p), "=m" (*cmpval)
		: "r" (newval), "r" (*cmpval)
		: "memory");
	return (ret);
}
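
/*
 * Unlike atomic_cmpset_32(), the fcmpset variant writes the value it
 * actually found back into *cmpval on failure, so a retry loop needs
 * only one explicit load; the atomic_swap_*() routines near the end
 * of this file use exactly this idiom ("bits" is hypothetical):
 *
 *	uint32_t old = *p;
 *
 *	while (!atomic_fcmpset_32(p, &old, old | bits))
 *		;	// "old" now holds the refreshed value
 */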

static __inline uint32_t
atomic_fcmpset_acq_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
	int retval;

	retval = atomic_fcmpset_32(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline uint32_t
atomic_fcmpset_rel_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
	mips_sync();
	return (atomic_fcmpset_32(p, cmpval, newval));
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint32_t
atomic_fetchadd_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t value, temp;

	__asm __volatile (
		"1:\tll	%0, %1\n\t"		/* load old value */
		"addu	%2, %3, %0\n\t"		/* calculate new value */
		"sc	%2, %1\n\t"		/* attempt to store */
		"beqz	%2, 1b\n\t"		/* spin if failed */
		: "=&r" (value), "=m" (*p), "=&r" (temp)
		: "r" (v), "m" (*p));
	return (value);
}
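
/*
 * Example (a sketch; "next_ticket" is a hypothetical counter):
 * because atomic_fetchadd_32() returns the value *before* the
 * addition, concurrent callers each receive a distinct, ordered
 * value, which is the basis of ticket counters and ID allocators.
 *
 *	uint32_t my_ticket;
 *
 *	my_ticket = atomic_fetchadd_32(&next_ticket, 1);
 */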

#if defined(__mips_n64) || defined(__mips_n32)
/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline uint64_t
atomic_cmpset_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	uint64_t ret;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %4\n\t"		/* load old value */
		"bne	%0, %2, 2f\n\t"		/* compare */
		"move	%0, %3\n\t"		/* value to store */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* if it failed, spin */
		"j	3f\n\t"
		"2:\n\t"
		"li	%0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "=m" (*p)
		: "r" (cmpval), "r" (newval), "m" (*p)
		: "memory");

	return (ret);
}

static __inline uint64_t
atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	int retval;

	retval = atomic_cmpset_64(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline uint64_t
atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	mips_sync();
	return (atomic_cmpset_64(p, cmpval, newval));
}

static __inline uint32_t
atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	uint32_t ret;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %1\n\t"		/* load old value */
		"bne	%0, %4, 2f\n\t"		/* compare */
		"move	%0, %3\n\t"		/* value to store */
		"scd	%0, %1\n\t"		/* attempt to store */
		"beqz	%0, 1b\n\t"		/* if it failed, spin */
		"j	3f\n\t"
		"2:\n\t"
		"sd	%0, %2\n\t"		/* save old value */
		"li	%0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "+m" (*p), "=m" (*cmpval)
		: "r" (newval), "r" (*cmpval)
		: "memory");

	return (ret);
}

static __inline uint64_t
atomic_fcmpset_acq_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	int retval;

	retval = atomic_fcmpset_64(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline uint64_t
atomic_fcmpset_rel_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	mips_sync();
	return (atomic_fcmpset_64(p, cmpval, newval));
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint64_t
atomic_fetchadd_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t value, temp;

	__asm __volatile (
		"1:\n\t"
		"lld	%0, %1\n\t"		/* load old value */
		"daddu	%2, %3, %0\n\t"		/* calculate new value */
		"scd	%2, %1\n\t"		/* attempt to store */
		"beqz	%2, 1b\n\t"		/* spin if failed */
		: "=&r" (value), "=m" (*p), "=&r" (temp)
		: "r" (v), "m" (*p));
	return (value);
}
#endif

static __inline void
atomic_thread_fence_acq(void)
{

	mips_sync();
}

static __inline void
atomic_thread_fence_rel(void)
{

	mips_sync();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	mips_sync();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	mips_sync();
}

/* Operations on chars. */
#define	atomic_set_char		atomic_set_8
#define	atomic_set_acq_char	atomic_set_acq_8
#define	atomic_set_rel_char	atomic_set_rel_8
#define	atomic_clear_char	atomic_clear_8
#define	atomic_clear_acq_char	atomic_clear_acq_8
#define	atomic_clear_rel_char	atomic_clear_rel_8
#define	atomic_add_char		atomic_add_8
#define	atomic_add_acq_char	atomic_add_acq_8
#define	atomic_add_rel_char	atomic_add_rel_8
#define	atomic_subtract_char	atomic_subtract_8
#define	atomic_subtract_acq_char	atomic_subtract_acq_8
#define	atomic_subtract_rel_char	atomic_subtract_rel_8

/* Operations on shorts. */
#define	atomic_set_short	atomic_set_16
#define	atomic_set_acq_short	atomic_set_acq_16
#define	atomic_set_rel_short	atomic_set_rel_16
#define	atomic_clear_short	atomic_clear_16
#define	atomic_clear_acq_short	atomic_clear_acq_16
#define	atomic_clear_rel_short	atomic_clear_rel_16
#define	atomic_add_short	atomic_add_16
#define	atomic_add_acq_short	atomic_add_acq_16
#define	atomic_add_rel_short	atomic_add_rel_16
#define	atomic_subtract_short	atomic_subtract_16
#define	atomic_subtract_acq_short	atomic_subtract_acq_16
#define	atomic_subtract_rel_short	atomic_subtract_rel_16

/* Operations on ints. */
#define	atomic_set_int		atomic_set_32
#define	atomic_set_acq_int	atomic_set_acq_32
#define	atomic_set_rel_int	atomic_set_rel_32
#define	atomic_clear_int	atomic_clear_32
#define	atomic_clear_acq_int	atomic_clear_acq_32
#define	atomic_clear_rel_int	atomic_clear_rel_32
#define	atomic_add_int		atomic_add_32
#define	atomic_add_acq_int	atomic_add_acq_32
#define	atomic_add_rel_int	atomic_add_rel_32
#define	atomic_subtract_int	atomic_subtract_32
#define	atomic_subtract_acq_int	atomic_subtract_acq_32
#define	atomic_subtract_rel_int	atomic_subtract_rel_32
#define	atomic_cmpset_int	atomic_cmpset_32
#define	atomic_cmpset_acq_int	atomic_cmpset_acq_32
#define	atomic_cmpset_rel_int	atomic_cmpset_rel_32
#define	atomic_fcmpset_int	atomic_fcmpset_32
#define	atomic_fcmpset_acq_int	atomic_fcmpset_acq_32
#define	atomic_fcmpset_rel_int	atomic_fcmpset_rel_32
#define	atomic_load_acq_int	atomic_load_acq_32
#define	atomic_store_rel_int	atomic_store_rel_32
#define	atomic_readandclear_int	atomic_readandclear_32
#define	atomic_readandset_int	atomic_readandset_32
#define	atomic_fetchadd_int	atomic_fetchadd_32

/*
 * I think the following is right, even for n32.  Under n32 pointers
 * are still 32 bits, so we need to operate on them as 32-bit
 * quantities, even though they are sign extended in use.  For longs
 * there is no question, because on the 32-bit ABIs they are always
 * 32 bits.
 */
#ifdef __mips_n64
/* Operations on longs. */
#define	atomic_set_long		atomic_set_64
#define	atomic_set_acq_long	atomic_set_acq_64
#define	atomic_set_rel_long	atomic_set_rel_64
#define	atomic_clear_long	atomic_clear_64
#define	atomic_clear_acq_long	atomic_clear_acq_64
#define	atomic_clear_rel_long	atomic_clear_rel_64
#define	atomic_add_long		atomic_add_64
#define	atomic_add_acq_long	atomic_add_acq_64
#define	atomic_add_rel_long	atomic_add_rel_64
#define	atomic_subtract_long	atomic_subtract_64
#define	atomic_subtract_acq_long	atomic_subtract_acq_64
#define	atomic_subtract_rel_long	atomic_subtract_rel_64
#define	atomic_cmpset_long	atomic_cmpset_64
#define	atomic_cmpset_acq_long	atomic_cmpset_acq_64
#define	atomic_cmpset_rel_long	atomic_cmpset_rel_64
#define	atomic_fcmpset_long	atomic_fcmpset_64
#define	atomic_fcmpset_acq_long	atomic_fcmpset_acq_64
#define	atomic_fcmpset_rel_long	atomic_fcmpset_rel_64
#define	atomic_load_acq_long	atomic_load_acq_64
#define	atomic_store_rel_long	atomic_store_rel_64
#define	atomic_fetchadd_long	atomic_fetchadd_64
#define	atomic_readandclear_long	atomic_readandclear_64

#else /* !__mips_n64 */

/* Operations on longs. */
#define	atomic_set_long(p, v)						\
	atomic_set_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_long(p, v)					\
	atomic_set_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_long(p, v)					\
	atomic_set_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_long(p, v)						\
	atomic_clear_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_long(p, v)					\
	atomic_clear_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_long(p, v)					\
	atomic_clear_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_long(p, v)						\
	atomic_add_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_long(p, v)					\
	atomic_add_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_long(p, v)					\
	atomic_add_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_long(p, v)					\
	atomic_subtract_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_long(p, v)					\
	atomic_subtract_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_long(p, v)					\
	atomic_subtract_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_cmpset_long(p, cmpval, newval)				\
	atomic_cmpset_32((volatile u_int *)(p), (u_int)(cmpval),	\
	    (u_int)(newval))
#define	atomic_cmpset_acq_long(p, cmpval, newval)			\
	atomic_cmpset_acq_32((volatile u_int *)(p), (u_int)(cmpval),	\
	    (u_int)(newval))
#define	atomic_cmpset_rel_long(p, cmpval, newval)			\
	atomic_cmpset_rel_32((volatile u_int *)(p), (u_int)(cmpval),	\
	    (u_int)(newval))
#define	atomic_fcmpset_long(p, cmpval, newval)				\
	atomic_fcmpset_32((volatile u_int *)(p), (u_int *)(cmpval),	\
	    (u_int)(newval))
#define	atomic_fcmpset_acq_long(p, cmpval, newval)			\
	atomic_fcmpset_acq_32((volatile u_int *)(p), (u_int *)(cmpval),	\
	    (u_int)(newval))
#define	atomic_fcmpset_rel_long(p, cmpval, newval)			\
	atomic_fcmpset_rel_32((volatile u_int *)(p), (u_int *)(cmpval),	\
	    (u_int)(newval))
#define	atomic_load_acq_long(p)						\
	(u_long)atomic_load_acq_32((volatile u_int *)(p))
#define	atomic_store_rel_long(p, v)					\
	atomic_store_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_fetchadd_long(p, v)					\
	atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_long(p)					\
	atomic_readandclear_32((volatile u_int *)(p))

#endif /* __mips_n64 */

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr	atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

static __inline unsigned int
atomic_swap_int(volatile unsigned int *ptr, const unsigned int value)
{
	unsigned int retval;

	retval = *ptr;

	while (!atomic_fcmpset_int(ptr, &retval, value))
		;
	return (retval);
}

static __inline uint32_t
atomic_swap_32(volatile uint32_t *ptr, const uint32_t value)
{
	uint32_t retval;

	retval = *ptr;

	while (!atomic_fcmpset_32(ptr, &retval, value))
		;
	return (retval);
}

#if defined(__mips_n64) || defined(__mips_n32)
static __inline uint64_t
atomic_swap_64(volatile uint64_t *ptr, const uint64_t value)
{
	uint64_t retval;

	retval = *ptr;

	while (!atomic_fcmpset_64(ptr, &retval, value))
		;
	return (retval);
}
#endif

#ifdef __mips_n64
static __inline unsigned long
atomic_swap_long(volatile unsigned long *ptr, const unsigned long value)
{
	unsigned long retval;

	retval = *ptr;

	while (!atomic_fcmpset_64((volatile uint64_t *)ptr,
	    (uint64_t *)&retval, value))
		;
	return (retval);
}
#else
static __inline unsigned long
atomic_swap_long(volatile unsigned long *ptr, const unsigned long value)
{
	unsigned long retval;

	retval = *ptr;

	while (!atomic_fcmpset_32((volatile uint32_t *)ptr,
	    (uint32_t *)&retval, value))
		;
	return (retval);
}
#endif
#define	atomic_swap_ptr(ptr, value) atomic_swap_long((unsigned long *)(ptr), value)

#endif /* ! _MACHINE_ATOMIC_H_ */