/* SPDX-License-Identifier: GPL-2.0 */
/*
 * forked from parisc asm/atomic.h which was:
 *	Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 *	Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_CMPXCHG_H_
#define _ASM_PARISC_CMPXCHG_H_

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);

/* __xchg8/32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, volatile char *);
extern unsigned long __xchg32(int, volatile int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, volatile unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x, (volatile unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (volatile int *) ptr);
	case 1: return __xchg8((char) x, (volatile char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
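
/* Illustrative sketch (not compiled): since "size" is always the
 * compile-time constant sizeof(*ptr), the switch above collapses into a
 * single direct call:
 *
 *	unsigned int v = 1;
 *	__xchg(0, &v, sizeof(v));	-> becomes __xchg32(0, &v)
 *
 * An unsupported size (e.g. 2) leaves the call to
 * __xchg_called_with_bad_pointer() in place; that function is never
 * defined, so misuse shows up as a link error instead of silently
 * clobbering adjacent bytes.
 */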

/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define arch_xchg(ptr, x)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) _x_ = (x);					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));	\
	__ret;								\
})
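
/* Example use (illustrative sketch, not a documented calling convention):
 *
 *	static unsigned int flag;
 *	...
 *	unsigned int old = arch_xchg(&flag, 1);
 *
 * "flag" becomes 1 and "old" receives its previous value in one atomic
 * step; sizeof(*ptr) routes this through __xchg32().
 */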

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u8/u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
				   unsigned int new_);
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);

/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr,
				     (unsigned int)old, (unsigned int)new_);
	case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
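
/* Note on the 1-byte case above: "old" and "new_" arrive widened to
 * unsigned long, so the 0xff masks hand __cmpxchg_u8() only the low byte.
 * Illustrative call with hypothetical values:
 *
 *	__cmpxchg(p, 0x1234, 0x5678, 1);	-> __cmpxchg_u8(p, 0x34, 0x78)
 */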

#define arch_cmpxchg(ptr, o, n)						 \
({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
})
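
/* Example: a compare-and-swap retry loop built on arch_cmpxchg(). An
 * illustrative sketch only (counter_inc is a hypothetical helper; generic
 * code should use the cmpxchg()/atomic_*() wrappers instead):
 *
 *	static inline void counter_inc(unsigned int *ctr)
 *	{
 *		unsigned int old;
 *
 *		do {
 *			old = *ctr;
 *		} while (arch_cmpxchg(ctr, old, old + 1) != old);
 *	}
 *
 * The new value is stored only if *ctr still equals "old"; otherwise
 * another CPU won the race and the loop retries with the fresh value.
 */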

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8:	return __cmpxchg_u64((u64 *)ptr, old, new_);
#endif
	case 4:	return __cmpxchg_u32((unsigned int *)ptr,
				     (unsigned int)old, (unsigned int)new_);
	default:
		return __generic_cmpxchg_local(ptr, old, new_, size);
	}
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define arch_cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
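
/* Illustrative sketch: arch_cmpxchg_local() is atomic only with respect to
 * code running on the same CPU, which suits per-CPU data, e.g. (with a
 * hypothetical per-CPU variable "stat"):
 *
 *	unsigned long *p = this_cpu_ptr(&stat);
 *	unsigned long old = *p;
 *	arch_cmpxchg_local(p, old, old + 1);
 *
 * It must not be relied on for data shared between CPUs.
 */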
#ifdef CONFIG_64BIT
#define arch_cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#else
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif

#define arch_cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
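
/* Example (illustrative only): a 64-bit compare-and-swap; this header
 * declares __cmpxchg_u64() unconditionally, so the macro is usable on
 * 32-bit kernels too:
 *
 *	u64 seq = 0;
 *	u64 prev = arch_cmpxchg64(&seq, 0, 1);	-> prev == 0 on success
 */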

#endif /* _ASM_PARISC_CMPXCHG_H_ */