1#ifndef _ASM_IA64_INTRINSICS_H
2#define _ASM_IA64_INTRINSICS_H
3
4/*
5 * Compiler-dependent intrinsics.
6 *
7 * Copyright (C) 2002-2003 Hewlett-Packard Co
8 *	David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10
11#ifndef __ASSEMBLY__
12
13/* include compiler specific intrinsics */
14#include <asm/ia64regs.h>
15#ifdef __INTEL_COMPILER
16# include <asm/intel_intrin.h>
17#else
18# include <asm/gcc_intrin.h>
19#endif
20
21/*
22 * Force an unresolved reference if someone tries to use
23 * ia64_fetch_and_add() with a bad value.
24 */
25extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
26extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
27
/*
 * IA64_FETCHADD(tmp, v, n, sz, sem): atomically add N to *V with the
 * given ordering semantics (SEM is "acq" or "rel", token-pasted into
 * the intrinsic name) and store the PRE-add value in TMP.  Only 4- and
 * 8-byte objects are supported; any other SZ leaves an unresolved
 * reference to __bad_size_for_ia64_fetch_and_add() so the build fails
 * at link time.
 *
 * All macro parameters are parenthesized on expansion so that callers
 * may pass arbitrary expressions safely.
 */
#define IA64_FETCHADD(tmp,v,n,sz,sem)						\
({										\
	switch (sz) {								\
	      case 4:								\
	        (tmp) = ia64_fetchadd4_##sem((unsigned int *) (v), (n));	\
		break;								\
										\
	      case 8:								\
	        (tmp) = ia64_fetchadd8_##sem((unsigned long *) (v), (n));	\
		break;								\
										\
	      default:								\
		__bad_size_for_ia64_fetch_and_add();				\
	}									\
})
43
/*
 * ia64_fetchadd(i, v, sem): atomically add the constant increment I to
 * *V with "acq" or "rel" semantics and return the OLD value of *V,
 * cast to the pointed-to type.
 *
 * Only the increments accepted below (+/-1, +/-4, +/-8, +/-16 -- the
 * values the hardware fetchadd instruction can encode directly) are
 * valid; anything else leaves an unresolved reference to
 * __bad_increment_for_ia64_fetch_and_add() and fails at link time.
 */
#define ia64_fetchadd(i,v,sem)								\
({											\
	__u64 _tmp;									\
	volatile __typeof__(*(v)) *_v = (v);						\
	/* Can't use a switch () here: gcc isn't always smart enough for that... */	\
	if ((i) == -16)									\
		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);			\
	else if ((i) == -8)								\
		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);				\
	else if ((i) == -4)								\
		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);				\
	else if ((i) == -1)								\
		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);				\
	else if ((i) == 1)								\
		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);				\
	else if ((i) == 4)								\
		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);				\
	else if ((i) == 8)								\
		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);				\
	else if ((i) == 16)								\
		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);				\
	else										\
		_tmp = __bad_increment_for_ia64_fetch_and_add();			\
	(__typeof__(*(v))) (_tmp);	/* return old value */				\
})
69
/*
 * ia64_fetch_and_add(i, v): atomically add I to *V (release semantics)
 * and return the NEW value.  NOTE: (i) is expanded twice, so it must
 * not have side effects.
 */
#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, rel) + (i)) /* return new value */
71
72/*
73 * This function doesn't exist, so you'll get a linker error if
74 * something tries to do an invalid xchg().
75 */
76extern void ia64_xchg_called_with_bad_pointer (void);
77
/*
 * __xchg(x, ptr, size): atomically store X in *PTR and return the old
 * value of *PTR, widened to unsigned long.  SIZE selects the 1-, 2-,
 * 4-, or 8-byte xchg intrinsic; any other size leaves an unresolved
 * reference to ia64_xchg_called_with_bad_pointer() so the build fails
 * at link time.
 *
 * The X and PTR parameters are parenthesized on expansion so that
 * callers may pass arbitrary expressions safely.
 */
#define __xchg(x,ptr,size)						\
({									\
	unsigned long __xchg_result;					\
									\
	switch (size) {							\
	      case 1:							\
		__xchg_result = ia64_xchg1((__u8 *) (ptr), (x));	\
		break;							\
									\
	      case 2:							\
		__xchg_result = ia64_xchg2((__u16 *) (ptr), (x));	\
		break;							\
									\
	      case 4:							\
		__xchg_result = ia64_xchg4((__u32 *) (ptr), (x));	\
		break;							\
									\
	      case 8:							\
		__xchg_result = ia64_xchg8((__u64 *) (ptr), (x));	\
		break;							\
	      default:							\
		ia64_xchg_called_with_bad_pointer();			\
	}								\
	__xchg_result;							\
})
103
/*
 * xchg(ptr, x): type-safe front end for __xchg() -- atomically store X
 * in *PTR and return the previous value, cast back to the pointed-to
 * type.
 */
#define xchg(ptr,x)							     \
  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
106
107/*
108 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
109 * store NEW in MEM.  Return the initial value in MEM.  Success is
110 * indicated by comparing RETURN with OLD.
111 */
112
113#define __HAVE_ARCH_CMPXCHG 1
114
115/*
116 * This function doesn't exist, so you'll get a linker error
117 * if something tries to do an invalid cmpxchg().
118 */
119extern long ia64_cmpxchg_called_with_bad_pointer (void);
120
/*
 * ia64_cmpxchg(sem, ptr, old, new, size): atomically compare *PTR with
 * OLD and, if equal, store NEW in *PTR.  Returns the value *PTR held
 * before the operation, cast to the type of OLD.  SEM selects the
 * ordering semantics ("acq" or "rel", token-pasted into the intrinsic
 * name).
 *
 * The first switch narrows OLD to the operand width before widening it
 * to __u64, so that e.g. a sign-extended negative value compares
 * correctly against a narrow memory operand.  An unsupported SIZE
 * leaves an unresolved reference to
 * ia64_cmpxchg_called_with_bad_pointer() so the build fails at link
 * time.
 *
 * The PTR and NEW parameters are parenthesized on expansion so that
 * callers may pass arbitrary expressions safely.
 */
#define ia64_cmpxchg(sem,ptr,old,new,size)						\
({											\
	__u64 _o_, _r_;									\
											\
	switch (size) {									\
	      case 1: _o_ = (__u8 ) (long) (old); break;				\
	      case 2: _o_ = (__u16) (long) (old); break;				\
	      case 4: _o_ = (__u32) (long) (old); break;				\
	      case 8: _o_ = (__u64) (long) (old); break;				\
	      default: break;								\
	}										\
	switch (size) {									\
	      case 1:									\
		_r_ = ia64_cmpxchg1_##sem((__u8 *) (ptr), (new), _o_);			\
		break;									\
											\
	      case 2:									\
		_r_ = ia64_cmpxchg2_##sem((__u16 *) (ptr), (new), _o_);			\
		break;									\
											\
	      case 4:									\
		_r_ = ia64_cmpxchg4_##sem((__u32 *) (ptr), (new), _o_);			\
		break;									\
											\
	      case 8:									\
		_r_ = ia64_cmpxchg8_##sem((__u64 *) (ptr), (new), _o_);			\
		break;									\
											\
	      default:									\
		_r_ = ia64_cmpxchg_called_with_bad_pointer();				\
		break;									\
	}										\
	(__typeof__(old)) _r_;								\
})
155
/*
 * Compare-and-exchange wrappers with explicit acquire or release
 * ordering; each returns the value *PTR held before the operation.
 */
#define cmpxchg_acq(ptr,o,n)	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr,o,n)	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))

/* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n)	cmpxchg_acq(ptr,o,n)
161
#ifdef CONFIG_IA64_DEBUG_CMPXCHG
/* Declares a per-invocation attempt counter (128 tries) for the check below. */
# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
/*
 * Debug check, presumably placed inside a cmpxchg retry loop at the
 * call sites (not visible here) -- TODO confirm against callers.  Once
 * the counter declared by CMPXCHG_BUGCHECK_DECL is exhausted, print
 * the current instruction pointer and the contended word V.  Note the
 * "break" exits only this macro's own do/while(0), not the caller's
 * loop, so the check simply stops firing after the report.
 */
# define CMPXCHG_BUGCHECK(v)							\
  do {										\
	if (_cmpxchg_bugcheck_count-- <= 0) {					\
		void *ip;							\
		extern int printk(const char *fmt, ...);			\
		ip = (void *) ia64_getreg(_IA64_REG_IP);			\
		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));	\
		break;								\
	}									\
  } while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
/* Non-debug builds: both macros compile to nothing. */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
178
179#endif
180#endif /* _ASM_IA64_INTRINSICS_H */
181