/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>

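/*
 * __bit_op() - emit an atomic read-modify-write of @mem: an LL/SC loop
 * that loads the word, applies @insn to it and retries until the
 * store-conditional succeeds.
 */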
#define __bit_op(mem, insn, inputs...) do {			\
	unsigned long __temp;					\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __stringify(LONG_LL)	"	%0, %1	\n"	\
	"	" insn		"			\n"	\
	"	" __stringify(LONG_SC)	"	%0, %1	\n"	\
	"	" __stringify(SC_BEQZ)	"	%0, 1b	\n"	\
	"	.set		pop			\n"	\
	: "=&r"(__temp), "+" GCC_OFF_SMALL_ASM()(mem)		\
	: inputs						\
	: __LLSC_CLOBBER);					\
} while (0)

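/*
 * __test_bit_op() - like __bit_op(), but additionally yields the value
 * left in %0 (__orig), typically the word as loaded by LL before @insn
 * modified it.  @ll_dst names the operand ("%0" or "%1") that the LL
 * targets.
 */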
#define __test_bit_op(mem, ll_dst, insn, inputs...) ({		\
	unsigned long __orig, __temp;				\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __stringify(LONG_LL) " "	ll_dst ", %2\n"	\
	"	" insn		"			\n"	\
	"	" __stringify(LONG_SC)	"	%1, %2	\n"	\
	"	" __stringify(SC_BEQZ)	"	%1, 1b	\n"	\
	"	.set		pop			\n"	\
	: "=&r"(__orig), "=&r"(__temp),				\
	  "+" GCC_OFF_SMALL_ASM()(mem)				\
	: inputs						\
	: __LLSC_CLOBBER);					\
								\
	__orig;							\
})

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);
bool __mips_xor_is_negative_byte(unsigned long mask,
		volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_set_bit(nr, addr);
		return;
	}

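	/*
	 * On MIPSr2+, a constant bit index of 16 or above cannot be set
	 * with a 16-bit "ori" immediate, so use "ins" to insert a 1 into
	 * the target bit rather than materialising the mask in a register.
	 */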
	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
		__bit_op(*m, __stringify(LONG_INS) " %0, %3, %2, 1", "i"(bit), "r"(~0));
		return;
	}

	__bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_clear_bit(nr, addr);
		return;
	}

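	/*
	 * On MIPSr2+ with a constant bit index, "ins" from $0 (the zero
	 * register) clears the bit directly, with no mask register needed.
	 */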
	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
		__bit_op(*m, __stringify(LONG_INS) " %0, $0, %2, 1", "i"(bit));
		return;
	}

	__bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_change_bit(nr, addr);
		return;
	}

	__bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
}

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	if (!kernel_uses_llsc) {
		res = __mips_test_and_set_bit_lock(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	return test_and_set_bit_lock(nr, addr);
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_clear_bit(nr, addr);
	} else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
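		/*
		 * "ext" pulls the old bit out into %0 while "ins" from $0
		 * clears it in the loaded word, so no separate mask or
		 * compare is needed.
		 */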
		res = __test_bit_op(*m, "%1",
				    __stringify(LONG_EXT) " %0, %1, %3, 1;"
				    __stringify(LONG_INS) " %1, $0, %3, 1",
				    "i"(bit));
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3;"
				     "xor\t%1, %1, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_change_bit(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

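/*
 * xor_unlock_is_negative_byte - Atomically XOR a mask and test bit 7
 * @mask: Bits to flip
 * @p: Address of the word to operate on
 *
 * XORs @mask into *@p and returns true if bit 7 (the sign bit of the
 * low byte) was set in the old value.
 */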
static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	unsigned long orig;
	bool res;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_xor_is_negative_byte(mask, p);
	} else {
		orig = __test_bit_op(*p, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(mask));
		res = (orig & BIT(7)) != 0;
	}

	smp_llsc_mb();

	return res;
}

#undef __bit_op
#undef __test_bit_op

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word.
 * The result is undefined if no 1 bit exists, so callers should check for
 * a nonzero word first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

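	/*
	 * Software fallback: a binary search, halving the remaining
	 * window with each test and shifting the candidate bits up.
	 */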
	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
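
/*
 * Example: __fls(0x00400000ul) == 22, since bit 22 is the highest set
 * bit; on the CLZ path this is 31 - clz(0x00400000) = 31 - 9 = 22.
 */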

/*
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1.
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
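
/*
 * word & -word isolates the lowest set bit (e.g. 0b1010 & -0b1010 ==
 * 0b0010), so the highest bit of the result is also the lowest set bit
 * of @word: __ffs(0b1010) == 1.
 */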

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

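	/*
	 * Software fallback: binary search for the highest set bit,
	 * returning a 1-based position (or 0 for x == 0).
	 */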
	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
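
/*
 * Example: fls(0x10) == 5, since bit 4 is the highest set bit and the
 * result is 1-based; ffs() below likewise gives ffs(0x10) == 5, because
 * bit 4 is also the lowest set bit.
 */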

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from ffz() below (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

#include <asm-generic/bitops/ffz.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */