/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins    "
#define __EXT		"ext    "
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins    "
#define __EXT		"dext    "
#endif
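
/*
 * Illustrative sketch (not part of the original header; the helper name
 * is hypothetical): SZLONG_LOG and SZLONG_MASK decompose a bit number
 * into a word index and a bit offset, the arithmetic every routine in
 * this file repeats inline.
 */
static inline void __bitops_decompose_example(unsigned long nr,
	unsigned long *word_idx, unsigned long *bit_off)
{
	*word_idx = nr >> SZLONG_LOG;	/* which unsigned long holds the bit */
	*bit_off = nr & SZLONG_MASK;	/* position within that word;
					   e.g. nr = 70 on 64-bit -> word 1, bit 6 */
}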

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (__builtin_constant_p(bit)) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	" __INS "%0, %4, %2, 1				\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (bit), "m" (*m), "r" (~0));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a |= mask;
		raw_local_irq_restore(flags);
	}
}
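
/*
 * Illustrative sketch (not part of the original header; the helper and
 * bitmap are hypothetical): marking slot @nr of a caller-owned bitmap.
 * set_bit() is atomic but implies no memory barrier.
 */
static inline void __bitops_example_mark_used(unsigned long nr,
	volatile unsigned long *map)
{
	set_bit(nr, map);	/* safe against concurrent bitops on the same map */
}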

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << bit)), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (__builtin_constant_p(bit)) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	" __INS "%0, $0, %2, 1				\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (bit), "m" (*m));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << bit)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}
}
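
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): releasing a lock-like flag with clear_bit().  Since
 * clear_bit() implies no barrier, prior stores are ordered explicitly,
 * as the comment above prescribes.
 */
static inline void __bitops_example_release(volatile unsigned long *flag)
{
	smp_mb__before_clear_bit();	/* make earlier stores visible first */
	clear_bit(0, flag);
}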

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqz	%0, 2f				\n"
		"	.subsection 2				\n"
		"2:	b	1b				\n"
		"	.previous				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}
}
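
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): atomically flipping a toggle such as an A/B phase bit.
 */
static inline void __bitops_example_toggle(volatile unsigned long *word)
{
	change_bit(0, word);	/* the old value is not observed */
}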

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_mb();

	return res != 0;
}
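
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): a minimal try-lock.  test_and_set_bit() implies a
 * memory barrier, so no extra fencing is needed here.
 */
static inline int __bitops_example_trylock(volatile unsigned long *word)
{
	return !test_and_set_bit(0, word);	/* 1 if we newly set bit 0 */
}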

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (__builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	" __EXT "%2, %0, %3, 1				\n"
		"	" __INS	"%0, $0, %3, 1				\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "ri" (bit), "m" (*m)
		: "memory");
#endif
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}

	smp_mb();

	return res != 0;
}
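
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): consuming a "work pending" flag so that exactly one
 * of several racing callers sees it set.
 */
static inline int __bitops_example_take_pending(unsigned long nr,
	volatile unsigned long *map)
{
	return test_and_clear_bit(nr, map);	/* 1 for the single winner */
}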

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}

	smp_mb();

	return res != 0;
}
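
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): flipping a phase bit while learning the prior phase,
 * e.g. for double-buffer selection.
 */
static inline int __bitops_example_flip_phase(volatile unsigned long *word)
{
	return test_and_change_bit(0, word);	/* old value of bit 0 */
}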

#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push						\n"
	"	.set	mips64						\n"
	"	dclz	%0, %1						\n"
	"	.set	pop						\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
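
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): __ilog2() gives the index of the most significant set
 * bit, e.g. __ilog2(4096) == 12.  A zero input yields -1 because
 * clz/dclz return the full word width for zero.
 */
static inline int __bitops_example_order(unsigned long size)
{
	return __ilog2(size);	/* e.g. page-order style arithmetic */
}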

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}
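
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): word & -word isolates the lowest set bit, so
 * __ffs(0x18) == 3.  The caller must guarantee word != 0.
 */
static inline unsigned long __bitops_example_lowest(unsigned long word)
{
	return __ffs(word);
}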

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}
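
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): fls() answers "how many bits are needed to represent
 * x", e.g. fls(0x20) == 6.
 */
static inline int __bitops_example_bits_needed(int x)
{
	return fls(x);	/* fls(0) == 0 by definition */
}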

#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore it differs in spirit from ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
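
/*
 * Illustrative sketch (not part of the original header; the helper is
 * hypothetical): ffs() uses the 1-based libc convention, so
 * ffs(x) == __ffs(x) + 1 for any nonzero x, and ffs(0) == 0.
 */
static inline int __bitops_example_ffs_demo(int x)
{
	return ffs(x);	/* e.g. ffs(8) == 4 */
}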

#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */