#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 *  linux/include/asm-m32r/bitops.h
 *
 *  Copyright 1992, Linus Torvalds.
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. The test_and_*() operations return 0 if
 * the bit was clear before the operation and != 0 if it was set.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

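/*
 * Worked example (editor's note; "map" is a hypothetical array, not
 * part of this header): with "unsigned long map[2];", set_bit(35, map)
 * selects word 35 >> 5 == 1 and bit 35 & 0x1F == 3, i.e. it performs
 * map[1] |= (1 << 3) atomically.
 */
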
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;		\n\t"
		"or	%0, %2;			\n\t"
		M32R_UNLOCK" %0, @%1;		\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
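
/*
 * Usage sketch (illustrative only; "dev_map" is a hypothetical bitmap,
 * not part of this header): mark minor number 5 as allocated.
 *
 *	static unsigned long dev_map[BITS_TO_LONGS(64)];
 *
 *	set_bit(5, dev_map);	// atomic; IRQs are masked around the
 *				// LOCK/UNLOCK sequence above
 *	__set_bit(6, dev_map);	// non-atomic variant; caller must
 *				// provide its own exclusion
 */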

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;		\n\t"
		"and	%0, %2;			\n\t"
		M32R_UNLOCK" %0, @%1;		\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (~mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
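
/*
 * Locking sketch (illustrative; LOCK_BIT and lock_word are made-up
 * names): because clear_bit() is not a barrier, releasing a lock-like
 * flag should be fenced with the macro above:
 *
 *	smp_mb__before_clear_bit();		// order prior stores first
 *	clear_bit(LOCK_BIT, &lock_word);	// then drop the flag
 */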

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__u32  mask;
	volatile __u32  *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;		\n\t"
		"xor	%0, %2;			\n\t"
		M32R_UNLOCK" %0, @%1;		\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
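
/*
 * Usage sketch (illustrative; "led_state" is a made-up word): flip a
 * status flag that interrupt context may also update:
 *
 *	static unsigned long led_state;
 *
 *	change_bit(0, &led_state);	// atomic toggle of bit 0
 */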

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;		\n\t"
		"mv	%1, %0;			\n\t"
		"and	%0, %3;			\n\t"
		"or	%1, %3;			\n\t"
		M32R_UNLOCK" %1, @%2;		\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
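
/*
 * Usage sketch (illustrative; BUSY_BIT and busy_word are made-up
 * names): a minimal try-lock; only the caller that observes the
 * 0 -> 1 transition gets a zero return and may proceed:
 *
 *	if (!test_and_set_bit(BUSY_BIT, &busy_word)) {
 *		// bit was clear and is now set: we own the resource
 *	} else {
 *		// bit was already set: someone else owns it
 *	}
 */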

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%3")
		M32R_LOCK" %0, @%3;		\n\t"
		"mv	%1, %0;			\n\t"
		"and	%0, %2;			\n\t"
		"not	%2, %2;			\n\t"
		"and	%1, %2;			\n\t"
		M32R_UNLOCK" %1, @%3;		\n\t"
		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
		: "r" (a)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
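
/*
 * Usage sketch (illustrative; PENDING_BIT, pending_word and
 * handle_event() are made-up names): consume a pending-work flag
 * exactly once, since only one caller sees the non-zero return:
 *
 *	if (test_and_clear_bit(PENDING_BIT, &pending_word))
 *		handle_event();
 */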

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;		\n\t"
		"mv	%1, %0;			\n\t"
		"and	%0, %3;			\n\t"
		"xor	%1, %3;			\n\t"
		M32R_UNLOCK" %1, @%2;		\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
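
/*
 * Usage sketch (illustrative; "state_word" is a made-up name): toggle
 * bit 2 and learn its previous value in a single atomic step:
 *
 *	int was_set = test_and_change_bit(2, &state_word);
 */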

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
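
/*
 * Worked example for the generic helpers above (editor's note): for
 * the value 0x58 (binary 0101 1000, bits 3, 4 and 6 set),
 * __ffs(0x58) == 3 (lowest set bit, 0-based), fls(0x58) == 7 (highest
 * set bit, 1-based) and ffz(0x58) == 0 (lowest clear bit, 0-based).
 */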

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */