/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

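/* Acquire the lock with ldstub, which atomically reads the lock byte and
 * sets it to 0xff.  A zero result means the lock was free and is now ours.
 * The contended path lives in .subsection 2 so the uncontended case stays
 * straight-line code; it re-reads the byte with a plain ldub and only
 * retries the atomic ldstub once the lock looks free again.
 */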
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

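/* Single attempt: ldstub returns the previous lock byte, so a zero result
 * means we took the lock; non-zero means somebody else already holds it.
 */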
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

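/* Release is a plain byte store of zero; the "memory" clobber keeps the
 * compiler from moving protected accesses past the unlock.
 */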
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

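/* Reader/writer locks.  The 32-bit lock word keeps a reader count in its
 * upper 24 bits and uses the low-order byte (offset 3, since sparc32 is
 * big-endian) as a lock byte that serializes writers and count updates.
 * The slow paths are out-of-line assembly helpers (___rw_read_enter and
 * friends, found in arch/sparc/lib/locks.S in mainline kernels); they are
 * called with the lock pointer in %g1, the first ldstub on the lock byte
 * issued from the call's delay slot, and the return address saved in %g4
 * because the call itself clobbers %o7.
 */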
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

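/* The public entry point masks local interrupts around the helper call so
 * an interrupt handler on this CPU cannot spin forever on a lock byte that
 * the interrupted context is holding while it updates the reader count.
 */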
#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

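/* Dropping a read lock mirrors the acquire path: take the lock byte and let
 * ___rw_read_exit drop the reader count, again with interrupts masked in the
 * wrapper below.
 */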
static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

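/* A writer first takes the lock byte through ___rw_write_enter, which also
 * waits for the reader count to drain to zero, then marks the whole word
 * with ~0 so readers and other writers see the lock as taken.
 */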
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

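/* Try-lock variant done inline: grab the lock byte with ldstub; if that
 * succeeded but readers are still counted in the upper 24 bits, back out
 * by clearing the byte, otherwise claim the whole word with ~0.
 */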
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;
		else
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}

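/* ___rw_read_try attempts to take a read lock without blocking and reports
 * the outcome in %o0: non-zero on success, zero when the lock could not be
 * taken right away.
 */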
static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

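/* Write unlock simply zeroes the whole word, dropping both the lock byte and
 * the all-ones marker set by arch_write_lock.  The remaining macros map the
 * generic *_flags, relax and can_lock hooks onto the plain operations above.
 */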
#define arch_write_unlock(rw)	do { (rw)->lock = 0; } while(0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */