/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

/*
 * Atomic compare-and-swap using the s390 COMPARE AND SWAP (cs)
 * instruction: if *lock still equals "old" it is replaced by "new";
 * either way the previous contents of *lock are returned, so the
 * caller can tell whether the swap took effect by comparing the
 * return value with "old".
 */
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory" );
	return old;
}
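
/*
 * Illustrative sketch (not part of the original header): a portable C
 * rendering of what the cs-based primitive above does, except that the
 * hardware performs the compare and the store as a single atomic step.
 * The function name below is made up for illustration only.
 */
#if 0
static inline unsigned int
example_compare_and_swap(volatile unsigned int *lock,
			 unsigned int old, unsigned int new)
{
	unsigned int prev = *lock;	/* value seen before the swap */

	if (prev == old)
		*lock = new;		/* store only if the compare matched */
	return prev;			/* prev == old means the swap happened */
}
#endif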

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness guarantees have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	/*
	 * Fast path: claim the free lock (owner_cpu == 0) by storing the
	 * bitwise complement of the CPU number, which is non-zero even
	 * for CPU 0.  On contention fall back to the out-of-line wait
	 * loop.
	 */
	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					 unsigned long flags)
{
	int old;

	/*
	 * As arch_spin_lock(), but the out-of-line wait loop may restore
	 * the caller's saved interrupt state in "flags" while spinning.
	 */
	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	/*
	 * Single compare-and-swap attempt; on failure fall back to the
	 * out-of-line retry helper, which returns nonzero only if it
	 * manages to take the lock.
	 */
	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	/* Atomically reset owner_cpu from its current value to 0 (free). */
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
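
/*
 * Illustrative sketch (not part of the original header): how a caller
 * might drive the operations above around a critical section.  The
 * function and variable names are made up for illustration, and a
 * zero-initialized lock word is taken to mean "unlocked", matching the
 * comparisons against 0 above.
 */
#if 0
static arch_spinlock_t example_lock = { 0 };	/* owner_cpu == 0: free */

static void example_critical_section(void)
{
	arch_spin_lock(&example_lock);		/* spin until we own the lock */
	/* ... touch data that must not be accessed concurrently ... */
	arch_spin_unlock(&example_lock);	/* hand the lock back */
}
#endif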

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
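
/*
 * Illustrative sketch (not part of the original header): the "mixed"
 * usage described above, written against the arch_* read/write
 * primitives defined later in this file.  The function and variable
 * names are made up, and the flags value is assumed to come from the
 * caller's local_irq_save() in the generic locking layer.
 */
#if 0
static arch_rwlock_t example_rwlock = { 0 };	/* zero lock word: unlocked */

/* Readers may run in interrupt context and need not disable IRQs. */
static void example_reader(void)
{
	arch_read_lock(&example_rwlock);
	/* ... read the shared data ... */
	arch_read_unlock(&example_rwlock);
}

/* The writer runs with IRQs disabled so an interrupt reader on the
 * same CPU cannot deadlock against it. */
static void example_writer(unsigned long flags)
{
	arch_write_lock_flags(&example_rwlock, flags);
	/* ... modify the shared data ... */
	arch_write_unlock(&example_rwlock);
}
#endif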

/*
 * The rwlock word encodes the writer in the sign bit (0x80000000) and
 * the number of active readers in the lower 31 bits.
 */

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;
	/*
	 * Try to bump the reader count once, assuming no writer holds
	 * the lock (writer bit masked off); on any race or an active
	 * writer, fall back to the out-of-line wait loop.
	 */
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	/* Retry the compare-and-swap until the reader count has been
	 * decremented without interference from other CPUs. */
	old = rw->lock;
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	/* Writers may only take a completely free lock (no readers, no
	 * writer); set the writer bit or wait out of line. */
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	/* Drop the writer bit, returning the lock word to 0 (free). */
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}

#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */