/*	$NetBSD: lock.h,v 1.27 2007/10/17 19:57:48 garbled Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

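/*
 * Initialize (i.e. release) a simple lock.  In the kernel this goes
 * through the out-of-line Sunlock helper; in user space it clears
 * bit 0 of the lock word with an interlocked BBCCI instruction.
 */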
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}

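/*
 * Try once to acquire the lock.  Returns non-zero if the lock was
 * acquired and zero if it was already held.  The kernel version goes
 * through the Slocktry helper, which leaves its result in r0; the
 * user space version test-and-sets bit 0 with an interlocked BBSSI.
 */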
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _KERNEL
	__asm volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	__asm volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc");
#endif

	return ret;
}

#ifdef _KERNEL
#if defined(MULTIPROCESSOR)
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
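/*
 * Acquire the lock, spinning until it becomes free.  While spinning,
 * service pending console and DDB IPIs (VAX_LOCK_CHECKS) so that this
 * CPU cannot deadlock against the CPU that currently holds the lock.
 */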
#define	__cpu_simple_lock(__alp)					\
do {									\
	struct cpu_info *__ci = curcpu();				\
									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		int __s;						\
									\
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {		\
			__s = splipi();					\
			cpu_handle_ipi();				\
			splx(__s);					\
		}							\
	}								\
} while (/*CONSTCOND*/0)
#else /* MULTIPROCESSOR */
#define __cpu_simple_lock(__alp)					\
do {									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		;							\
	}								\
} while (/*CONSTCOND*/0)
#endif
#else
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	__asm volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc");
}
#endif /* _KERNEL */

#if 0
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(__alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm volatile ("movl %0,%%r1;jsb Slock"
		: /* No output */
		: "g"(__alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*__alp));
#endif
}
#endif

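/*
 * Release the lock.  As with __cpu_simple_lock_init(), the kernel
 * version goes through the Sunlock helper and the user space version
 * clears bit 0 with an interlocked BBCCI.
 */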
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}

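/*
 * Illustrative sketch, not part of this header: a typical caller
 * initializes the lock once and then brackets its critical section
 * with __cpu_simple_lock()/__cpu_simple_unlock().  The names below
 * (example_lock, example_count, example_setup, example_increment)
 * are hypothetical.
 *
 *	static __cpu_simple_lock_t example_lock;
 *	static int example_count;
 *
 *	void
 *	example_setup(void)
 *	{
 *		__cpu_simple_lock_init(&example_lock);
 *	}
 *
 *	void
 *	example_increment(void)
 *	{
 *		__cpu_simple_lock(&example_lock);
 *		example_count++;
 *		__cpu_simple_unlock(&example_lock);
 *	}
 */
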
#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower. This can cause problems while waiting for r/w
 * spinlocks from a fairly high priority level: IPIs that come in will
 * not be processed, and this can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		__s = splipi();						\
		cpu_handle_ipi();					\
		splx(__s);						\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */
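/*
 * Illustrative sketch, not part of this header: code that spins
 * waiting for a lock can invoke SPINLOCK_SPIN_HOOK on every iteration
 * so pending IPIs are serviced during the wait.  The loop below is a
 * hypothetical example (reusing the hypothetical example_lock above),
 * not the actual machine-independent code.
 *
 *	while (__cpu_simple_lock_try(&example_lock) == 0) {
 *		SPINLOCK_SPIN_HOOK;
 *	}
 */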

/*
 * Memory barriers.  These are no-ops in this port: the functions are
 * empty and no explicit barrier instruction is emitted.
 */
static __inline void mb_read(void);
static __inline void
mb_read(void)
{
}

static __inline void mb_write(void);
static __inline void
mb_write(void)
{
}
#endif /* _VAX_LOCK_H_ */