/*	$NetBSD: lock.h,v 1.34 2022/02/13 13:42:21 riastradh Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#include <sys/param.h>

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif

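/*
 * MD spin-lock primitives behind the MI lock interface: predicates
 * and raw set/clear operations on the lock word, followed by the
 * interlocked lock operations themselves.
 */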
static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
	*__alp = __SIMPLELOCK_UNLOCKED;
}

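/*
 * Try once to take the lock; returns non-zero on success, zero if it
 * was already held.  When built as part of the kernel proper
 * (_HARDKERNEL) this goes through the Slocktry assembly stub;
 * otherwise BBSSI (branch on bit set and set, interlocked) is used
 * directly to test and set bit 0 of the lock word atomically.
 */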
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _HARDKERNEL
	__asm __volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	__asm __volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc", "memory");
#endif

	return ret;
}

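/*
 * Take the lock, spinning until it is acquired.  On a MULTIPROCESSOR
 * kernel the spin loop also services urgent IPIs (VAX_LOCK_CHECKS
 * covers the send-console-character and DDB requests) so that a CPU
 * spinning here cannot deadlock the sender; otherwise a bare BBSSI
 * retry loop is used.
 */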
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
#if defined(_HARDKERNEL) && defined(MULTIPROCESSOR)
	struct cpu_info * const __ci = curcpu();

	while (__cpu_simple_lock_try(__alp) == 0) {
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {
			cpu_handle_ipi();
		}
	}
#else /* _HARDKERNEL && MULTIPROCESSOR */
	__asm __volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc", "memory");
#endif /* _HARDKERNEL && MULTIPROCESSOR */
}

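/*
 * Release the lock.  In a kernel build this goes through the Sunlock
 * assembly stub; otherwise BBCCI (branch on bit clear and clear,
 * interlocked) clears bit 0 of the lock word atomically.
 */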
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _HARDKERNEL
	__asm __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc", "memory");
#endif
}

#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower.  This can cause problems while waiting for r/w
 * spinlocks from a fairly high priority level: IPIs that come in will
 * not be processed, which can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info * const __ci = curcpu();			\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		cpu_handle_ipi();					\
	}								\
} while (/*CONSTCOND*/0)
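/*
 * Illustrative only: the MI spin-wait loops are expected to invoke
 * SPINLOCK_SPIN_HOOK on each failed acquisition attempt, roughly
 *
 *	while (!__cpu_simple_lock_try(&alp)) {
 *		SPINLOCK_SPIN_HOOK;
 *	}
 *
 * so that pending IPIs are drained while we spin.
 */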
#endif /* MULTIPROCESSOR */

#endif /* _VAX_LOCK_H_ */