/*	$NetBSD: lock.h,v 1.23 2013/01/24 10:15:30 matt Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 *
 * NOTE: The SWP insn used here is available only on ARM architecture
 * version 3 and later (as well as 2a).  What we are going to do is
 * expect that the kernel will trap and emulate the insn.  That will
 * be slow, but give us the atomicity that we need.
 */

#ifndef _ARM_LOCK_H_
#define	_ARM_LOCK_H_

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}
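
/*
 * Note: the four accessors above are plain, unsynchronized loads and
 * stores of the lock word; they perform no atomic read-modify-write and
 * issue no memory barrier.  On an MP system the predicates are therefore
 * only hints, e.g. (hypothetical caller):
 *
 *	if (__SIMPLELOCK_LOCKED_P(&lock))
 *		...	the lock was observed held, but may already
 *			have been released by another CPU.
 *
 * Anything needing mutual exclusion must use __cpu_simple_lock() or
 * __cpu_simple_lock_try() below, which are built on an atomic exchange.
 */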

#ifdef _KERNEL
#include <arm/cpufunc.h>

#define	mb_read		drain_writebuf		/* in cpufunc.h */
#define	mb_write	drain_writebuf		/* in cpufunc.h */
#define	mb_memory	drain_writebuf		/* in cpufunc.h */
#endif
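
/*
 * All three barrier flavours map to the same primitive: before ARMv6 the
 * only ordering operation ARM offers is draining the write buffer, which
 * drain_writebuf() (from <arm/cpufunc.h>) does.
 */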

#if defined(_KERNEL)
static __inline unsigned char
__swp(__cpu_simple_lock_t __val, volatile __cpu_simple_lock_t *__ptr)
{
#ifdef _ARM_ARCH_6
	uint32_t __rv;
	__cpu_simple_lock_t __tmp;
	if (sizeof(*__ptr) == 1) {
		__asm volatile(
			"1:\t"
			"ldrexb\t%[__rv], [%[__ptr]]"			"\n\t"
			"cmp\t%[__rv],%[__val]"				"\n\t"
			"strexbne\t%[__tmp], %[__val], [%[__ptr]]"	"\n\t"
			"cmpne\t%[__tmp], #0"				"\n\t"
			"bne\t1b"					"\n\t"
#ifdef _ARM_ARCH_7
			"dmb"
#else
			"mcr\tp15, 0, %[__tmp], c7, c10, 5"
#endif
		    : [__rv] "=&r" (__rv), [__tmp] "=&r"(__tmp)
		    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	} else {
		__asm volatile(
			"1:\t"
			"ldrex\t%[__rv], [%[__ptr]]"			"\n\t"
			"cmp\t%[__rv],%[__val]"				"\n\t"
			"strexne\t%[__tmp], %[__val], [%[__ptr]]"	"\n\t"
			"cmpne\t%[__tmp], #0"				"\n\t"
			"bne\t1b"					"\n\t"
#ifdef _ARM_ARCH_7
			"dmb"
#else
			"mcr\tp15, 0, %[__tmp], c7, c10, 5"
#endif
		    : [__rv] "=&r" (__rv), [__tmp] "=&r"(__tmp)
		    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	}
	return __rv;
#else
	uint32_t __val32;
	__asm volatile("swpb %0, %1, [%2]"
	    : "=&r" (__val32) : "r" (__val), "r" (__ptr) : "memory");
	return __val32;
#endif
}
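
/*
 * The LDREX/STREX loops above implement an atomic exchange: the previous
 * value of the lock word is returned in __rv, and __val is stored only if
 * it differs from what is already there (which also keeps a CPU spinning
 * on a held lock from repeatedly claiming the cache line exclusively).
 * A failed STREX leaves __tmp non-zero and retries from the LDREX.  The
 * trailing DMB (or the pre-v7 CP15 c7, c10, 5 equivalent) prevents
 * accesses inside the critical section from being reordered before the
 * exchange that acquired the lock.
 */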
#else
/*
 * On Cortex-A9 (SMP), SWP no longer guarantees atomic results.  Thus we pad
 * out SWP so that when the A9 generates an undefined exception we can replace
 * the SWP/MOV instructions with the right LDREX/STREX instructions.
 *
 * This is why we force the SWP into the template needed for LDREX/STREX
 * including the extra instructions and extra register for testing the result.
 */
static __inline int
__swp(int __val, volatile int *__ptr)
{
	int __rv, __tmp;
	__asm volatile(
		"1:\t"
#ifdef _ARM_ARCH_6
		"ldrex\t%[__rv], [%[__ptr]]"			"\n\t"
		"cmp\t%[__rv],%[__val]"				"\n\t"
		"strexne\t%[__tmp], %[__val], [%[__ptr]]"	"\n\t"
#else
		"swp\t%[__rv], %[__val], [%[__ptr]]"		"\n\t"
		"cmp\t%[__rv],%[__val]"				"\n\t"
		"movs\t%[__tmp], #0"				"\n\t"
#endif
		"cmpne\t%[__tmp], #0"				"\n\t"
		"bne\t1b"					"\n\t"
#ifdef _ARM_ARCH_7
		"dmb"
#elif defined(_ARM_ARCH_6)
		"mcr\tp15, 0, %[__tmp], c7, c10, 5"
#else
		"nop"
#endif
	    : [__rv] "=&r" (__rv), [__tmp] "=&r"(__tmp)
	    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	return __rv;
}
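
/*
 * Note the deliberate shape: both halves of the template are exactly
 * three instructions (SWP/CMP/MOVS vs. LDREX/CMP/STREXNE) followed by the
 * shared CMPNE/BNE tail, so the undefined-instruction handler can patch
 * the sequence in place without moving any surrounding code.  (This
 * restates the intent described above; the patching itself happens in
 * the kernel, not here.)
 */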
#endif /* _KERNEL */

static __inline void __attribute__((__unused__))
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
#ifdef _ARM_ARCH_7
	__asm __volatile("dsb");
#endif
}

static __inline void __attribute__((__unused__))
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	while (__swp(__SIMPLELOCK_LOCKED, alp) != __SIMPLELOCK_UNLOCKED)
		continue;
}
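
/*
 * __swp() returns the previous value of the lock word, so this spins
 * until the exchange observes __SIMPLELOCK_UNLOCKED, i.e. until this CPU
 * is the one that changed the lock from unlocked to locked.  On ARMv6/v7
 * the barrier at the tail of __swp() supplies the acquire ordering, so no
 * additional barrier is needed here.
 */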

static __inline int __attribute__((__unused__))
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__swp(__SIMPLELOCK_LOCKED, alp) == __SIMPLELOCK_UNLOCKED);
}

static __inline void __attribute__((__unused__))
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

#ifdef _ARM_ARCH_7
	__asm __volatile("dmb");
#endif
	*alp = __SIMPLELOCK_UNLOCKED;
#ifdef _ARM_ARCH_7
	__asm __volatile("dsb");
#endif
}
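
/*
 * Taken together, these form the usual spin lock protocol.  A minimal
 * sketch (hypothetical caller, not part of this header):
 *
 *	static __cpu_simple_lock_t mylock = __SIMPLELOCK_UNLOCKED;
 *
 *	__cpu_simple_lock(&mylock);
 *	... critical section ...
 *	__cpu_simple_unlock(&mylock);
 *
 * In __cpu_simple_unlock(), the leading DMB makes the critical section's
 * accesses visible before the lock word is cleared, and the trailing DSB
 * ensures the releasing store has completed before the caller proceeds.
 */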

#endif /* _ARM_LOCK_H_ */