/*	$NetBSD: lock.h,v 1.20 2012/08/29 07:04:14 matt Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 *
 * NOTE: The SWP insn used here is available only on ARM architecture
 * version 3 and later (as well as 2a).  What we are going to do is
 * expect that the kernel will trap and emulate the insn.  That will
 * be slow, but give us the atomicity that we need.
 */
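/*
 * For reference: SWP (and SWPB for bytes) atomically exchanges a register
 * with a memory location, i.e. it performs "old = *ptr; *ptr = new" as a
 * single indivisible operation.  __swp() below provides that primitive,
 * and the spin lock operations further down are built on top of it.
 */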

#ifndef _ARM_LOCK_H_
#define	_ARM_LOCK_H_

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

#ifdef _KERNEL
#include <arm/cpufunc.h>

#define	mb_read		drain_writebuf		/* in cpufunc.h */
#define	mb_write	drain_writebuf		/* in cpufunc.h */
#define	mb_memory	drain_writebuf		/* in cpufunc.h */
#endif

#if defined(_KERNEL)
static __inline __cpu_simple_lock_t
__swp(__cpu_simple_lock_t __val, volatile __cpu_simple_lock_t *__ptr)
{
#ifdef _ARM_ARCH_6
	__cpu_simple_lock_t __rv, __tmp;
	if (sizeof(*__ptr) == 1) {
		__asm volatile(
			"1:\t"
			"ldrexb\t%[__rv], [%[__ptr]]"			"\n\t"
			"cmp\t%[__rv],%[__val]"				"\n\t"
			"strexbne\t%[__tmp], %[__val], [%[__ptr]]"	"\n\t"
			"cmpne\t%[__tmp], #0"				"\n\t"
			"bne\t1b"					"\n\t"
#ifdef _ARM_ARCH_7
			"dmb"
#else
			"mrc p15, 0, %[__tmp], c7, c10, 5"
#endif
		    : [__rv] "=&r" (__rv), [__tmp] "=&r"(__tmp)
		    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	} else {
		__asm volatile(
			"1:\t"
			"ldrex\t%[__rv], [%[__ptr]]"			"\n\t"
			"cmp\t%[__rv],%[__val]"				"\n\t"
			"strexne\t%[__tmp], %[__val], [%[__ptr]]"	"\n\t"
			"cmpne\t%[__tmp], #0"				"\n\t"
			"bne\t1b"					"\n\t"
#ifdef _ARM_ARCH_7
			"nop"
#else
			"mrc p15, 0, %[__tmp], c7, c10, 5"
#endif
		    : [__rv] "=&r" (__rv), [__tmp] "=&r"(__tmp)
		    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	}
	return __rv;
#else
	__asm volatile("swpb %0, %1, [%2]"
	    : "=&r" (__val) : "r" (__val), "r" (__ptr) : "memory");
	return __val;
#endif
}
#else
/*
 * On Cortex-A9 (SMP), SWP no longer guarantees atomic results.  Thus we pad
 * out SWP so that when the A9 generates an undefined exception we can replace
 * the SWP/MOV instructions with the right LDREX/STREX instructions.
 *
 * This is why we force the SWP into the template needed for LDREX/STREX
 * including the extra instructions and extra register for testing the result.
 */
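/*
 * Note that in the pre-v6 sequence below the SWP itself already performs
 * the exchange atomically; the trailing cmp/movs are only padding to fill
 * the LDREX/STREX-shaped template.  Because "movs %[__tmp], #0" sets the
 * Z flag, the conditional cmpne/bne that follow fall through and the loop
 * body executes exactly once when SWP is used.
 */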
static __inline int
__swp(int __val, volatile int *__ptr)
{
	int __rv, __tmp;
	__asm volatile(
		"1:\t"
#ifdef _ARM_ARCH_6
		"ldrex\t%[__rv], [%[__ptr]]"			"\n\t"
		"cmp\t%[__rv],%[__val]"				"\n\t"
		"strexne\t%[__tmp], %[__val], [%[__ptr]]"	"\n\t"
#else
		"swp\t%[__rv], %[__val], [%[__ptr]]"		"\n\t"
		"cmp\t%[__rv],%[__val]"				"\n\t"
		"movs\t%[__tmp], #0"				"\n\t"
#endif
		"cmpne\t%[__tmp], #0"				"\n\t"
		"bne\t1b"					"\n\t"
#ifdef _ARM_ARCH_7
		"dmb"
#elif defined(_ARM_ARCH_6)
		"mrc p15, 0, %[__tmp], c7, c10, 5"
#else
		"nop"
#endif
	    : [__rv] "=&r" (__rv), [__tmp] "=&r"(__tmp)
	    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	return __rv;
}
#endif /* _KERNEL */

static __inline void __attribute__((__unused__))
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
#ifdef _ARM_ARCH_7
	__asm __volatile("dsb");
#endif
}

static __inline void __attribute__((__unused__))
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	while (__swp(__SIMPLELOCK_LOCKED, alp) != __SIMPLELOCK_UNLOCKED)
		continue;
}

static __inline int __attribute__((__unused__))
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__swp(__SIMPLELOCK_LOCKED, alp) == __SIMPLELOCK_UNLOCKED);
}

static __inline void __attribute__((__unused__))
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

#ifdef _ARM_ARCH_7
	__asm __volatile("dmb");
#endif
	*alp = __SIMPLELOCK_UNLOCKED;
#ifdef _ARM_ARCH_7
	__asm __volatile("dsb");
#endif
}

#endif /* _ARM_LOCK_H_ */
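/*
 * Usage sketch (illustrative only; the names below are hypothetical and not
 * part of this header): initialise the lock once, then bracket the critical
 * section with __cpu_simple_lock()/__cpu_simple_unlock().
 *
 *	static __cpu_simple_lock_t example_lock;
 *	static int example_count;
 *
 *	void
 *	example_setup(void)
 *	{
 *		__cpu_simple_lock_init(&example_lock);
 *	}
 *
 *	void
 *	example_increment(void)
 *	{
 *		__cpu_simple_lock(&example_lock);
 *		example_count++;
 *		__cpu_simple_unlock(&example_lock);
 *	}
 *
 * __cpu_simple_lock_try() can be used instead of __cpu_simple_lock() when
 * the caller wants to fail rather than spin; it returns non-zero on success.
 */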