/*	$NetBSD: lock.h,v 1.28 2014/08/10 06:23:13 matt Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 *
 * NOTE: The SWP insn used here is available only on ARM architecture
 * version 3 and later (as well as 2a).  What we are going to do is
 * expect that the kernel will trap and emulate the insn.  That will
 * be slow, but give us the atomicity that we need.
 */

#ifndef _ARM_LOCK_H_
#define	_ARM_LOCK_H_
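/*
 * Illustrative usage sketch (a hedged example, not part of this header's
 * API; the variable name __example_lock is hypothetical).  All functions
 * referenced here are defined below in this file:
 *
 *	static __cpu_simple_lock_t __example_lock;
 *
 *	__cpu_simple_lock_init(&__example_lock);
 *	__cpu_simple_lock(&__example_lock);	(spins until acquired)
 *	... critical section ...
 *	__cpu_simple_unlock(&__example_lock);
 */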
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

#ifdef _KERNEL
#include <arm/cpufunc.h>

#define	mb_read		drain_writebuf		/* in cpufunc.h */
#define	mb_write	drain_writebuf		/* in cpufunc.h */
#define	mb_memory	drain_writebuf		/* in cpufunc.h */
#endif

#ifdef _ARM_ARCH_6
static __inline unsigned int
__arm_load_exclusive(__cpu_simple_lock_t *__alp)
{
	unsigned int __rv;
	if (sizeof(*__alp) == 1) {
		__asm __volatile("ldrexb\t%0,[%1]" : "=r"(__rv) : "r"(__alp));
	} else {
		__asm __volatile("ldrex\t%0,[%1]" : "=r"(__rv) : "r"(__alp));
	}
	return __rv;
}

/* returns 0 on success and 1 on failure */
static __inline unsigned int
__arm_store_exclusive(__cpu_simple_lock_t *__alp, unsigned int __val)
{
	unsigned int __rv;
	if (sizeof(*__alp) == 1) {
		__asm __volatile("strexb\t%0,%1,[%2]"
		    : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory");
	} else {
		__asm __volatile("strex\t%0,%1,[%2]"
		    : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory");
	}
	return __rv;
}
#elif defined(_KERNEL)
static __inline unsigned char
__swp(unsigned char __val, __cpu_simple_lock_t *__ptr)
{
	uint32_t __val32;
	__asm volatile("swpb %0, %1, [%2]"
	    : "=&r" (__val32) : "r" (__val), "r" (__ptr) : "memory");
	return __val32;
}
#else
/*
 * On MP Cortex, SWP no longer guarantees atomic results.  Thus we pad
 * out SWP so that when the cpu generates an undefined exception we can
 * replace the SWP/MOV instructions with the right LDREX/STREX
 * instructions.
 *
 * This is why we force the SWP into the template needed for LDREX/STREX
 * including the extra instructions and extra register for testing the
 * result.
 */
static __inline int
__swp(int __val, __cpu_simple_lock_t *__ptr)
{
	int __tmp, __rv;
	__asm volatile(
#if 1
	"1:\t"	"swp	%[__rv], %[__val], [%[__ptr]]"
	"\n\t"	"b	2f"
#else
	"1:\t"	"ldrex	%[__rv],[%[__ptr]]"
	"\n\t"	"strex	%[__tmp],%[__val],[%[__ptr]]"
#endif
	"\n\t"	"cmp	%[__tmp],#0"
	"\n\t"	"bne	1b"
	"\n"	"2:"
	    : [__rv] "=&r" (__rv), [__tmp] "=&r" (__tmp)
	    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	return __rv;
}
#endif /* !_ARM_ARCH_6 */
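/*
 * A minimal sketch of how an atomic test-and-set falls out of the
 * load/store-exclusive pair above (assumes _ARM_ARCH_6; the function
 * name __example_test_and_set is hypothetical and not part of this
 * header, hence the #if 0):
 */
#if 0
static __inline unsigned int
__example_test_and_set(__cpu_simple_lock_t *__alp)
{
	unsigned int __old;
	do {
		/* load the current value and open an exclusive monitor */
		__old = __arm_load_exclusive(__alp);
		/* store succeeds (returns 0) only if still exclusive */
	} while (__arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED));
	return __old;	/* __SIMPLELOCK_UNLOCKED means we took the lock */
}
#endif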
static __inline void
__arm_membar_producer(void)
{
#ifdef _ARM_ARCH_7
	__asm __volatile("dsb");
#elif defined(_ARM_ARCH_6)
	__asm __volatile("mcr\tp15,0,%0,c7,c10,4" :: "r"(0));
#endif
}

static __inline void
__arm_membar_consumer(void)
{
#ifdef _ARM_ARCH_7
	__asm __volatile("dmb");
#elif defined(_ARM_ARCH_6)
	__asm __volatile("mcr\tp15,0,%0,c7,c10,5" :: "r"(0));
#endif
}

static __inline void __unused
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{

	*__alp = __SIMPLELOCK_UNLOCKED;
	__arm_membar_producer();
}

#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
static __inline void __unused
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
#ifdef _ARM_ARCH_6
	__arm_membar_consumer();
	do {
		/* spin */
	} while (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED
	    || __arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED));
	__arm_membar_producer();
#else
	while (__swp(__SIMPLELOCK_LOCKED, __alp) != __SIMPLELOCK_UNLOCKED)
		continue;
#endif
}
#else
void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif

#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
static __inline int __unused
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
#ifdef _ARM_ARCH_6
	__arm_membar_consumer();
	do {
		if (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED) {
			return 0;
		}
	} while (__arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED));
	__arm_membar_producer();
	return 1;
#else
	return (__swp(__SIMPLELOCK_LOCKED, __alp) == __SIMPLELOCK_UNLOCKED);
#endif
}
#else
int __cpu_simple_lock_try(__cpu_simple_lock_t *);
#endif

static __inline void __unused
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{

#ifdef _ARM_ARCH_8
	if (sizeof(*__alp) == 1) {
		__asm __volatile("stlb\t%0, [%1]"
		    :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory");
	} else {
		__asm __volatile("stl\t%0, [%1]"
		    :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory");
	}
#else
	__arm_membar_consumer();
	*__alp = __SIMPLELOCK_UNLOCKED;
	__arm_membar_producer();
#endif
}

#endif /* _ARM_LOCK_H_ */
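/*
 * Hedged usage sketch for the try-lock path (a hypothetical caller, not
 * part of this header; __example_lock is an assumed name, and it is
 * assumed __SIMPLELOCK_UNLOCKED is a valid static initializer on this
 * port, hence the #if 0):
 */
#if 0
static __cpu_simple_lock_t __example_lock = __SIMPLELOCK_UNLOCKED;

static int
__example_trylock_and_work(void)
{
	if (!__cpu_simple_lock_try(&__example_lock))
		return 0;	/* lock was held; caller backs off */
	/* ... critical section ... */
	__cpu_simple_unlock(&__example_lock);
	return 1;
}
#endif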