/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)		\
{										\
	u##sz ret;								\
	unsigned long tmp;							\
										\
	asm volatile(ARM64_LSE_ATOMIC_INSN(					\
	/* LL/SC */								\
	"	prfm	pstl1strm, %2\n"					\
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n"				\
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"			\
	"	cbnz	%w1, 1b\n"						\
	"	" #mb,								\
	/* LSE atomics */							\
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"		\
		__nops(3)							\
	"	" #nop_lse)							\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)			\
	: "r" (x)								\
	: cl);									\
										\
	return ret;								\
}

__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE

#define __XCHG_GEN(sfx)							\
static __always_inline unsigned long					\
__arch_xchg##sfx(unsigned long x, volatile void *ptr, int size)		\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_16(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_32(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_64(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__arch_xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

/* xchg */
#define arch_xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
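
/*
 * Illustrative usage sketch; 'flag' and 'old' below are hypothetical
 * names, not definitions from this file. The instruction mapping
 * follows from the __XCHG_CASE() instantiations above.
 *
 *	unsigned long flag, old;
 *
 *	old = arch_xchg_relaxed(&flag, 1UL);	// LSE: swp   (no ordering)
 *	old = arch_xchg_acquire(&flag, 1UL);	// LSE: swpa  (acquire)
 *	old = arch_xchg_release(&flag, 1UL);	// LSE: swpl  (release)
 *	old = arch_xchg(&flag, 1UL);		// LSE: swpal; LL/SC: ldxr/stlxr loop + dmb ish
 *
 * All four variants return the previous value of 'flag'.
 */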

#define __CMPXCHG_CASE(name, sz)					\
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,	\
					      u##sz old,		\
					      u##sz new)		\
{									\
	return __lse_ll_sc_body(_cmpxchg_case_##name##sz,		\
				ptr, old, new);				\
}

__CMPXCHG_CASE(    ,  8)
__CMPXCHG_CASE(    , 16)
__CMPXCHG_CASE(    , 32)
__CMPXCHG_CASE(    , 64)
__CMPXCHG_CASE(acq_,  8)
__CMPXCHG_CASE(acq_, 16)
__CMPXCHG_CASE(acq_, 32)
__CMPXCHG_CASE(acq_, 64)
__CMPXCHG_CASE(rel_,  8)
__CMPXCHG_CASE(rel_, 16)
__CMPXCHG_CASE(rel_, 32)
__CMPXCHG_CASE(rel_, 64)
__CMPXCHG_CASE(mb_,  8)
__CMPXCHG_CASE(mb_, 16)
__CMPXCHG_CASE(mb_, 32)
__CMPXCHG_CASE(mb_, 64)

#undef __CMPXCHG_CASE

#define __CMPXCHG128(name)						\
static inline u128 __cmpxchg128##name(volatile u128 *ptr,		\
				      u128 old, u128 new)		\
{									\
	return __lse_ll_sc_body(_cmpxchg128##name,			\
				ptr, old, new);				\
}

__CMPXCHG128(   )
__CMPXCHG128(_mb)

#undef __CMPXCHG128

#define __CMPXCHG_GEN(sfx)						\
static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	case 2:								\
		return __cmpxchg_case##sfx##_16(ptr, old, new);		\
	case 4:								\
		return __cmpxchg_case##sfx##_32(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_64(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local		arch_cmpxchg_relaxed
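
/*
 * Illustrative usage sketch; 'counter' below is a hypothetical pointer
 * to unsigned long, not defined in this file. arch_cmpxchg() returns
 * the value actually found at the address, so the canonical retry loop
 * is:
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = READ_ONCE(*counter);
 *		new = old + 1;
 *	} while (arch_cmpxchg(counter, old, new) != old);
 *
 * The fully ordered arch_cmpxchg() is used here; the _relaxed, _acquire
 * and _release wrappers select the weaker orderings built above.
 */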

/* cmpxchg64 */
#define arch_cmpxchg64_relaxed		arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire		arch_cmpxchg_acquire
#define arch_cmpxchg64_release		arch_cmpxchg_release
#define arch_cmpxchg64			arch_cmpxchg
#define arch_cmpxchg64_local		arch_cmpxchg_local

/* cmpxchg128 */
#define system_has_cmpxchg128()		1

#define arch_cmpxchg128(ptr, o, n)					\
({									\
	__cmpxchg128_mb((ptr), (o), (n));				\
})

#define arch_cmpxchg128_local(ptr, o, n)				\
({									\
	__cmpxchg128((ptr), (o), (n));					\
})

#define __CMPWAIT_CASE(w, sfx, sz)					\
static inline void __cmpwait_case_##sz(volatile void *ptr,		\
				       unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(u##sz *)ptr)			\
	: [val] "r" (val));						\
}

__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)						\
static __always_inline void __cmpwait##sfx(volatile void *ptr,		\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_8(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_16(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_32(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_64(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))

#endif /* __ASM_CMPXCHG_H */
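
/*
 * Note on the __CMPWAIT_CASE() sequence above: the sevl/wfe pair drains
 * the CPU's event register, ldxr then arms the exclusive monitor, and a
 * store to the location by another observer generates the event that
 * wakes the final wfe. Wake-ups can be spurious, so callers must
 * re-check the value. Illustrative caller, modelled on the kernel's
 * smp_cond_load_relaxed() ('ptr' and 'val' are hypothetical):
 *
 *	while (READ_ONCE(*ptr) == val)
 *		__cmpwait_relaxed(ptr, val);
 */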