/* atomic.h revision 66458 */
1/*- 2 * Copyright (c) 1998 Doug Rabson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD: head/sys/ia64/include/atomic.h 66458 2000-09-29 13:46:07Z dfr $ 27 */ 28 29#ifndef _MACHINE_ATOMIC_H_ 30#define _MACHINE_ATOMIC_H_ 31 32/* 33 * Various simple arithmetic on memory which is atomic in the presence 34 * of interrupts and SMP safe. 35 */ 36 37/* 38 * Everything is built out of cmpxchg. 39 */ 40#define IA64_CMPXCHG(sz, sem, type, p, cmpval, newval) \ 41({ \ 42 type _cmpval = cmpval; \ 43 type _newval = newval; \ 44 volatile type *_p = (volatile type *) p; \ 45 type _ret; \ 46 \ 47 __asm __volatile ( \ 48 "mov ar.ccv=%2;;\n\t" \ 49 "cmpxchg" #sz "." 
#sem " %0=%4,%3,ar.ccv\n\t" \ 50 : "=r" (_ret), "=m" (*_p) \ 51 : "r" (_cmpval), "r" (_newval), "m" (*_p) \ 52 : "memory"); \ 53 _ret; \ 54}) 55 56/* 57 * Some common forms of cmpxch. 58 */ 59static __inline u_int32_t 60ia64_cmpxchg_acq_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval) 61{ 62 return IA64_CMPXCHG(4, acq, u_int32_t, p, cmpval, newval); 63} 64 65static __inline u_int32_t 66ia64_cmpxchg_rel_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval) 67{ 68 return IA64_CMPXCHG(4, rel, u_int32_t, p, cmpval, newval); 69} 70 71static __inline u_int64_t 72ia64_cmpxchg_acq_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval) 73{ 74 return IA64_CMPXCHG(8, acq, u_int64_t, p, cmpval, newval); 75} 76 77static __inline u_int64_t 78ia64_cmpxchg_rel_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval) 79{ 80 return IA64_CMPXCHG(8, rel, u_int64_t, p, cmpval, newval); 81} 82 83/* 84 * Store with release semantics is used to release locks. 85 */ 86static __inline void 87ia64_st_rel_32(volatile u_int32_t* p, u_int32_t v) 88{ 89 __asm __volatile ("st4.rel %0=%1" 90 : "=m" (*p) 91 : "r" (v) 92 : "memory"); 93} 94 95static __inline void 96ia64_st_rel_64(volatile u_int64_t* p, u_int64_t v) 97{ 98 __asm __volatile ("st8.rel %0=%1" 99 : "=m" (*p) 100 : "r" (v) 101 : "memory"); 102} 103 104#define IA64_ATOMIC(sz, type, name, op) \ 105 \ 106static __inline void \ 107atomic_##name(volatile type *p, type v) \ 108{ \ 109 type old; \ 110 do { \ 111 old = *p; \ 112 } while (IA64_CMPXCHG(sz, acq, type, p, old, old op v) != old); \ 113} 114 115IA64_ATOMIC(1, u_int8_t, set_8, |) 116IA64_ATOMIC(2, u_int16_t, set_16, |) 117IA64_ATOMIC(4, u_int32_t, set_32, |) 118IA64_ATOMIC(8, u_int64_t, set_64, |) 119 120IA64_ATOMIC(1, u_int8_t, clear_8, &~) 121IA64_ATOMIC(2, u_int16_t, clear_16, &~) 122IA64_ATOMIC(4, u_int32_t, clear_32, &~) 123IA64_ATOMIC(8, u_int64_t, clear_64, &~) 124 125IA64_ATOMIC(1, u_int8_t, add_8, +) 126IA64_ATOMIC(2, u_int16_t, add_16, +) 
127IA64_ATOMIC(4, u_int32_t, add_32, +) 128IA64_ATOMIC(8, u_int64_t, add_64, +) 129 130IA64_ATOMIC(1, u_int8_t, subtract_8, -) 131IA64_ATOMIC(2, u_int16_t, subtract_16, -) 132IA64_ATOMIC(4, u_int32_t, subtract_32, -) 133IA64_ATOMIC(8, u_int64_t, subtract_64, -) 134 135#undef IA64_ATOMIC 136#undef IA64_CMPXCHG 137 138#define atomic_set_char atomic_set_8 139#define atomic_clear_char atomic_clear_8 140#define atomic_add_char atomic_add_8 141#define atomic_subtract_char atomic_subtract_8 142 143#define atomic_set_short atomic_set_16 144#define atomic_clear_short atomic_clear_16 145#define atomic_add_short atomic_add_16 146#define atomic_subtract_short atomic_subtract_16 147 148#define atomic_set_int atomic_set_32 149#define atomic_clear_int atomic_clear_32 150#define atomic_add_int atomic_add_32 151#define atomic_subtract_int atomic_subtract_32 152 153#define atomic_set_long atomic_set_64 154#define atomic_clear_long atomic_clear_64 155#define atomic_add_long atomic_add_64 156#define atomic_subtract_long atomic_subtract_64 157 158/* 159 * Atomically compare the value stored at *p with cmpval and if the 160 * two values are equal, update the value of *p with newval. Returns 161 * zero if the compare failed, nonzero otherwise. 162 */ 163static __inline int 164atomic_cmpset_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval) 165{ 166 return ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval; 167} 168 169/* 170 * Atomically compare the value stored at *p with cmpval and if the 171 * two values are equal, update the value of *p with newval. Returns 172 * zero if the compare failed, nonzero otherwise. 
173 */ 174static __inline int 175atomic_cmpset_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval) 176{ 177 return ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval; 178} 179 180#define atomic_cmpset_int atomic_cmpset_32 181#define atomic_cmpset_long atomic_cmpset_64 182 183static __inline int 184atomic_cmpset_ptr(volatile void *dst, void *exp, void *src) 185{ 186 return atomic_cmpset_long((volatile u_long *)dst, 187 (u_long)exp, (u_long)src); 188} 189 190#endif /* ! _MACHINE_ATOMIC_H_ */ 191