/linux-master/tools/arch/x86/include/asm/
atomic.h
    9: #define LOCK_PREFIX "\n\tlock; "
   52: asm volatile(LOCK_PREFIX "incl %0"
   66: GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
   76: GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, "Ir", nr, "%0", "c");
   81: GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, "Ir", nr, "%0", "c");
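For orientation, a minimal standalone sketch (compilable as ordinary userspace C on x86, not the header itself) of how the prefix string defined at line 9 is pasted in front of an instruction, as in the incl at line 52; the wrapper and main() are illustrative:

#include <stdio.h>

/* The prefix string is concatenated in front of the mnemonic, so the
 * read-modify-write becomes a locked (SMP-atomic) instruction. */
#define LOCK_PREFIX "\n\tlock; "

static inline void atomic_inc(int *counter)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (*counter));
}

int main(void)
{
	int v = 41;

	atomic_inc(&v);
	printf("%d\n", v);	/* prints 42 */
	return 0;
}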
cmpxchg.h
   84: __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
/linux-master/arch/x86/include/asm/
atomic.h
   33: asm volatile(LOCK_PREFIX "addl %1,%0"
   40: asm volatile(LOCK_PREFIX "subl %1,%0"
   47: return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
   53: asm volatile(LOCK_PREFIX "incl %0"
   60: asm volatile(LOCK_PREFIX "decl %0"
   67: return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
   73: return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
   79: return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
  127: asm volatile(LOCK_PREFIX "andl %1,%0"
  145: asm volatile(LOCK_PREFIX "or [all...]
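The GEN_UNARY_RMWcc/GEN_BINARY_RMWcc uses above return the condition flag left by the locked instruction. A hedged sketch of that idea using GCC's flag-output constraints; the function below is an illustration, not the kernel macro expansion:

#include <stdbool.h>

/* Atomically decrement *counter and report whether it reached zero by
 * capturing ZF from "lock decl" through the "=@ccz" flag output. */
static inline bool atomic_dec_and_test(int *counter)
{
	bool zero;

	asm volatile("lock; decl %0"
		     : "+m" (*counter), "=@ccz" (zero)
		     : : "memory");
	return zero;
}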
atomic64_64.h
   25: asm volatile(LOCK_PREFIX "addq %1,%0"
   32: asm volatile(LOCK_PREFIX "subq %1,%0"
   39: return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
   45: asm volatile(LOCK_PREFIX "incq %0"
   53: asm volatile(LOCK_PREFIX "decq %0"
   61: return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
   67: return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
   73: return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
  121: asm volatile(LOCK_PREFIX "andq %1,%0"
  139: asm volatile(LOCK_PREFIX "or [all...]
qspinlock_paravirt.h
   45: LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t" \
bitops.h
   55: asm volatile(LOCK_PREFIX "orb %b1,%0"
   60: asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
   75: asm volatile(LOCK_PREFIX "andb %b1,%0"
   79: asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
  101: asm volatile(LOCK_PREFIX "xorb %2,%1"
  125: asm volatile(LOCK_PREFIX "xorb %b1,%0"
  129: asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
  137: return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
  161: return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
  200: return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZ [all...]
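The locked bts/btr/btc forms at lines 60, 79 and 129 and the RMWcc variants return the old bit via the carry flag. A userspace-style sketch of the test-and-set case, assuming a 64-bit build (the kernel picks the btsq/btsl suffix via __ASM_SIZE); names here are illustrative:

#include <stdbool.h>

/* Atomically set bit nr in *addr and return its previous value, taken
 * from the carry flag that "lock bts" leaves behind. */
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool old;

	asm volatile("lock; btsq %2, %0"
		     : "+m" (*addr), "=@ccc" (old)
		     : "Ir" (nr)
		     : "memory");
	return old;
}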
futex.h
   35: "3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
   59: unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
   90: "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
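The xaddl at line 59 is a fetch-and-add: the old value comes back in the source register. A hedged sketch of just that operation, without the uaccess/exception-table plumbing the futex code wraps around it:

/* Atomically add val to *ptr and return the previous value. */
static inline int atomic_fetch_add_sketch(int *ptr, int val)
{
	asm volatile("lock; xaddl %0, %1"
		     : "+r" (val), "+m" (*ptr)
		     : : "memory");
	return val;	/* xadd wrote the old *ptr back into val */
}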
cmpxchg_32.h
   25: asm volatile(LOCK_PREFIX "cmpxchg8b %1"
   52: asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]"
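On 32-bit builds a 64-bit compare-and-exchange has to go through cmpxchg8b, with the expected value in edx:eax and the replacement in ecx:ebx. A rough sketch for a 32-bit compile (illustrative names, assumes a CPU with cmpxchg8b):

#include <stdbool.h>

/* If *ptr still equals *oldp, install new and return true; otherwise
 * copy the current value back into *oldp and return false. The "+A"
 * constraint pins the 64-bit expected value to the edx:eax pair. */
static inline bool try_cmpxchg64_sketch(unsigned long long *ptr,
					unsigned long long *oldp,
					unsigned long long new)
{
	bool ok;

	asm volatile("lock; cmpxchg8b %1"
		     : "=@ccz" (ok), "+m" (*ptr), "+A" (*oldp)
		     : "b" ((unsigned int)new),
		       "c" ((unsigned int)(new >> 32))
		     : "memory");
	return ok;
}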
cmpxchg_64.h
   46: return __arch_cmpxchg128(ptr, old, new, LOCK_PREFIX);
   78: return __arch_try_cmpxchg128(ptr, oldp, new, LOCK_PREFIX);
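The 128-bit helpers follow the same pattern with cmpxchg16b, which needs a 16-byte-aligned operand and the rdx:rax / rcx:rbx register pairs. A hedged sketch (illustrative struct and names; x86-64 only, compile with -mcx16):

#include <stdbool.h>

/* 16-byte aligned 128-bit value split into the halves cmpxchg16b uses. */
struct u128_sketch {
	unsigned long long lo;
	unsigned long long hi;
} __attribute__((aligned(16)));

static inline bool try_cmpxchg128_sketch(struct u128_sketch *ptr,
					 struct u128_sketch *oldp,
					 struct u128_sketch new)
{
	bool ok;

	asm volatile("lock; cmpxchg16b %1"
		     : "=@ccz" (ok), "+m" (*ptr),
		       "+a" (oldp->lo), "+d" (oldp->hi)
		     : "b" (new.lo), "c" (new.hi)
		     : "memory");
	return ok;
}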
qspinlock.h
   23: val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
alternative.h
   24: * The LOCK_PREFIX macro defined here replaces the LOCK and
   25: * LOCK_PREFIX macros used everywhere in the source tree.
   50: #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
   54: #define LOCK_PREFIX ""
  358: .macro LOCK_PREFIX
  366: .macro LOCK_PREFIX
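Besides emitting "lock; ", the SMP definition at line 50 records where each prefix lives so a kernel that ends up running on a single CPU can patch the prefixes out at boot. A hedged sketch of that mechanism, close in spirit to the header but not guaranteed to match it byte for byte:

/* On SMP builds, push the address of the lock prefix into the .smp_locks
 * section; the alternatives code can later rewrite the prefix byte when
 * only one CPU is online. On UP builds the prefix is simply empty. */
#ifdef CONFIG_SMP
#define LOCK_PREFIX_HERE \
	".pushsection .smp_locks,\"a\"\n" \
	".balign 4\n" \
	".long 671f - .\n" /* offset to the prefix */ \
	".popsection\n" \
	"671:"
#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
#else /* !CONFIG_SMP */
#define LOCK_PREFIX ""
#endif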
cmpxchg.h
    7: #include <asm/alternative.h> /* Provides LOCK_PREFIX */
  134: __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
  222: __raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
  246: #define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
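A sketch of the 4-byte compare-and-exchange these macros expand to, with success taken from ZF the way the try_cmpxchg form does; a userspace approximation with illustrative names:

#include <stdbool.h>

/* cmpxchg implicitly compares against eax: on success *ptr is replaced
 * by new and ZF is set; on failure the current value lands in *oldp. */
static inline bool try_cmpxchg_int_sketch(int *ptr, int *oldp, int new)
{
	bool ok;

	asm volatile("lock; cmpxchgl %3, %1"
		     : "=@ccz" (ok), "+m" (*ptr), "+a" (*oldp)
		     : "r" (new)
		     : "memory");
	return ok;
}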
uaccess.h
  379: "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
  398: "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
  419: "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
  450: "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
/linux-master/arch/x86/lib/
atomic64_cx8_32.S
   14: /* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
   15: LOCK_PREFIX
   26: /* we don't need LOCK_PREFIX since aligned 64-bit writes
   36: LOCK_PREFIX
   60: LOCK_PREFIX
   88: LOCK_PREFIX
  113: LOCK_PREFIX
  143: LOCK_PREFIX
  172: LOCK_PREFIX
/linux-master/arch/x86/kvm/
xen.c
  619: asm volatile(LOCK_PREFIX "orq %0, %1\n"
  621: LOCK_PREFIX "andq %0, %2\n"
  631: asm volatile(LOCK_PREFIX "orl %0, %1\n"
  633: LOCK_PREFIX "andl %0, %2\n"
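The Xen event-channel code above uses locked or/and to set and mask pending bits in guest-shared memory. A small hedged sketch of the same pattern for one 64-bit word (helper name is illustrative):

/* Atomically OR mask into *word; "er" allows a sign-extended 32-bit
 * immediate or a register, matching the kernel snippets above. */
static inline void atomic_or_u64_sketch(unsigned long long *word,
					unsigned long long mask)
{
	asm volatile("lock; orq %1, %0"
		     : "+m" (*word)
		     : "er" (mask)
		     : "memory");
}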