/*-
 * Copyright (c) 2001 Jake Burkholder.
 * Copyright (c) 2011 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ASMACROS_H_
#define _MACHINE_ASMACROS_H_

#ifdef _KERNEL

/*
 * Normal and alternate %g6 point to the pcb of the current process.  Normal,
 * alternate and interrupt %g7 point to per-cpu data.
 */
#define	PCB_REG		%g6
#define	PCPU_REG	%g7

/*
 * Alternate %g5 points to a per-cpu panic stack, which is used as a last
 * resort, and for temporarily saving alternate globals.
 */
#define	ASP_REG		%g5

#ifdef LOCORE

/*
 * The ATOMIC_* macros below all share one shape: load the current value
 * into r2, compute the new value into r3, then use compare-and-swap
 * (casa/casxa with ASI_N, the default nucleus ASI) to store r3 only if
 * memory still holds r2.  cas writes the value actually found in memory
 * back into r3, so "cmp r2, r3 / bne" detects a racing update and
 * retries.  "9:" is a GNU as numeric local label; "9b" branches back to
 * it.  The final "mov r3, r2" sits in the branch delay slot, so on each
 * iteration r2 is refreshed with the value just observed in memory; on
 * fall-through, r2 holds the value that was replaced.
 * %icc / casa / lduw are used for 32-bit operands, %xcc / casxa / ldx
 * for 64-bit ones.  r1 holds the address and is preserved; r2 and r3
 * are scratch.
 */

/*
 * Atomically decrement an integer in memory.
 */
#define	ATOMIC_DEC_INT(r1, r2, r3) \
	lduw	[r1], r2 ; \
9:	sub	r2, 1, r3 ; \
	casa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%icc, 9b ; \
	 mov	r3, r2

/*
 * Atomically increment an integer in memory.
 */
#define	ATOMIC_INC_INT(r1, r2, r3) \
	lduw	[r1], r2 ; \
9:	add	r2, 1, r3 ; \
	casa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%icc, 9b ; \
	 mov	r3, r2

/*
 * Atomically increment a long in memory.
 */
#define	ATOMIC_INC_LONG(r1, r2, r3) \
	ldx	[r1], r2 ; \
9:	add	r2, 1, r3 ; \
	casxa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	 mov	r3, r2

/*
 * Atomically clear a number of bits of an integer in memory.
 */
#define	ATOMIC_CLEAR_INT(r1, r2, r3, bits) \
	lduw	[r1], r2 ; \
9:	andn	r2, bits, r3 ; \
	casa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%icc, 9b ; \
	 mov	r3, r2

/*
 * Atomically clear a number of bits of a long in memory.
 */
#define	ATOMIC_CLEAR_LONG(r1, r2, r3, bits) \
	ldx	[r1], r2 ; \
9:	andn	r2, bits, r3 ; \
	casxa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	 mov	r3, r2

/*
 * Atomically load an integer from memory.
 * cas with %g0 (always zero) as the compare value swaps in val (cleared
 * to 0 first), i.e. it only "stores" 0 when memory already holds 0 —
 * a read with the atomicity of a compare-and-swap; val receives the
 * value read.
 */
#define	ATOMIC_LOAD_INT(r1, val) \
	clr	val ; \
	casa	[r1] ASI_N, %g0, val

/*
 * Atomically load a long from memory.
 */
#define	ATOMIC_LOAD_LONG(r1, val) \
	clr	val ; \
	casxa	[r1] ASI_N, %g0, val

/*
 * Atomically set a number of bits of an integer in memory.
 */
#define	ATOMIC_SET_INT(r1, r2, r3, bits) \
	lduw	[r1], r2 ; \
9:	or	r2, bits, r3 ; \
	casa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%icc, 9b ; \
	 mov	r3, r2

/*
 * Atomically set a number of bits of a long in memory.
 */
#define	ATOMIC_SET_LONG(r1, r2, r3, bits) \
	ldx	[r1], r2 ; \
9:	or	r2, bits, r3 ; \
	casxa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	 mov	r3, r2

/*
 * Atomically store an integer in memory.
 * On success r2 holds the previous value.
 */
#define	ATOMIC_STORE_INT(r1, r2, r3, val) \
	lduw	[r1], r2 ; \
9:	mov	val, r3 ; \
	casa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%icc, 9b ; \
	 mov	r3, r2

/*
 * Atomically store a long in memory.
 */
#define	ATOMIC_STORE_LONG(r1, r2, r3, val) \
	ldx	[r1], r2 ; \
9:	mov	val, r3 ; \
	casxa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	 mov	r3, r2

/*
 * Address a per-cpu datum as a memory operand (PCPU) or compute its
 * address into reg (PCPU_ADDR).  The PC_* offsets are generated
 * elsewhere (presumably by genassym — not visible in this file).
 */
#define	PCPU(member)	PCPU_REG + PC_ ## member
#define	PCPU_ADDR(member, reg) \
	add	PCPU_REG, PC_ ## member, reg

/*
 * Trap into the debugger.  ta = trap always; software trap number 1 is
 * presumably the kernel breakpoint trap — confirm against the trap
 * table.
 */
#define	DEBUGGER() \
	ta	%xcc, 1

/*
 * Emit msg as a NUL-terminated string in .rodata (via local label 9:),
 * load its address into %o0 using the SET macro (defined elsewhere,
 * with r1 as scratch) and call panic().  The nop fills the call's delay
 * slot; panic does not return.
 */
#define	PANIC(msg, r1) \
	.sect	.rodata ; \
9:	.asciz	msg ; \
	.previous ; \
	SET(9b, r1, %o0) ; \
	call	panic ; \
	 nop

#ifdef INVARIANTS
/*
 * Assert that register r1 is non-zero; otherwise panic with msg.
 * brnz,pt skips forward over the PANIC to local label 8: when the
 * assertion holds (the likely case, hence ,pt).
 */
#define	KASSERT(r1, msg) \
	brnz,pt	r1, 8f ; \
	 nop ; \
	PANIC(msg, r1) ; \
8:
#else
/* Without INVARIANTS, assertions compile to nothing. */
#define	KASSERT(r1, msg)
#endif

/*
 * Like PANIC but calls printf and continues; clobbers r1 and the
 * registers printf itself clobbers.
 */
#define	PUTS(msg, r1) \
	.sect	.rodata ; \
9:	.asciz	msg ; \
	.previous ; \
	SET(9b, r1, %o0) ; \
	call	printf ; \
	 nop

#define	_ALIGN_DATA	.align 8

/*
 * Start a global data object: switch to .data, align to 8 bytes and
 * emit the label with @object type for the symbol table.
 */
#define	DATA(name) \
	.data ; \
	_ALIGN_DATA ; \
	.globl	name ; \
	.type	name, @object ; \
name:

#define	EMPTY

/*
 * Generate atomic compare and swap, load and store instructions for the
 * corresponding width and ASI (or not).  Note that we want to evaluate the
 * macro args before concatenating, so that EMPTY really turns into nothing.
 * (The _LD/_ST/_CAS indirection forces that extra argument expansion.)
 */
#define	_LD(w, a)	ld ## w ## a
#define	_ST(w, a)	st ## w ## a
#define	_CAS(w, a)	cas ## w ## a

#define	LD(w, a)	_LD(w, a)
#define	ST(w, a)	_ST(w, a)
#define	CAS(w, a)	_CAS(w, a)

#endif /* LOCORE */

#endif /* _KERNEL */

#endif /* !_MACHINE_ASMACROS_H_ */