/* cpufunc.h revision 218383 */
1/* $OpenBSD: pio.h,v 1.2 1998/09/15 10:50:12 pefo Exp $ */ 2 3/*- 4 * Copyright (c) 2002-2004 Juli Mallett. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27/* 28 * Copyright (c) 1995-1999 Per Fogelstrom. All rights reserved. 29 * 30 * Redistribution and use in source and binary forms, with or without 31 * modification, are permitted provided that the following conditions 32 * are met: 33 * 1. Redistributions of source code must retain the above copyright 34 * notice, this list of conditions and the following disclaimer. 35 * 2. 
Redistributions in binary form must reproduce the above copyright 36 * notice, this list of conditions and the following disclaimer in the 37 * documentation and/or other materials provided with the distribution. 38 * 3. All advertising materials mentioning features or use of this software 39 * must display the following acknowledgement: 40 * This product includes software developed by Per Fogelstrom. 41 * 4. The name of the author may not be used to endorse or promote products 42 * derived from this software without specific prior written permission 43 * 44 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 47 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 49 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 53 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
54 * 55 * JNPR: cpufunc.h,v 1.5 2007/08/09 11:23:32 katta 56 * $FreeBSD: head/sys/mips/include/cpufunc.h 218383 2011-02-06 22:21:18Z jmallett $ 57 */ 58 59#ifndef _MACHINE_CPUFUNC_H_ 60#define _MACHINE_CPUFUNC_H_ 61 62#include <sys/types.h> 63#include <machine/cpuregs.h> 64 65/* 66 * These functions are required by user-land atomi ops 67 */ 68 69static __inline void 70mips_barrier(void) 71{ 72#ifdef CPU_CNMIPS 73 __asm __volatile("" : : : "memory"); 74#else 75 __asm __volatile (".set noreorder\n\t" 76 "nop\n\t" 77 "nop\n\t" 78 "nop\n\t" 79 "nop\n\t" 80 "nop\n\t" 81 "nop\n\t" 82 "nop\n\t" 83 "nop\n\t" 84 ".set reorder\n\t" 85 : : : "memory"); 86#endif 87} 88 89static __inline void 90mips_cp0_sync(void) 91{ 92 __asm __volatile (__XSTRING(COP0_SYNC)); 93} 94 95static __inline void 96mips_wbflush(void) 97{ 98#if defined(CPU_CNMIPS) 99 __asm __volatile (".set noreorder\n\t" 100 "syncw\n\t" 101 ".set reorder\n" 102 : : : "memory"); 103#else 104 __asm __volatile ("sync" : : : "memory"); 105 mips_barrier(); 106#endif 107} 108 109static __inline void 110mips_read_membar(void) 111{ 112 /* Nil */ 113} 114 115static __inline void 116mips_write_membar(void) 117{ 118 mips_wbflush(); 119} 120 121#ifdef _KERNEL 122/* 123 * XXX 124 * It would be nice to add variants that read/write register_t, to avoid some 125 * ABI checks. 
126 */ 127#if defined(__mips_n32) || defined(__mips_n64) 128#define MIPS_RW64_COP0(n,r) \ 129static __inline uint64_t \ 130mips_rd_ ## n (void) \ 131{ \ 132 int v0; \ 133 __asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)";" \ 134 : [v0] "=&r"(v0)); \ 135 mips_barrier(); \ 136 return (v0); \ 137} \ 138static __inline void \ 139mips_wr_ ## n (uint64_t a0) \ 140{ \ 141 __asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)";" \ 142 __XSTRING(COP0_SYNC)";" \ 143 "nop;" \ 144 "nop;" \ 145 : \ 146 : [a0] "r"(a0)); \ 147 mips_barrier(); \ 148} struct __hack 149 150#define MIPS_RW64_COP0_SEL(n,r,s) \ 151static __inline uint64_t \ 152mips_rd_ ## n(void) \ 153{ \ 154 int v0; \ 155 __asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";" \ 156 : [v0] "=&r"(v0)); \ 157 mips_barrier(); \ 158 return (v0); \ 159} \ 160static __inline void \ 161mips_wr_ ## n(uint64_t a0) \ 162{ \ 163 __asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";" \ 164 __XSTRING(COP0_SYNC)";" \ 165 : \ 166 : [a0] "r"(a0)); \ 167 mips_barrier(); \ 168} struct __hack 169 170#if defined(__mips_n64) 171MIPS_RW64_COP0(excpc, MIPS_COP_0_EXC_PC); 172MIPS_RW64_COP0(entryhi, MIPS_COP_0_TLB_HI); 173MIPS_RW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK); 174#ifdef CPU_CNMIPS 175MIPS_RW64_COP0_SEL(cvmcount, MIPS_COP_0_COUNT, 6); 176MIPS_RW64_COP0_SEL(cvmctl, MIPS_COP_0_COUNT, 7); 177MIPS_RW64_COP0_SEL(cvmmemctl, MIPS_COP_0_COMPARE, 7); 178MIPS_RW64_COP0_SEL(icache_err, MIPS_COP_0_CACHE_ERR, 0); 179MIPS_RW64_COP0_SEL(dcache_err, MIPS_COP_0_CACHE_ERR, 1); 180#endif 181#endif 182#if defined(__mips_n64) || defined(__mips_n32) /* PHYSADDR_64_BIT */ 183MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0); 184MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1); 185#endif 186MIPS_RW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT); 187 188#undef MIPS_RW64_COP0 189#undef MIPS_RW64_COP0_SEL 190#endif 191 192#define MIPS_RW32_COP0(n,r) \ 193static __inline uint32_t \ 194mips_rd_ ## n (void) \ 195{ \ 196 int v0; \ 197 __asm __volatile ("mfc0 %[v0], 
$"__XSTRING(r)";" \ 198 : [v0] "=&r"(v0)); \ 199 mips_barrier(); \ 200 return (v0); \ 201} \ 202static __inline void \ 203mips_wr_ ## n (uint32_t a0) \ 204{ \ 205 __asm __volatile ("mtc0 %[a0], $"__XSTRING(r)";" \ 206 __XSTRING(COP0_SYNC)";" \ 207 "nop;" \ 208 "nop;" \ 209 : \ 210 : [a0] "r"(a0)); \ 211 mips_barrier(); \ 212} struct __hack 213 214#define MIPS_RW32_COP0_SEL(n,r,s) \ 215static __inline uint32_t \ 216mips_rd_ ## n(void) \ 217{ \ 218 int v0; \ 219 __asm __volatile ("mfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";" \ 220 : [v0] "=&r"(v0)); \ 221 mips_barrier(); \ 222 return (v0); \ 223} \ 224static __inline void \ 225mips_wr_ ## n(uint32_t a0) \ 226{ \ 227 __asm __volatile ("mtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";" \ 228 __XSTRING(COP0_SYNC)";" \ 229 "nop;" \ 230 "nop;" \ 231 : \ 232 : [a0] "r"(a0)); \ 233 mips_barrier(); \ 234} struct __hack 235 236#ifdef CPU_CNMIPS 237static __inline void mips_sync_icache (void) 238{ 239 __asm __volatile ( 240 ".set push\n" 241 ".set mips64\n" 242 ".word 0x041f0000\n" /* xxx ICACHE */ 243 "nop\n" 244 ".set pop\n" 245 : : ); 246} 247#endif 248 249MIPS_RW32_COP0(compare, MIPS_COP_0_COMPARE); 250MIPS_RW32_COP0(config, MIPS_COP_0_CONFIG); 251MIPS_RW32_COP0_SEL(config1, MIPS_COP_0_CONFIG, 1); 252MIPS_RW32_COP0_SEL(config2, MIPS_COP_0_CONFIG, 2); 253MIPS_RW32_COP0_SEL(config3, MIPS_COP_0_CONFIG, 3); 254#ifdef CPU_CNMIPS 255MIPS_RW32_COP0_SEL(config4, MIPS_COP_0_CONFIG, 4); 256#endif 257MIPS_RW32_COP0(count, MIPS_COP_0_COUNT); 258MIPS_RW32_COP0(index, MIPS_COP_0_TLB_INDEX); 259MIPS_RW32_COP0(wired, MIPS_COP_0_TLB_WIRED); 260MIPS_RW32_COP0(cause, MIPS_COP_0_CAUSE); 261#if !defined(__mips_n64) 262MIPS_RW32_COP0(excpc, MIPS_COP_0_EXC_PC); 263#endif 264MIPS_RW32_COP0(status, MIPS_COP_0_STATUS); 265 266/* XXX: Some of these registers are specific to MIPS32. 
*/ 267#if !defined(__mips_n64) 268MIPS_RW32_COP0(entryhi, MIPS_COP_0_TLB_HI); 269MIPS_RW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK); 270#endif 271#if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */ 272MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0); 273MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1); 274#endif 275MIPS_RW32_COP0(prid, MIPS_COP_0_PRID); 276/* XXX 64-bit? */ 277MIPS_RW32_COP0_SEL(ebase, MIPS_COP_0_PRID, 1); 278MIPS_RW32_COP0(watchlo, MIPS_COP_0_WATCH_LO); 279MIPS_RW32_COP0_SEL(watchlo1, MIPS_COP_0_WATCH_LO, 1); 280MIPS_RW32_COP0_SEL(watchlo2, MIPS_COP_0_WATCH_LO, 2); 281MIPS_RW32_COP0_SEL(watchlo3, MIPS_COP_0_WATCH_LO, 3); 282MIPS_RW32_COP0(watchhi, MIPS_COP_0_WATCH_HI); 283MIPS_RW32_COP0_SEL(watchhi1, MIPS_COP_0_WATCH_HI, 1); 284MIPS_RW32_COP0_SEL(watchhi2, MIPS_COP_0_WATCH_HI, 2); 285MIPS_RW32_COP0_SEL(watchhi3, MIPS_COP_0_WATCH_HI, 3); 286 287MIPS_RW32_COP0_SEL(perfcnt0, MIPS_COP_0_PERFCNT, 0); 288MIPS_RW32_COP0_SEL(perfcnt1, MIPS_COP_0_PERFCNT, 1); 289MIPS_RW32_COP0_SEL(perfcnt2, MIPS_COP_0_PERFCNT, 2); 290MIPS_RW32_COP0_SEL(perfcnt3, MIPS_COP_0_PERFCNT, 3); 291 292#undef MIPS_RW32_COP0 293#undef MIPS_RW32_COP0_SEL 294 295static __inline register_t 296intr_disable(void) 297{ 298 register_t s; 299 300 s = mips_rd_status(); 301 mips_wr_status(s & ~MIPS_SR_INT_IE); 302 303 return (s & MIPS_SR_INT_IE); 304} 305 306static __inline register_t 307intr_enable(void) 308{ 309 register_t s; 310 311 s = mips_rd_status(); 312 mips_wr_status(s | MIPS_SR_INT_IE); 313 314 return (s); 315} 316 317static __inline void 318intr_restore(register_t ie) 319{ 320 if (ie == MIPS_SR_INT_IE) { 321 intr_enable(); 322 } 323} 324 325static __inline uint32_t 326set_intr_mask(uint32_t mask) 327{ 328 uint32_t ostatus; 329 330 ostatus = mips_rd_status(); 331 mask = (ostatus & ~MIPS_SR_INT_MASK) | (mask & MIPS_SR_INT_MASK); 332 mips_wr_status(mask); 333 return (ostatus); 334} 335 336static __inline uint32_t 337get_intr_mask(void) 338{ 339 340 return (mips_rd_status() & 
MIPS_SR_INT_MASK); 341} 342 343static __inline void 344breakpoint(void) 345{ 346 __asm __volatile ("break"); 347} 348 349#if defined(__GNUC__) && !defined(__mips_o32) 350static inline uint64_t 351mips3_ld(const volatile uint64_t *va) 352{ 353 uint64_t rv; 354 355#if defined(_LP64) 356 rv = *va; 357#else 358 __asm volatile("ld %0,0(%1)" : "=d"(rv) : "r"(va)); 359#endif 360 361 return (rv); 362} 363 364static inline void 365mips3_sd(volatile uint64_t *va, uint64_t v) 366{ 367#if defined(_LP64) 368 *va = v; 369#else 370 __asm volatile("sd %0,0(%1)" :: "r"(v), "r"(va)); 371#endif 372} 373#else 374uint64_t mips3_ld(volatile uint64_t *va); 375void mips3_sd(volatile uint64_t *, uint64_t); 376#endif /* __GNUC__ */ 377 378#endif /* _KERNEL */ 379 380#define readb(va) (*(volatile uint8_t *) (va)) 381#define readw(va) (*(volatile uint16_t *) (va)) 382#define readl(va) (*(volatile uint32_t *) (va)) 383 384#define writeb(va, d) (*(volatile uint8_t *) (va) = (d)) 385#define writew(va, d) (*(volatile uint16_t *) (va) = (d)) 386#define writel(va, d) (*(volatile uint32_t *) (va) = (d)) 387 388/* 389 * I/O macros. 
390 */ 391 392#define outb(a,v) (*(volatile unsigned char*)(a) = (v)) 393#define out8(a,v) (*(volatile unsigned char*)(a) = (v)) 394#define outw(a,v) (*(volatile unsigned short*)(a) = (v)) 395#define out16(a,v) outw(a,v) 396#define outl(a,v) (*(volatile unsigned int*)(a) = (v)) 397#define out32(a,v) outl(a,v) 398#define inb(a) (*(volatile unsigned char*)(a)) 399#define in8(a) (*(volatile unsigned char*)(a)) 400#define inw(a) (*(volatile unsigned short*)(a)) 401#define in16(a) inw(a) 402#define inl(a) (*(volatile unsigned int*)(a)) 403#define in32(a) inl(a) 404 405#define out8rb(a,v) (*(volatile unsigned char*)(a) = (v)) 406#define out16rb(a,v) (__out16rb((volatile uint16_t *)(a), v)) 407#define out32rb(a,v) (__out32rb((volatile uint32_t *)(a), v)) 408#define in8rb(a) (*(volatile unsigned char*)(a)) 409#define in16rb(a) (__in16rb((volatile uint16_t *)(a))) 410#define in32rb(a) (__in32rb((volatile uint32_t *)(a))) 411 412#define _swap_(x) (((x) >> 24) | ((x) << 24) | \ 413 (((x) >> 8) & 0xff00) | (((x) & 0xff00) << 8)) 414 415static __inline void __out32rb(volatile uint32_t *, uint32_t); 416static __inline void __out16rb(volatile uint16_t *, uint16_t); 417static __inline uint32_t __in32rb(volatile uint32_t *); 418static __inline uint16_t __in16rb(volatile uint16_t *); 419 420static __inline void 421__out32rb(volatile uint32_t *a, uint32_t v) 422{ 423 uint32_t _v_ = v; 424 425 _v_ = _swap_(_v_); 426 out32(a, _v_); 427} 428 429static __inline void 430__out16rb(volatile uint16_t *a, uint16_t v) 431{ 432 uint16_t _v_; 433 434 _v_ = ((v >> 8) & 0xff) | (v << 8); 435 out16(a, _v_); 436} 437 438static __inline uint32_t 439__in32rb(volatile uint32_t *a) 440{ 441 uint32_t _v_; 442 443 _v_ = in32(a); 444 _v_ = _swap_(_v_); 445 return _v_; 446} 447 448static __inline uint16_t 449__in16rb(volatile uint16_t *a) 450{ 451 uint16_t _v_; 452 453 _v_ = in16(a); 454 _v_ = ((_v_ >> 8) & 0xff) | (_v_ << 8); 455 return _v_; 456} 457 458void insb(uint8_t *, uint8_t *,int); 459void 
insw(uint16_t *, uint16_t *,int); 460void insl(uint32_t *, uint32_t *,int); 461void outsb(uint8_t *, const uint8_t *,int); 462void outsw(uint16_t *, const uint16_t *,int); 463void outsl(uint32_t *, const uint32_t *,int); 464u_int loadandclear(volatile u_int *addr); 465 466#endif /* !_MACHINE_CPUFUNC_H_ */ 467