/* cache_r4k.h, revision 178172 */
1/* $NetBSD: cache_r4k.h,v 1.10 2003/03/08 04:43:26 rafal Exp $ */ 2 3/* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 
 *
 * $FreeBSD: head/sys/mips/include/cache_r4k.h 178172 2008-04-13 07:27:37Z imp $
 */

/*
 * Cache definitions/operations for R4000-style caches.
 */

/*
 * Cache selectors for the MIPS "cache" instruction.  These occupy the
 * low two bits of the instruction's 5-bit op field and are OR'd with a
 * CACHEOP_R4K_* value (below) to form a complete operation code.
 */
#define	CACHE_R4K_I	0		/* primary instruction cache */
#define	CACHE_R4K_D	1		/* primary data cache */
#define	CACHE_R4K_SI	2		/* secondary instruction cache */
#define	CACHE_R4K_SD	3		/* secondary data cache */

/*
 * Cache operations, pre-shifted into bits [4:2] of the op field.  The
 * trailing comment on each lists the caches the operation is valid for.
 * Note that opcode 5 (5 << 2) is "fill" when applied to the I-cache but
 * "hit writeback invalidate" when applied to the data caches.
 */
#define	CACHEOP_R4K_INDEX_INV		(0 << 2)	/* I, SI */
#define	CACHEOP_R4K_INDEX_WB_INV	(0 << 2)	/* D, SD */
#define	CACHEOP_R4K_INDEX_LOAD_TAG	(1 << 2)	/* all */
#define	CACHEOP_R4K_INDEX_STORE_TAG	(2 << 2)	/* all */
#define	CACHEOP_R4K_CREATE_DIRTY_EXCL	(3 << 2)	/* D, SD */
#define	CACHEOP_R4K_HIT_INV		(4 << 2)	/* all */
#define	CACHEOP_R4K_HIT_WB_INV		(5 << 2)	/* D, SD */
#define	CACHEOP_R4K_FILL		(5 << 2)	/* I */
#define	CACHEOP_R4K_HIT_WB		(6 << 2)	/* I, D, SD */
#define	CACHEOP_R4K_HIT_SET_VIRTUAL	(7 << 2)	/* SI, SD */

#if !defined(LOCORE)

/*
 * cache_op_r4k_line:
 *
 *	Perform the specified cache operation (op == CACHE_R4K_* |
 *	CACHEOP_R4K_*, an immediate) on the single cache line holding
 *	virtual address va.
 *
 *	NOTE(review): callers appear responsible for any required
 *	alignment of va -- confirm at call sites.
 */
#define	cache_op_r4k_line(va, op)					\
do {									\
	__asm __volatile(						\
	    ".set noreorder \n\t"					\
	    "cache %1, 0(%0) \n\t"					\
	    ".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_16:
 *
 *	Perform the specified cache operation on 8 16-byte cache lines
 *	(128 bytes starting at va; offsets step by 0x10).
 */
#define	cache_r4k_op_8lines_16(va, op)					\
do {									\
	__asm __volatile(						\
	    ".set noreorder \n\t"					\
	    "cache %1, 0x00(%0); cache %1, 0x10(%0) \n\t"		\
	    "cache %1, 0x20(%0); cache %1, 0x30(%0) \n\t"		\
	    "cache %1, 0x40(%0); cache %1, 0x50(%0) \n\t"		\
	    "cache %1, 0x60(%0); cache %1, 0x70(%0) \n\t"		\
	    ".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32:
 *
 *	Perform the specified cache operation on 8 32-byte cache lines
 *	(256 bytes starting at va; offsets step by 0x20).
 */
#define	cache_r4k_op_8lines_32(va, op)					\
do {									\
	__asm __volatile(						\
	    ".set noreorder \n\t"					\
	    "cache %1, 0x00(%0); cache %1, 0x20(%0) \n\t"		\
	    "cache %1, 0x40(%0); cache %1, 0x60(%0) \n\t"		\
	    "cache %1, 0x80(%0); cache %1, 0xa0(%0) \n\t"		\
	    "cache %1, 0xc0(%0); cache %1, 0xe0(%0) \n\t"		\
	    ".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_16:
 *
 *	Perform the specified cache operation on 32 16-byte
 *	cache lines (512 bytes starting at va; offsets step by 0x10).
 */
#define	cache_r4k_op_32lines_16(va, op)					\
do {									\
	__asm __volatile(						\
	    ".set noreorder \n\t"					\
	    "cache %1, 0x000(%0); cache %1, 0x010(%0); \n\t"		\
	    "cache %1, 0x020(%0); cache %1, 0x030(%0); \n\t"		\
	    "cache %1, 0x040(%0); cache %1, 0x050(%0); \n\t"		\
	    "cache %1, 0x060(%0); cache %1, 0x070(%0); \n\t"		\
	    "cache %1, 0x080(%0); cache %1, 0x090(%0); \n\t"		\
	    "cache %1, 0x0a0(%0); cache %1, 0x0b0(%0); \n\t"		\
	    "cache %1, 0x0c0(%0); cache %1, 0x0d0(%0); \n\t"		\
	    "cache %1, 0x0e0(%0); cache %1, 0x0f0(%0); \n\t"		\
	    "cache %1, 0x100(%0); cache %1, 0x110(%0); \n\t"		\
	    "cache %1, 0x120(%0); cache %1, 0x130(%0); \n\t"		\
	    "cache %1, 0x140(%0); cache %1, 0x150(%0); \n\t"		\
	    "cache %1, 0x160(%0); cache %1, 0x170(%0); \n\t"		\
	    "cache %1, 0x180(%0); cache %1, 0x190(%0); \n\t"		\
	    "cache %1, 0x1a0(%0); cache %1, 0x1b0(%0); \n\t"		\
	    "cache %1, 0x1c0(%0); cache %1, 0x1d0(%0); \n\t"		\
	    "cache %1, 0x1e0(%0); cache %1, 0x1f0(%0); \n\t"		\
	    ".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_32:
 *
 *	Perform the specified cache operation on 32 32-byte
 *	cache lines (1024 bytes starting at va; offsets step by 0x20).
 */
#define	cache_r4k_op_32lines_32(va, op)					\
do {									\
	__asm __volatile(						\
	    ".set noreorder \n\t"					\
	    "cache %1, 0x000(%0); cache %1, 0x020(%0); \n\t"		\
	    "cache %1, 0x040(%0); cache %1, 0x060(%0); \n\t"		\
	    "cache %1, 0x080(%0); cache %1, 0x0a0(%0); \n\t"		\
	    "cache %1, 0x0c0(%0); cache %1, 0x0e0(%0); \n\t"		\
	    "cache %1, 0x100(%0); cache %1, 0x120(%0); \n\t"		\
	    "cache %1, 0x140(%0); cache %1, 0x160(%0); \n\t"		\
	    "cache %1, 0x180(%0); cache %1, 0x1a0(%0); \n\t"		\
	    "cache %1, 0x1c0(%0); cache %1, 0x1e0(%0); \n\t"		\
	    "cache %1, 0x200(%0); cache %1, 0x220(%0); \n\t"		\
	    "cache %1, 0x240(%0); cache %1, 0x260(%0); \n\t"		\
	    "cache %1, 0x280(%0); cache %1, 0x2a0(%0); \n\t"		\
	    "cache %1, 0x2c0(%0); cache %1, 0x2e0(%0); \n\t"		\
	    "cache %1, 0x300(%0); cache %1, 0x320(%0); \n\t"		\
	    "cache %1, 0x340(%0); cache %1, 0x360(%0); \n\t"		\
	    "cache %1, 0x380(%0); cache %1, 0x3a0(%0); \n\t"		\
	    "cache %1, 0x3c0(%0); cache %1, 0x3e0(%0); \n\t"		\
	    ".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_128:
 *
 *	Perform the specified cache operation on 32 128-byte
 *	cache lines (4096 bytes starting at va; offsets step by 0x80).
 */
#define	cache_r4k_op_32lines_128(va, op)				\
do {									\
	__asm __volatile(						\
	    ".set noreorder \n\t"					\
	    "cache %1, 0x0000(%0); cache %1, 0x0080(%0); \n\t"		\
	    "cache %1, 0x0100(%0); cache %1, 0x0180(%0); \n\t"		\
	    "cache %1, 0x0200(%0); cache %1, 0x0280(%0); \n\t"		\
	    "cache %1, 0x0300(%0); cache %1, 0x0380(%0); \n\t"		\
	    "cache %1, 0x0400(%0); cache %1, 0x0480(%0); \n\t"		\
	    "cache %1, 0x0500(%0); cache %1, 0x0580(%0); \n\t"		\
	    "cache %1, 0x0600(%0); cache %1, 0x0680(%0); \n\t"		\
	    "cache %1, 0x0700(%0); cache %1, 0x0780(%0); \n\t"		\
	    "cache %1, 0x0800(%0); cache %1, 0x0880(%0); \n\t"		\
	    "cache %1, 0x0900(%0); cache %1, 0x0980(%0); \n\t"		\
	    "cache %1, 0x0a00(%0); cache %1, 0x0a80(%0); \n\t"		\
	    "cache %1, 0x0b00(%0); cache %1, 0x0b80(%0); \n\t"		\
	    "cache %1, 0x0c00(%0); cache %1, 0x0c80(%0); \n\t"		\
	    "cache %1, 0x0d00(%0); cache %1, 0x0d80(%0); \n\t"		\
	    "cache %1, 0x0e00(%0); cache %1, 0x0e80(%0); \n\t"		\
	    "cache %1, 0x0f00(%0); cache %1, 0x0f80(%0); \n\t"		\
	    ".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_16lines_16_2way:
 *
 *	Perform the specified cache operation on 16 16-byte
 *	cache lines in each of 2 ways (va1 and va2 are the
 *	per-way base addresses; offsets step by 0x10).
 */
#define	cache_r4k_op_16lines_16_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
	    ".set noreorder \n\t"					\
	    "cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t"		\
	    "cache %2, 0x010(%0); cache %2, 0x010(%1); \n\t"		\
	    "cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t"		\
	    "cache %2, 0x030(%0); cache %2, 0x030(%1); \n\t"		\
	    "cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t"		\
	    "cache %2, 0x050(%0); cache %2, 0x050(%1); \n\t"		\
	    "cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t"		\
	    "cache %2, 0x070(%0); cache %2, 0x070(%1); \n\t"		\
	    "cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t"		\
	    "cache %2, 0x090(%0); cache %2, 0x090(%1); \n\t"		\
	    "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t"		\
	    "cache %2, 0x0b0(%0); cache %2, 0x0b0(%1); \n\t"		\
	    "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t"		\
	    "cache %2, 0x0d0(%0); cache %2, 0x0d0(%1); \n\t"		\
	    "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t"		\
	    "cache %2, 0x0f0(%0); cache %2, 0x0f0(%1); \n\t"		\
	    ".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_16lines_32_2way:
 *
 *	Perform the specified cache operation on 16 32-byte
 *	cache lines in each of 2 ways (va1 and va2 are the
 *	per-way base addresses; offsets step by 0x20).
 */
#define	cache_r4k_op_16lines_32_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
	    ".set noreorder \n\t"					\
	    "cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t"		\
	    "cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t"		\
	    "cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t"		\
	    "cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t"		\
	    "cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t"		\
	    "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t"		\
	    "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t"		\
	    "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t"		\
	    "cache %2, 0x100(%0); cache %2, 0x100(%1); \n\t"		\
	    "cache %2, 0x120(%0); cache %2, 0x120(%1); \n\t"		\
	    "cache %2, 0x140(%0); cache %2, 0x140(%1); \n\t"		\
	    "cache %2, 0x160(%0); cache %2, 0x160(%1); \n\t"		\
	    "cache %2, 0x180(%0); cache %2, 0x180(%1); \n\t"		\
	    "cache %2, 0x1a0(%0); cache %2, 0x1a0(%1); \n\t"		\
	    "cache %2, 0x1c0(%0); cache %2, 0x1c0(%1); \n\t"		\
	    "cache %2, 0x1e0(%0); cache %2, 0x1e0(%1); \n\t"		\
	    ".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_16_4way:
 *
 *	Perform the specified cache operation on 8 16-byte
 *	cache lines in each of 4 ways (va1..va4 are the
 *	per-way base addresses; offsets step by 0x10).
 */
#define	cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
	    ".set noreorder \n\t"					\
	    "cache %4, 0x000(%0); cache %4, 0x000(%1); \n\t"		\
	    "cache %4, 0x000(%2); cache %4, 0x000(%3); \n\t"		\
	    "cache %4, 0x010(%0); cache %4, 0x010(%1); \n\t"		\
	    "cache %4, 0x010(%2); cache %4, 0x010(%3); \n\t"		\
	    "cache %4, 0x020(%0); cache %4, 0x020(%1); \n\t"		\
	    "cache %4, 0x020(%2); cache %4, 0x020(%3); \n\t"		\
	    "cache %4, 0x030(%0); cache %4, 0x030(%1); \n\t"		\
	    "cache %4, 0x030(%2); cache %4, 0x030(%3); \n\t"		\
	    "cache %4, 0x040(%0); cache %4, 0x040(%1); \n\t"		\
	    "cache %4, 0x040(%2); cache %4, 0x040(%3); \n\t"		\
	    "cache %4, 0x050(%0); cache %4, 0x050(%1); \n\t"		\
	    "cache %4, 0x050(%2); cache %4, 0x050(%3); \n\t"		\
	    "cache %4, 0x060(%0); cache %4, 0x060(%1); \n\t"		\
	    "cache %4, 0x060(%2); cache %4, 0x060(%3); \n\t"		\
	    "cache %4, 0x070(%0); cache %4, 0x070(%1); \n\t"		\
	    "cache %4, 0x070(%2); cache %4, 0x070(%3); \n\t"		\
	    ".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32_4way:
 *
 *	Perform the specified cache operation on 8 32-byte
 *	cache lines in each of 4 ways (va1..va4 are the
 *	per-way base addresses; offsets step by 0x20).
 */
#define	cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
	    ".set noreorder \n\t"					\
	    "cache %4, 0x000(%0); cache %4, 0x000(%1); \n\t"		\
	    "cache %4, 0x000(%2); cache %4, 0x000(%3); \n\t"		\
	    "cache %4, 0x020(%0); cache %4, 0x020(%1); \n\t"		\
	    "cache %4, 0x020(%2); cache %4, 0x020(%3); \n\t"		\
	    "cache %4, 0x040(%0); cache %4, 0x040(%1); \n\t"		\
	    "cache %4, 0x040(%2); cache %4, 0x040(%3); \n\t"		\
	    "cache %4, 0x060(%0); cache %4, 0x060(%1); \n\t"		\
	    "cache %4, 0x060(%2); cache %4, 0x060(%3); \n\t"		\
	    "cache %4, 0x080(%0); cache %4, 0x080(%1); \n\t"		\
	    "cache %4, 0x080(%2); cache %4, 0x080(%3); \n\t"		\
	    "cache %4, 0x0a0(%0); cache %4, 0x0a0(%1); \n\t"		\
	    "cache %4, 0x0a0(%2); cache %4, 0x0a0(%3); \n\t"		\
	    "cache %4, 0x0c0(%0); cache %4, 0x0c0(%1); \n\t"		\
	    "cache %4, 0x0c0(%2); cache %4, 0x0c0(%3); \n\t"		\
	    "cache %4, 0x0e0(%0); cache %4, 0x0e0(%1); \n\t"		\
	    "cache %4, 0x0e0(%2); cache %4, 0x0e0(%3); \n\t"		\
	    ".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * Prototypes for the cache-maintenance entry points.  The numeric
 * suffix is the cache line size the routine is specialized for
 * (_generic handles any line size).
 *
 * NOTE(review): the range routines take vm_paddr_t here -- confirm
 * against callers whether these are physical or virtual addresses.
 */

/* Primary instruction cache. */
void	r4k_icache_sync_all_16(void);
void	r4k_icache_sync_range_16(vm_paddr_t, vm_size_t);
void	r4k_icache_sync_range_index_16(vm_paddr_t, vm_size_t);

void	r4k_icache_sync_all_32(void);
void	r4k_icache_sync_range_32(vm_paddr_t, vm_size_t);
void	r4k_icache_sync_range_index_32(vm_paddr_t, vm_size_t);

/* Primary data cache. */
void	r4k_pdcache_wbinv_all_16(void);
void	r4k_pdcache_wbinv_range_16(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wbinv_range_index_16(vm_paddr_t, vm_size_t);

void	r4k_pdcache_inv_range_16(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wb_range_16(vm_paddr_t, vm_size_t);

void	r4k_pdcache_wbinv_all_32(void);
void	r4k_pdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);

void	r4k_pdcache_inv_range_32(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wb_range_32(vm_paddr_t, vm_size_t);

/* Secondary data cache. */
void	r4k_sdcache_wbinv_all_32(void);
void	r4k_sdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);

void	r4k_sdcache_inv_range_32(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wb_range_32(vm_paddr_t, vm_size_t);

void	r4k_sdcache_wbinv_all_128(void);
void	r4k_sdcache_wbinv_range_128(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wbinv_range_index_128(vm_paddr_t, vm_size_t);

void	r4k_sdcache_inv_range_128(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wb_range_128(vm_paddr_t, vm_size_t);

void	r4k_sdcache_wbinv_all_generic(void);
void	r4k_sdcache_wbinv_range_generic(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wbinv_range_index_generic(vm_paddr_t, vm_size_t);

void	r4k_sdcache_inv_range_generic(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wb_range_generic(vm_paddr_t, vm_size_t);

#endif /* !LOCORE */