/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 */
#ifndef __ASM_R4KCACHE_H
#define __ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>

/*
 * Issue a single CACHE instruction of type 'op' on the line containing
 * 'addr'.  The "m" constraint on *(unsigned char *)addr tells GCC the
 * operation touches that memory, so conflicting accesses are not
 * reordered around it.  The .set mips3/.set mips0 pair is needed because
 * the CACHE opcode is not part of the MIPS I instruction set.
 */
#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
		"	.set	noreorder		\n"		\
		"	.set	mips3\n\t		\n"		\
		"	cache	%0, %1			\n"		\
		"	.set	mips0			\n"		\
		"	.set	reorder"				\
		:							\
		: "i" (op), "m" (*(unsigned char *)(addr)))

/* Index-type ops select a cache line by index bits of 'addr'; they act on
 * whatever is in that line regardless of its tag. */
static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

/* Hit-type ops act only if 'addr' actually hits in the cache. */
static inline void flush_icache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_I, addr);
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * The next two are for badland addresses like signal trampolines.
 * The __ex_table entry (1b -> 2b) lets the kernel's fault handler resume
 * at label 2 if the CACHE op at label 1 faults on a bad address, instead
 * of treating it as a fatal kernel fault.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %0,(%1)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		:
		: "i" (Hit_Invalidate_I), "r" (addr));
}

static inline void protected_writeback_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %0,(%1)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		:
		: "i" (Hit_Writeback_D), "r" (addr));
}

/*
 * Issue 32 CACHE ops of type 'op' on consecutive 16-byte lines starting
 * at 'base' — covers 0x200 bytes per invocation, matching the loop
 * strides in the blast_*16* functions below.
 */
#define cache16_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		.set mips3;					\
		cache %1, 0x000(%0); cache %1, 0x010(%0);	\
		cache %1, 0x020(%0); cache %1, 0x030(%0);	\
		cache %1, 0x040(%0); cache %1, 0x050(%0);	\
		cache %1, 0x060(%0); cache %1, 0x070(%0);	\
		cache %1, 0x080(%0); cache %1, 0x090(%0);	\
		cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);	\
		cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x110(%0);	\
		cache %1, 0x120(%0); cache %1, 0x130(%0);	\
		cache %1, 0x140(%0); cache %1, 0x150(%0);	\
		cache %1, 0x160(%0); cache %1, 0x170(%0);	\
		cache %1, 0x180(%0); cache %1, 0x190(%0);	\
		cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);	\
		cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);	\
		.set mips0;					\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

/* Writeback+invalidate the whole primary dcache (16-byte lines) by index. */
static inline void blast_dcache16(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + dcache_size);

	while (start < end) {
		cache16_unroll32(start,Index_Writeback_Inv_D);
		start += 0x200;
	}
}

/*
 * Variant for CPUs where the low VA bits select the way: iterate one
 * way's worth of sets (sets * linesz) and OR the way number into the
 * address.  mips_cpu is the global CPU descriptor defined elsewhere.
 */
static inline void blast_dcache16_wayLSB(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + mips_cpu.dcache.sets * mips_cpu.dcache.linesz);
	int way;

	while (start < end) {
		/* LSB of VA select the way */
		for (way = 0; way < mips_cpu.dcache.ways; way++)
			cache16_unroll32(start|way,Index_Writeback_Inv_D);
		start += 0x200;
	}
}

/* Writeback+invalidate one page's worth of dcache lines that hit 'page'. */
static inline void blast_dcache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while (start < end) {
		cache16_unroll32(start,Hit_Writeback_Inv_D);
		start += 0x200;
	}
}

/* As above but indexed: flush whatever occupies the indexes of 'page'. */
static inline void blast_dcache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while (start < end) {
		cache16_unroll32(start,Index_Writeback_Inv_D);
		start += 0x200;
	}
}

static inline void blast_dcache16_page_indexed_wayLSB(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);
	int way;

	while (start < end) {
		/* LSB of VA select the way */
		for (way = 0; way < mips_cpu.dcache.ways; way++)
			cache16_unroll32(start|way,Index_Writeback_Inv_D);
		start += 0x200;
	}
}

/* Invalidate the whole primary icache (16-byte lines) by index. */
static inline void blast_icache16(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + icache_size);

	while (start < end) {
		cache16_unroll32(start,Index_Invalidate_I);
		start += 0x200;
	}
}

static inline void blast_icache16_wayLSB(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + mips_cpu.icache.sets * mips_cpu.icache.linesz);
	int way;

	while (start < end) {
		/* LSB of VA select the way */
		for (way = 0; way < mips_cpu.icache.ways; way++)
			cache16_unroll32(start|way,Index_Invalidate_I);
		start += 0x200;
	}
}

static inline void blast_icache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while (start < end) {
		cache16_unroll32(start,Hit_Invalidate_I);
		start += 0x200;
	}
}

static inline void blast_icache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while (start < end) {
		cache16_unroll32(start,Index_Invalidate_I);
		start += 0x200;
	}
}

/* Writeback+invalidate the whole secondary cache (16-byte lines). */
static inline void blast_scache16(void)
{
	unsigned long start = KSEG0;
	unsigned long end = KSEG0 + scache_size;

	while (start < end) {
		cache16_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x200;
	}
}

static inline void blast_scache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache16_unroll32(start,Hit_Writeback_Inv_SD);
		start += 0x200;
	}
}

static inline void blast_scache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache16_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x200;
	}
}

/* 32 CACHE ops on consecutive 32-byte lines: 0x400 bytes per invocation. */
#define cache32_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		.set mips3;					\
		cache %1, 0x000(%0); cache %1, 0x020(%0);	\
		cache %1, 0x040(%0); cache %1, 0x060(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x120(%0);	\
		cache %1, 0x140(%0); cache %1, 0x160(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x220(%0);	\
		cache %1, 0x240(%0); cache %1, 0x260(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\
		cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x320(%0);	\
		cache %1, 0x340(%0); cache %1, 0x360(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\
		cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\
		.set mips0;					\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

/* Writeback+invalidate the whole primary dcache (32-byte lines) by index. */
static inline void blast_dcache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + dcache_size);

	while (start < end) {
		cache32_unroll32(start,Index_Writeback_Inv_D);
		start += 0x400;
	}
}

static inline void blast_dcache32_wayLSB(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + mips_cpu.dcache.sets * mips_cpu.dcache.linesz);
	int way;

	while (start < end) {
		/* LSB of VA select the way */
		for (way = 0; way < mips_cpu.dcache.ways; way++)
			cache32_unroll32(start|way,Index_Writeback_Inv_D);
		start += 0x400;
	}
}

/*
 * Call this function only with interrupts disabled or R4600 V2.0 may blow
 * up on you.
 *
 * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
 * Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Excl_D will only
 * operate correctly if the internal data cache refill buffer is empty.  These
 * CACHE instructions should be separated from any potential data cache miss
 * by a load instruction to an uncached address to empty the response buffer."
 * (Revision 2.0 device errata from IDT available on http://www.idt.com/
 * in .pdf format.)
 */
static inline void blast_dcache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	/* Uncached load from KSEG1 to drain the refill buffer (see the
	 * R4600 v2.0 erratum above), then a few nops before the Hit ops. */
	*(volatile unsigned long *)KSEG1;

	__asm__ __volatile__("nop;nop;nop;nop");
	while (start < end) {
		cache32_unroll32(start,Hit_Writeback_Inv_D);
		start += 0x400;
	}
}

static inline void blast_dcache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while (start < end) {
		cache32_unroll32(start,Index_Writeback_Inv_D);
		start += 0x400;
	}
}

static inline void blast_dcache32_page_indexed_wayLSB(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);
	int way;

	while (start < end) {
		/* LSB of VA select the way */
		for (way = 0; way < mips_cpu.dcache.ways; way++)
			cache32_unroll32(start|way,Index_Writeback_Inv_D);
		start += 0x400;
	}
}

/* Invalidate the whole primary icache (32-byte lines) by index. */
static inline void blast_icache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + icache_size);

	while (start < end) {
		cache32_unroll32(start,Index_Invalidate_I);
		start += 0x400;
	}
}

static inline void blast_icache32_wayLSB(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + mips_cpu.icache.sets * mips_cpu.icache.linesz);
	int way;

	while (start < end) {
		/* LSB of VA select the way */
		for (way = 0; way < mips_cpu.icache.ways; way++)
			cache32_unroll32(start|way,Index_Invalidate_I);
		start += 0x400;
	}
}

static inline void blast_icache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while (start < end) {
		cache32_unroll32(start,Hit_Invalidate_I);
		start += 0x400;
	}
}

static inline void blast_icache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while (start < end) {
		cache32_unroll32(start,Index_Invalidate_I);
		start += 0x400;
	}
}

/* Writeback+invalidate the whole secondary cache (32-byte lines). */
static inline void blast_scache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = KSEG0 + scache_size;

	while (start < end) {
		cache32_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x400;
	}
}

static inline void blast_scache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache32_unroll32(start,Hit_Writeback_Inv_SD);
		start += 0x400;
	}
}

static inline void blast_scache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache32_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x400;
	}
}

/* 32 CACHE ops on consecutive 64-byte lines: 0x800 bytes per invocation. */
#define cache64_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		.set mips3;					\
		cache %1, 0x000(%0); cache %1, 0x040(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0c0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x140(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1c0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x240(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2c0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x340(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3c0(%0);	\
		cache %1, 0x400(%0); cache %1, 0x440(%0);	\
		cache %1, 0x480(%0); cache %1, 0x4c0(%0);	\
		cache %1, 0x500(%0); cache %1, 0x540(%0);	\
		cache %1, 0x580(%0); cache %1, 0x5c0(%0);	\
		cache %1, 0x600(%0); cache %1, 0x640(%0);	\
		cache %1, 0x680(%0); cache %1, 0x6c0(%0);	\
		cache %1, 0x700(%0); cache %1, 0x740(%0);	\
		cache %1, 0x780(%0); cache %1, 0x7c0(%0);	\
		.set mips0;					\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

/* Writeback+invalidate the whole secondary cache (64-byte lines). */
static inline void blast_scache64(void)
{
	unsigned long start = KSEG0;
	unsigned long end = KSEG0 + scache_size;

	while (start < end) {
		cache64_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x800;
	}
}

static inline void blast_scache64_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache64_unroll32(start,Hit_Writeback_Inv_SD);
		start += 0x800;
	}
}

static inline void blast_scache64_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache64_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x800;
	}
}

/* 32 CACHE ops on consecutive 128-byte lines: 0x1000 bytes per invocation. */
#define cache128_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		.set mips3;					\
		cache %1, 0x000(%0); cache %1, 0x080(%0);	\
		cache %1, 0x100(%0); cache %1, 0x180(%0);	\
		cache %1, 0x200(%0); cache %1, 0x280(%0);	\
		cache %1, 0x300(%0); cache %1, 0x380(%0);	\
		cache %1, 0x400(%0); cache %1, 0x480(%0);	\
		cache %1, 0x500(%0); cache %1, 0x580(%0);	\
		cache %1, 0x600(%0); cache %1, 0x680(%0);	\
		cache %1, 0x700(%0); cache %1, 0x780(%0);	\
		cache %1, 0x800(%0); cache %1, 0x880(%0);	\
		cache %1, 0x900(%0); cache %1, 0x980(%0);	\
		cache %1, 0xa00(%0); cache %1, 0xa80(%0);	\
		cache %1, 0xb00(%0); cache %1, 0xb80(%0);	\
		cache %1, 0xc00(%0); cache %1, 0xc80(%0);	\
		cache %1, 0xd00(%0); cache %1, 0xd80(%0);	\
		cache %1, 0xe00(%0); cache %1, 0xe80(%0);	\
		cache %1, 0xf00(%0); cache %1, 0xf80(%0);	\
		.set mips0;					\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

/* Writeback+invalidate the whole secondary cache (128-byte lines). */
static inline void blast_scache128(void)
{
	unsigned long start = KSEG0;
	unsigned long end = KSEG0 + scache_size;

	while (start < end) {
		cache128_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x1000;
	}
}

/* One unroll covers 32 * 128 = 0x1000 bytes — a full page in one call,
 * assuming PAGE_SIZE is 4K here. */
static inline void blast_scache128_page(unsigned long page)
{
	cache128_unroll32(page,Hit_Writeback_Inv_SD);
}

static inline void blast_scache128_page_indexed(unsigned long page)
{
	cache128_unroll32(page,Index_Writeback_Inv_SD);
}

#endif /* __ASM_R4KCACHE_H */