1/* $NetBSD: locore_mips1.S,v 1.82 2011/05/07 19:15:48 tsutsui Exp $ */ 2 3/* 4 * Copyright (c) 1992, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * Digital Equipment Corporation and Ralph Campbell. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * Copyright (C) 1989 Digital Equipment Corporation. 
35 * Permission to use, copy, modify, and distribute this software and 36 * its documentation for any purpose and without fee is hereby granted, 37 * provided that the above copyright notice appears in all copies. 38 * Digital Equipment Corporation makes no representations about the 39 * suitability of this software for any purpose. It is provided "as is" 40 * without express or implied warranty. 41 * 42 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s, 43 * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL) 44 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s, 45 * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL) 46 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s, 47 * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL) 48 * 49 * @(#)locore.s 8.5 (Berkeley) 1/4/94 50 */ 51#include "opt_cputype.h" 52#include "opt_ddb.h" 53#include "opt_kgdb.h" 54 55#include <sys/cdefs.h> 56 57#include <mips/asm.h> 58#include <mips/cpuregs.h> 59#include <machine/param.h> 60 61#include "assym.h" 62 63#define _SLLV sllv 64 65#define _SLL sll 66#define _SRL srl 67#define WIRED_SHIFT 2 68 69/* 70 * Use correct-sized m?c0/dm?c0 opcodes. 71 */ 72#define _MFC0 mfc0 73#define _MTC0 mtc0 74 75 76#define MIPSX(name) __CONCAT(mips1_,name) 77 78 .set noreorder 79 .text 80 81EXPORT(MIPSX(exceptionentry_start)) 82 83/* 84 * mipsN_utlb_miss 85 * 86 * A reference is made (in either kernel or user mode) to a page in 87 * kuseg that has no matching TLB entry. This routine is copied down 88 * at 0x80000000 and total length must be less than 32 instructions. 89 * No pc relative jump instruction is allowed. 
 */
VECTOR(MIPSX(utlb_miss), unknown)
	.set	noat
	# k0/k1 are the only registers we may touch here; no stack exists yet.
	# Each instruction is numbered: the whole vector must fit in 32 slots.
	_MFC0	k0, MIPS_COP_0_BAD_VADDR	#00: k0=bad address
	lui	k1, %hi(CPUVAR(PMAP_SEG0TAB))	#01: k1=hi of seg0tab
	bltz	k0, 1f				# R3000 chip bug: kernel addrs
						# (sign bit set) must not be
						# translated here; just rfe.
	PTR_SRL	k0, 1*(PGSHIFT-PTR_SCALESHIFT)+(PGSHIFT-2) #03: k0=seg offset (almost)
	PTR_L	k1, %lo(CPUVAR(PMAP_SEG0TAB))(k1) #04: k1=seg0tab
	andi	k0, (NBPG-(1<<PTR_SCALESHIFT))	#07: k0=seg offset (mask 0x3)
	PTR_ADDU k1, k0				#08: k1=seg entry address
	PTR_L	k1, 0(k1)			#09: k1=seg entry
	_MFC0	k0, MIPS_COP_0_BAD_VADDR	#0a: k0=bad address (again)
	beqz	k1, MIPSX(nopagetable)		#0b: ==0 -- no page table
	PTR_SRL	k0, (PGSHIFT-2)			#0c: k0=VPN (aka va>>10)
	andi	k0, (NBPG-4)			#0d: k0=page table offset
	PTR_ADDU k1, k0				#0e: k1=pte address
	INT_L	k0, 0(k1)			#0f: k0=lo0 pte
	nop					#10: load delay
	beqz	k0, MIPSX(invalidpte)		#11: dont load invalid entries
	nop					#12: branch delay
	mtc0	k0, MIPS_COP_0_TLB_LOW		#13: lo0 is loaded
	nop					#14: load delay
	tlbwr					#15: update TLB (random slot)
1:
	_MFC0	k1, MIPS_COP_0_EXC_PC		#16: get return address
	nop					#17: load delay
	j	k1				#18: return from
	rfe					#19: exception (in delay slot)
MIPSX(nopagetable):
MIPSX(invalidpte):
	j	MIPSX(slowfault)		#1a: handle the rest
	nop					#1b: branch delay
	.set	at
VECTOR_END(MIPSX(utlb_miss))


/*
 * mipsN_exception
 *
 * Handles any exceptions other than reset and UTLB miss.  This routine
 * is copied down at 0x80000080 and total length must be less than 32
 * instructions.  No pc relative jump instruction is allowed.
 */
	.org	MIPSX(utlb_miss) + 0x80
VECTOR(MIPSX(exception), unknown)
/*
 * Find out what mode we came from and jump to the proper handler.
 * The dispatch index is (exc-code | user-bit<<6), so excpt_sw has a
 * kernel half and a user half.
 */
	.set	noat
	mfc0	k0, MIPS_COP_0_STATUS		#00: get the status register
	mfc0	k1, MIPS_COP_0_CAUSE		#01: get the cause register
	and	k0, MIPS1_SR_KU_PREV		#02: test for user mode
	sll	k0, 4				#03: shift user bit for cause index
	and	k1, MIPS1_CR_EXC_CODE		#04: mask out the cause bits
	or	k1, k0				#05: change index to user table
	PTR_LA	k0, MIPSX(excpt_sw)		#06: get base of the jump table
	PTR_ADDU k0, k1				#08: get the address of the
						#    function entry.  Note that
						#    the cause is already
						#    shifted left by 2 bits so
						#    we dont have to shift.
	PTR_L	k0, 0(k0)			#09: get the function address
	nop					#0a: load delay
	j	k0				#0b: jump to the function
	nop					#0c: branch delay
	nop					#0d
	nop					#0e
	nop					#0f
	.set	at
VECTOR_END(MIPSX(exception))


/*----------------------------------------------------------------------------
 *
 * mipsN_slowfault
 *
 * Alternate entry point into the mipsN_user_gen_exception or
 * mipsN_kern_gen_exception, when the UTLB miss handler couldn't
 * find a TLB entry.
 *
 * Find out what mode we came from and call the appropriate handler.
 *
 *----------------------------------------------------------------------------
 */
MIPSX(slowfault):
	.set	noat
	mfc0	k0, MIPS_COP_0_STATUS
	nop
	and	k0, MIPS1_SR_KU_PREV		# previous mode was user?
	bnez	k0, _C_LABEL(MIPSX(user_gen_exception))
	nop
	.set	at
/*
 * Fall through ...
 */

/*
 * mipsN_kern_gen_exception
 *
 * Handle an exception during kernel mode.
 * Build trapframe on stack to hold interrupted kernel context, then
 * call trap() to process the condition.
 *
 * trapframe is pointed to by the 5th arg and a dummy sixth argument is used
 * to avoid alignment problems
 *	{
 *	register_t cf_args[4 + 1];
 *	register_t cf_pad;		(for 8 word alignment)
 *	register_t cf_sp;
 *	register_t cf_ra;
 *	struct reg cf_tf;
 *	};
 */
NESTED_NOPROFILE(MIPSX(kern_gen_exception), KERNFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
#ifdef PARANOIA
	/*
	 * Sanity check: sp must lie inside the current lwp's USPACE;
	 * hang (tight loop) rather than corrupt memory if it does not.
	 */
	PTR_L	k0, L_PCB(MIPS_CURLWP)
	nop
	slt	k0, k0, sp		# k0 = L_PCB(MIPS_CURLWP) < sp
1:	beqz	k0, 1b			# loop forever if false
	nop
	PTR_L	k0, L_PCB(MIPS_CURLWP)
	nop
	PTR_ADDU k0, USPACE
	slt	k0, sp, k0		# k0 = sp < L_PCB(MIPS_CURLWP) + USPACE
2:	beqz	k0, 2b			# loop forever if false
	nop
#endif /* PARANOIA */
/*
 * Save the relevant kernel registers onto the stack.
 * We don't need to save s0 - s8, sp and gp because
 * the compiler does it for us.
 */
	PTR_SUBU sp, KERNFRAME_SIZ
	REG_S	AT, TF_BASE+TF_REG_AST(sp)
	REG_S	v0, TF_BASE+TF_REG_V0(sp)
	REG_S	v1, TF_BASE+TF_REG_V1(sp)
	mflo	v0
	mfhi	v1
	REG_S	a0, TF_BASE+TF_REG_A0(sp)
	REG_S	a1, TF_BASE+TF_REG_A1(sp)
	REG_S	a2, TF_BASE+TF_REG_A2(sp)
	REG_S	a3, TF_BASE+TF_REG_A3(sp)
	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
	REG_S	t0, TF_BASE+TF_REG_T0(sp)
	REG_S	t1, TF_BASE+TF_REG_T1(sp)
	REG_S	t2, TF_BASE+TF_REG_T2(sp)
	REG_S	t3, TF_BASE+TF_REG_T3(sp)
	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
	REG_S	ta0, TF_BASE+TF_REG_TA0(sp)
	REG_S	ta1, TF_BASE+TF_REG_TA1(sp)
	REG_S	ta2, TF_BASE+TF_REG_TA2(sp)
	REG_S	ta3, TF_BASE+TF_REG_TA3(sp)
	_MFC0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
	#REG_S	t8, TF_BASE+TF_REG_T8(sp)	# is MIPS_CURLWP
	REG_S	t9, TF_BASE+TF_REG_T9(sp)
	REG_S	ra, TF_BASE+TF_REG_RA(sp)
	REG_S	a0, TF_BASE+TF_REG_SR(sp)
	_MFC0	a3, MIPS_COP_0_EXC_PC		# 4th arg is exception PC
	REG_S	v0, TF_BASE+TF_REG_MULLO(sp)
	REG_S	v1, TF_BASE+TF_REG_MULHI(sp)
	REG_S	a3, TF_BASE+TF_REG_EPC(sp)
	REG_S	a1, TF_BASE+TF_REG_CAUSE(sp)
#if defined(DDB) || defined(KGDB)
	/* Save callee-saved registers too so the debugger can see them. */
	REG_S	s0, TF_BASE+TF_REG_S0(sp)
	REG_S	s1, TF_BASE+TF_REG_S1(sp)
	REG_S	s2, TF_BASE+TF_REG_S2(sp)
	REG_S	s3, TF_BASE+TF_REG_S3(sp)
	REG_S	s4, TF_BASE+TF_REG_S4(sp)
	REG_S	s5, TF_BASE+TF_REG_S5(sp)
	REG_S	s6, TF_BASE+TF_REG_S6(sp)
	REG_S	s7, TF_BASE+TF_REG_S7(sp)
	PTR_ADDU v0, sp, KERNFRAME_SIZ
	REG_S	v0, TF_BASE+TF_REG_SP(sp)
	REG_S	s8, TF_BASE+TF_REG_S8(sp)
	REG_S	gp, TF_BASE+TF_REG_GP(sp)
#endif
	PTR_ADDU v0, sp, TF_BASE
	REG_S	v0, KERNFRAME_ARG5(sp)		# 5th arg is p. to trapframe
#ifdef PARANOIA
	/*
	 * save PPL in trapframe
	 */
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	nop
	INT_L	t1, CPU_INFO_CPL(t0)	# get current priority level
	nop
	INT_S	t1, TF_BASE+TF_PPL(sp)	# save priority level
#endif /* PARANOIA */

#if defined(DDB) || defined(DEBUG) || defined(KGDB)
	PTR_ADDU v0, sp, KERNFRAME_SIZ
	REG_S	v0, KERNFRAME_SP(sp)
#endif

#ifdef PARANOIA
	/*
	 * Verify our existing interrupt level.
	 */
	jal	_C_LABEL(splcheck)
	nop
#endif /* PARANOIA */

	/*
	 * Call the trap handler: trap(status, cause, vaddr, pc, frame).
	 */
	jal	_C_LABEL(trap)
	REG_S	a3, KERNFRAME_RA(sp)		# for debugging (delay slot)

	/*
	 * Restore registers and return from the exception.
	 */
	REG_L	a0, TF_BASE+TF_REG_SR(sp)
	nop
	mtc0	a0, MIPS_COP_0_STATUS	# restore the SR, disable intrs

	/*
	 * Start of common kernel exception return code for both
	 * mipsN_kern_gen_exception and mipsN_kern_intr.
	 */
MIPSX(kern_return):
	REG_L	t0, TF_BASE+TF_REG_MULLO(sp)
	REG_L	t1, TF_BASE+TF_REG_MULHI(sp)
	REG_L	k1, TF_BASE+TF_REG_EPC(sp)	# might be changed inside trap
	mtlo	t0
	mthi	t1

#ifdef PARANOIA
	/* The priority level on exit must match the one on entry. */
	INT_L	t2, TF_BASE+TF_PPL(sp)	# get saved priority level
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	nop
	INT_L	t1, CPU_INFO_CPL(t0)	# get current priority level
	nop
11:	bne	t2, t1, 11b		# loop forever if unequal
	nop

	/*
	 * Verify our existing interrupt level.
	 */
	jal	_C_LABEL(splcheck)
	nop
#endif /* PARANOIA */

	/*
	 * Check for kernel restartable atomic sequences.
	 */
	PTR_LA	t0, _C_LABEL(_lock_ras_start)
	li	t1, -MIPS_LOCK_RAS_SIZE
	and	t1, k1
	bne	t1, t0, 1f		# exception PC in RAS area?
	nop
	jal	_C_LABEL(_restart_lock_ras)	# fix the pc (k1)
	nop
1:

	REG_L	AT, TF_BASE+TF_REG_AST(sp)
	REG_L	v0, TF_BASE+TF_REG_V0(sp)
	REG_L	v1, TF_BASE+TF_REG_V1(sp)
	REG_L	a0, TF_BASE+TF_REG_A0(sp)
	REG_L	a1, TF_BASE+TF_REG_A1(sp)
	REG_L	a2, TF_BASE+TF_REG_A2(sp)
	REG_L	a3, TF_BASE+TF_REG_A3(sp)
	REG_L	t0, TF_BASE+TF_REG_T0(sp)
	REG_L	t1, TF_BASE+TF_REG_T1(sp)
	REG_L	t2, TF_BASE+TF_REG_T2(sp)
	REG_L	t3, TF_BASE+TF_REG_T3(sp)
	REG_L	ta0, TF_BASE+TF_REG_TA0(sp)
	REG_L	ta1, TF_BASE+TF_REG_TA1(sp)
	REG_L	ta2, TF_BASE+TF_REG_TA2(sp)
	REG_L	ta3, TF_BASE+TF_REG_TA3(sp)
	#REG_L	t8, TF_BASE+TF_REG_T8(sp)	# is MIPS_CURLWP
	REG_L	t9, TF_BASE+TF_REG_T9(sp)
	REG_L	ra, TF_BASE+TF_REG_RA(sp)
#ifdef DDBnotyet
	REG_L	s0, TF_BASE+TF_REG_S0(sp)
	REG_L	s1, TF_BASE+TF_REG_S1(sp)
	REG_L	s2, TF_BASE+TF_REG_S2(sp)
	REG_L	s3, TF_BASE+TF_REG_S3(sp)
	REG_L	s4, TF_BASE+TF_REG_S4(sp)
	REG_L	s5, TF_BASE+TF_REG_S5(sp)
	REG_L	s6, TF_BASE+TF_REG_S6(sp)
	REG_L	s7, TF_BASE+TF_REG_S7(sp)
	REG_L	s8, TF_BASE+TF_REG_S8(sp)
#endif
	PTR_ADDU sp, KERNFRAME_SIZ
	j	k1			# return to interrupted point
	rfe				# restore pre-exception mode (delay slot)
	.set	at
379END(MIPSX(kern_gen_exception)) 380 381/* 382 * mipsN_kern_intr 383 * 384 * Handle an interrupt from kernel mode. 385 * Build kernframe on stack to hold interrupted kernel context, then 386 * call cpu_intr() to process it. 387 * 388 */ 389NESTED_NOPROFILE(MIPSX(kern_intr), KERNFRAME_SIZ, ra) 390 .set noat 391 .mask 0x80000000, -4 392#ifdef PARANOIA 393 PTR_L k0, L_PCB(MIPS_CURLWP) 394 nop 395 slt k0, k0, sp # k0 = L_PCB(MIPS_CURLWP) < sp 3961: beqz k0, 1b # loop forever if false 397 nop 398 PTR_L k0, L_PCB(MIPS_CURLWP) 399 nop 400 PTR_ADDU k0, USPACE 401 slt k0, sp, k0 # k0 = sp < L_PCB(MIPS_CURLWP) + USPACE 4022: beqz k0, 2b # loop forever if false 403 nop 404 PTR_L k0, L_CPU(MIPS_CURLWP) 405 nop 406 INT_L k0, CPU_INFO_IDEPTH(k0) # grab interrupt depth 407 nop 408 sltu k0, k0, 3 # must be < 3 4093: beqz k0, 3b # loop forever if false 410 nop 411#endif 412 /* 413 * Save the relevant kernel registers onto the stack. We don't need 414 * to save s0 - s8, sp, and gp because the compiler does it for us. 415 * But we use s0-s2 so need to save them. 
	 */
	PTR_SUBU sp, KERNFRAME_SIZ
	REG_S	AT, TF_BASE+TF_REG_AST(sp)
	REG_S	v0, TF_BASE+TF_REG_V0(sp)
	REG_S	v1, TF_BASE+TF_REG_V1(sp)
	mflo	v0
	mfhi	v1
	REG_S	a0, TF_BASE+TF_REG_A0(sp)
	REG_S	a1, TF_BASE+TF_REG_A1(sp)
	REG_S	a2, TF_BASE+TF_REG_A2(sp)
	REG_S	a3, TF_BASE+TF_REG_A3(sp)
	REG_S	t0, TF_BASE+TF_REG_T0(sp)
	REG_S	t1, TF_BASE+TF_REG_T1(sp)
	REG_S	t2, TF_BASE+TF_REG_T2(sp)
	REG_S	t3, TF_BASE+TF_REG_T3(sp)
	REG_S	ta0, TF_BASE+TF_REG_TA0(sp)
	REG_S	ta1, TF_BASE+TF_REG_TA1(sp)
	REG_S	ta2, TF_BASE+TF_REG_TA2(sp)
	REG_S	ta3, TF_BASE+TF_REG_TA3(sp)
	REG_S	s0, TF_BASE+TF_REG_S0(sp)	# used for saved ipl/idepth
	REG_S	s1, TF_BASE+TF_REG_S1(sp)	# used for initial status
	mfc0	s1, MIPS_COP_0_STATUS
	REG_S	s2, TF_BASE+TF_REG_S2(sp)	# used for cpu_info
	#REG_S	t8, TF_BASE+TF_REG_T8(sp)	# already contains MIPS_CURLWP
	REG_S	t9, TF_BASE+TF_REG_T9(sp)
	REG_S	ra, TF_BASE+TF_REG_RA(sp)
	REG_S	s1, TF_BASE+TF_REG_SR(sp)
	REG_S	v0, TF_BASE+TF_REG_MULLO(sp)
	REG_S	v1, TF_BASE+TF_REG_MULHI(sp)
/*
 * Call the interrupt handler.
 */
	_MFC0	ta0, MIPS_COP_0_EXC_PC	# grab exception PC
	PTR_L	s2, L_CPU(MIPS_CURLWP)	# delay slot
	REG_S	ta0, TF_BASE+TF_REG_EPC(sp)	# and save it

#if defined(DDB) || defined(DEBUG) || defined(KGDB)
	REG_S	ta0, KERNFRAME_RA(sp)	# for debugging
#endif

#ifdef PARANOIA
	INT_L	s0, CPU_INFO_CPL(s2)
	nop				# load delay
	INT_S	s0, TF_BASE+TF_PPL(sp)	# save priority level

	/*
	 * Verify the current interrupt level
	 */
	jal	_C_LABEL(splcheck)
	nop
#endif /* PARANOIA */

	/*
	 * We first need to get to IPL_HIGH so that interrupts are masked.
	 */
	jal	_C_LABEL(splhigh_noprof)
	nop

#ifdef PARANOIA
	/* splhigh must return the level splcheck saw above. */
1:	bne	s0, v0, 1b
	nop
#endif /* PARANOIA */

	sll	s0, v0, 8		# remember previous priority
					# low 8 bits used for idepth

#ifdef PARANOIA
	/*
	 * Interrupts at IPL_HIGH are not allowed.
	 */
	li	v1, IPL_HIGH
	sltu	t0, v0, v1
2:	beqz	t0, 2b
	nop
#endif /* PARANOIA */

	INT_L	t1, CPU_INFO_IDEPTH(s2)	# we need to inc. intr depth
	nop				# load delay
	or	s0, t1			# save old interrupt depth
	INT_ADDU t1, 1
	INT_S	t1, CPU_INFO_IDEPTH(s2)	# store new interrupt depth

	/*
	 * Now we can enable interrupts since no interrupts can be delivered
	 * (we are at IPL_HIGH).
	 */
	mfc0	v1, MIPS_COP_0_STATUS
	nop
	or	v0, v1, MIPS_SR_INT_IE
	mtc0	v0, MIPS_COP_0_STATUS	# write new status

	/*
	 * Now hard interrupts can be processed.
	 */
	move	a1, ta0			# 2nd arg is exception PC
	move	a2, s1			# 3rd arg is status
	jal	_C_LABEL(cpu_intr)	# cpu_intr(ppl, pc, status)
	srl	a0, s0, 8		# 1st arg is previous pri level

	and	t1, s0, 0xff		# get previous interrupt depth
	INT_S	t1, CPU_INFO_IDEPTH(s2)	# restore it to previous value

#ifdef PARANOIA
	mfc0	t0, MIPS_COP_0_STATUS	# verify INT_IE is still set
	nop
	and	t0, MIPS_SR_INT_IE
3:	beqz	t0, 3b
	nop
#endif /* PARANOIA */

#ifdef __HAVE_FAST_SOFTINTS
	and	a0, s1, MIPS_SOFT_INT_MASK	# were softints enabled?
	beqz	a0, 4f			# nope
	nop
	mfc0	v0, MIPS_COP_0_CAUSE	# grab the pending softints
	nop
	and	a0, v0			# are softints pending
	beqz	a0, 4f			# nope
	nop

	jal	_C_LABEL(softint_process)	# softint_process(pending)
	nop

#ifdef __HAVE_PREEMPTION
	srl	v1, s0, 8		# get saved priority level
	bnez	v1, 4f			# branch if not at IPL_NONE
	nop
	INT_L	t0, CPU_INFO_SOFTINTS(s2)	# get pending softints
	nop
	and	v0, t0, 1 << SOFTINT_KPREEMPT	# do we need a kernel preempt?
	beqz	v0, 4f			# nope
	nop
	xor	t0, v0			# clear preempt bit
	INT_S	t0, CPU_INFO_SOFTINTS(s2)	# and save it.
	jal	_C_LABEL(kpreempt)	# kpreempt(pc)
	PTR_L	a0, TF_BASE+TF_REG_EPC(sp)
#endif /* __HAVE_PREEMPTION */
4:
#endif /* __HAVE_FAST_SOFTINTS */
	/*
	 * Interrupts handled, restore registers and return from the interrupt.
	 * First, clear interrupt enable
	 */
	mtc0	s1, MIPS_COP_0_STATUS	# disable interrupts

	srl	a0, s0, 8		# get previous priority level
#ifdef PARANOIA
	INT_L	t0, TF_BASE+TF_PPL(sp)	# get saved priority level
	nop
9:	bne	t0, a0, 9b		# should still match
	nop

	li	t0, IPL_HIGH
	sltu	v0, a0, t0
8:	beqz	v0, 8b
	nop
#endif /* PARANOIA */

	/*
	 * Restore IPL knowing interrupts are disabled
	 */
	jal	_C_LABEL(splx_noprof)	# splx(ppl)
	nop

#ifdef PARANOIA
	mfc0	v0, MIPS_COP_0_STATUS
	nop
	or	v0, MIPS_SR_INT_IE
5:	bne	v0, s1, 5b
	nop
#endif /* PARANOIA */

	/*
	 * Restore SR
	 */
	mtc0	s1, MIPS_COP_0_STATUS

	/*
	 * Restore s0-s2 and goto common kernel return code.
	 */
	REG_L	s0, TF_BASE+TF_REG_S0(sp)
	REG_L	s1, TF_BASE+TF_REG_S1(sp)
	b	MIPSX(kern_return)
	REG_L	s2, TF_BASE+TF_REG_S2(sp)	# delay slot
	.set	at
END(MIPSX(kern_intr))

/*
 * mipsN_user_gen_exception
 *
 * Handle an exception during user mode.
 * Save user context atop the kernel stack, then call trap() to process
 * the condition.  The context can be manipulated alternatively via
 * curlwp->l_md.md_regs.
 */
NESTED_NOPROFILE(MIPSX(user_gen_exception), CALLFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
	/*
	 * Save all the registers except the kernel temporaries onto the stack.
	 */
	PTR_L	k1, CPUVAR(CURLWP)
	nop
	PTR_L	k0, L_PCB(k1)
	nop
	PTR_ADDU k0, USPACE - TF_SIZ - CALLFRAME_SIZ	# k0 = kernel stack top
	REG_S	AT, CALLFRAME_SIZ+TF_REG_AST(k0)
	REG_S	v0, CALLFRAME_SIZ+TF_REG_V0(k0)
	REG_S	v1, CALLFRAME_SIZ+TF_REG_V1(k0)
	mflo	v0
	REG_S	a0, CALLFRAME_SIZ+TF_REG_A0(k0)
	REG_S	a1, CALLFRAME_SIZ+TF_REG_A1(k0)
	REG_S	a2, CALLFRAME_SIZ+TF_REG_A2(k0)
	REG_S	a3, CALLFRAME_SIZ+TF_REG_A3(k0)
	mfhi	v1
	REG_S	t0, CALLFRAME_SIZ+TF_REG_T0(k0)
	REG_S	t1, CALLFRAME_SIZ+TF_REG_T1(k0)
	REG_S	t2, CALLFRAME_SIZ+TF_REG_T2(k0)
	REG_S	t3, CALLFRAME_SIZ+TF_REG_T3(k0)
	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
	REG_S	ta0, CALLFRAME_SIZ+TF_REG_TA0(k0)
	REG_S	ta1, CALLFRAME_SIZ+TF_REG_TA1(k0)
	REG_S	ta2, CALLFRAME_SIZ+TF_REG_TA2(k0)
	REG_S	ta3, CALLFRAME_SIZ+TF_REG_TA3(k0)
	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
	REG_S	s0, CALLFRAME_SIZ+TF_REG_S0(k0)
	REG_S	s1, CALLFRAME_SIZ+TF_REG_S1(k0)
	REG_S	s2, CALLFRAME_SIZ+TF_REG_S2(k0)
	REG_S	s3, CALLFRAME_SIZ+TF_REG_S3(k0)
	_MFC0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
	REG_S	s4, CALLFRAME_SIZ+TF_REG_S4(k0)
	REG_S	s5, CALLFRAME_SIZ+TF_REG_S5(k0)
	REG_S	s6, CALLFRAME_SIZ+TF_REG_S6(k0)
	REG_S	s7, CALLFRAME_SIZ+TF_REG_S7(k0)
	_MFC0	a3, MIPS_COP_0_EXC_PC		# 4th arg is exception PC
	REG_S	t8, CALLFRAME_SIZ+TF_REG_T8(k0)	# will be MIPS_CURLWP
	REG_S	t9, CALLFRAME_SIZ+TF_REG_T9(k0)
	REG_S	gp, CALLFRAME_SIZ+TF_REG_GP(k0)
	REG_S	sp, CALLFRAME_SIZ+TF_REG_SP(k0)
	REG_S	s8, CALLFRAME_SIZ+TF_REG_S8(k0)
	REG_S	ra, CALLFRAME_SIZ+TF_REG_RA(k0)
	REG_S	a0, CALLFRAME_SIZ+TF_REG_SR(k0)
	REG_S	v0, CALLFRAME_SIZ+TF_REG_MULLO(k0)
	REG_S	v1, CALLFRAME_SIZ+TF_REG_MULHI(k0)
	REG_S	a3, CALLFRAME_SIZ+TF_REG_EPC(k0)
#ifdef __GP_SUPPORT__
	PTR_LA	gp, _C_LABEL(_gp)	# switch to kernel GP
#endif
	move	sp, k0			# switch to kernel SP
	move	MIPS_CURLWP, k1
#ifdef NOFPU
	li	t0, MIPS_SR_INT_IE	# reenable intrs
#else
	lui	t0, %hi(MIPS_SR_COP_1_BIT)
	and	t0, a0
	or	t0, MIPS_SR_INT_IE	# make sure intrs are still on
#endif
	xor	t0, a0			# turn off the FPU & ints on
/*
 * Call the trap handler.
 */
	mtc0	t0, MIPS_COP_0_STATUS

	jal	_C_LABEL(trap)
	REG_S	a3, CALLFRAME_RA(sp)	# for debugging (delay slot)
/*
 * Check pending asynchronous traps.
 */
	INT_L	v0, L_MD_ASTPENDING(MIPS_CURLWP)	# any pending ast?
	nop
	beqz	v0, MIPSX(user_return)	# if no, skip ast processing
	nop
/*
 * We have pending asynchronous traps; all the state is already saved.
 */
	lui	ra, %hi(MIPSX(user_return))	# return directly to user return
	j	_C_LABEL(ast)
	PTR_ADDIU ra, %lo(MIPSX(user_return))	# return directly to user return
	.set	at
END(MIPSX(user_gen_exception))

/*----------------------------------------------------------------------------
 *
 * mipsN_user_intr
 *
 * Handle an interrupt from user mode.
 * We save partial state onto the kernel stack since we know there will
 * always a kernel stack and chances are we won't need the registers we
 * don't save.  If there is a pending asynchronous system trap, then save
 * the remaining state and call ast().
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
NESTED_NOPROFILE(MIPSX(user_intr), CALLFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
/*
 * Save the relevant user registers onto the kernel stack.
 * We don't need to save s0 - s8 because the compiler does it for us.
 */
	PTR_L	k1, CPUVAR(CURLWP)
	nop
	PTR_L	k0, L_PCB(k1)		# XXXuvm_lwp_getuarea
	nop
	PTR_ADDU k0, USPACE - TF_SIZ - CALLFRAME_SIZ	# k0 = kernel stack top
	REG_S	AT, CALLFRAME_SIZ+TF_REG_AST(k0)	# $1
	REG_S	v0, CALLFRAME_SIZ+TF_REG_V0(k0)		# $2
	REG_S	v1, CALLFRAME_SIZ+TF_REG_V1(k0)		# $3
	mflo	v0
	REG_S	a0, CALLFRAME_SIZ+TF_REG_A0(k0)		# $4
	REG_S	a1, CALLFRAME_SIZ+TF_REG_A1(k0)		# $5
	REG_S	a2, CALLFRAME_SIZ+TF_REG_A2(k0)		# $6
	REG_S	a3, CALLFRAME_SIZ+TF_REG_A3(k0)		# $7
	mfhi	v1
	REG_S	t0, CALLFRAME_SIZ+TF_REG_T0(k0)		# $8
	REG_S	t1, CALLFRAME_SIZ+TF_REG_T1(k0)		# $9
	REG_S	t2, CALLFRAME_SIZ+TF_REG_T2(k0)		# $10
	REG_S	t3, CALLFRAME_SIZ+TF_REG_T3(k0)		# $11
	mfc0	t0, MIPS_COP_0_CAUSE
	REG_S	ta0, CALLFRAME_SIZ+TF_REG_TA0(k0)	# $12
	REG_S	ta1, CALLFRAME_SIZ+TF_REG_TA1(k0)	# $13
	REG_S	ta2, CALLFRAME_SIZ+TF_REG_TA2(k0)	# $14
	REG_S	ta3, CALLFRAME_SIZ+TF_REG_TA3(k0)	# $15
	REG_S	s0, CALLFRAME_SIZ+TF_REG_S0(k0)		# $16
	REG_S	s1, CALLFRAME_SIZ+TF_REG_S1(k0)		# $17
	mfc0	s1, MIPS_COP_0_STATUS
	REG_S	t8, CALLFRAME_SIZ+TF_REG_T8(k0)		# $24 MIPS_CURLWP
	REG_S	t9, CALLFRAME_SIZ+TF_REG_T9(k0)		# $25
	REG_S	gp, CALLFRAME_SIZ+TF_REG_GP(k0)		# $28
	REG_S	sp, CALLFRAME_SIZ+TF_REG_SP(k0)		# $29
	REG_S	ra, CALLFRAME_SIZ+TF_REG_RA(k0)		# $31
	REG_S	s1, CALLFRAME_SIZ+TF_REG_SR(k0)
	_MFC0	ta0, MIPS_COP_0_EXC_PC
	REG_S	v0, CALLFRAME_SIZ+TF_REG_MULLO(k0)
	REG_S	v1, CALLFRAME_SIZ+TF_REG_MULHI(k0)
	REG_S	ta0, CALLFRAME_SIZ+TF_REG_EPC(k0)
	REG_S	t0, CALLFRAME_SIZ+TF_REG_CAUSE(k0)
	move	sp, k0			# switch to kernel SP
	move	MIPS_CURLWP, k1		# set curlwp reg (t8)
#if defined(DDB) || defined(DEBUG) || defined(KGDB)
	REG_S	ta0, CALLFRAME_RA(sp)	# for debugging
#endif
#ifdef __GP_SUPPORT__
	PTR_LA	gp, _C_LABEL(_gp)	# switch to kernel GP
#endif

	/*
	 * We first need to get to IPL_HIGH so that interrupts are masked.
	 */
	jal	_C_LABEL(splhigh_noprof)	# splhigh()
	nop
	move	s0, v0			# remember previous priority

	/*
	 * Now that we are at IPL_HIGH, we can turn off FPU and turn on
	 * interrupts since they all masked.
	 */
	mfc0	v1, MIPS_COP_0_STATUS
#ifndef NOFPU
	lui	v0, %hi(MIPS_SR_COP_1_BIT)
	and	v0, v1
	or	v0, MIPS_SR_INT_IE	# make sure intrs are still on
#else
	li	v0, MIPS_SR_INT_IE	# reenable intrs
#endif
	xor	v0, v1
	mtc0	v0, MIPS_COP_0_STATUS
	nop

	/*
	 * Since we interrupted user mode, the new interrupt depth must be 1.
	 */
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	li	t1, 1			# load delay slot
	INT_S	t1, CPU_INFO_IDEPTH(t0)	# store new interrupt depth (1)

	/*
	 * Now hard interrupts can be processed.
	 */
	move	a1, ta0			# 2nd arg is exception pc
	move	a2, s1			# 3rd arg is status
	jal	_C_LABEL(cpu_intr)	# cpu_intr(ppl, pc, status)
	move	a0, s0			# 1st arg is previous pri level

	/*
	 * Interrupt depth is now back to 0.
	 */
	PTR_L	t0, L_CPU(MIPS_CURLWP)
	nop
	INT_S	zero, CPU_INFO_IDEPTH(t0)

#ifdef __HAVE_FAST_SOFTINTS
	/*
	 * This an interrupt from user mode so both softints must be enabled.
	 * No need to check (unless we're being paranoid).
	 */
#ifdef PARANOIA
	and	a0, s1, MIPS_SOFT_INT_MASK	# get softints enabled bits
	xor	a0, MIPS_SOFT_INT_MASK	# invert them.
1:	bnez	a0, 1b			# loop forever if disabled
	nop
#endif
	mfc0	a0, MIPS_COP_0_CAUSE	# grab the pending softints
	nop				# load delay
	and	a0, MIPS_SOFT_INT_MASK	# are there softints pending
	beqz	a0, 4f			# nope
	nop
	jal	_C_LABEL(softint_process)	# softint_process(pending)
	nop
4:
#endif
	/*
	 * Disable interrupts
	 */
	mfc0	v1, MIPS_COP_0_STATUS
	nop				# delay slot
	and	v0, v1, MIPS_SR_INT_IE	# clear interrupt enable
	xor	v0, v1
	mtc0	v0, MIPS_COP_0_STATUS	# interrupts are disabled

	/*
	 * Restore IPL knowing interrupts are off
	 */
	jal	_C_LABEL(splx_noprof)
	move	a0, s0			# fetch previous priority level

	/*
	 * Check pending asynchronous traps.
	 */
	REG_L	s0, CALLFRAME_SIZ+TF_REG_S0(sp)	# restore
	REG_L	s1, CALLFRAME_SIZ+TF_REG_S1(sp)	# restore
	INT_L	v0, L_MD_ASTPENDING(MIPS_CURLWP)	# any pending ast?
	nop
	beqz	v0, MIPSX(user_intr_return)	# if no, skip ast processing
	nop

	/*
	 * We have a pending asynchronous trap; save remaining user state into
	 * trapframe.
	 */
	#REG_S	s0, CALLFRAME_SIZ+TF_REG_S0(sp)	# $16 (saved above)
	#REG_S	s1, CALLFRAME_SIZ+TF_REG_S1(sp)	# $17 (saved above)
	REG_S	s2, CALLFRAME_SIZ+TF_REG_S2(sp)	# $18
	REG_S	s3, CALLFRAME_SIZ+TF_REG_S3(sp)	# $19
	REG_S	s4, CALLFRAME_SIZ+TF_REG_S4(sp)	# $20
	REG_S	s5, CALLFRAME_SIZ+TF_REG_S5(sp)	# $21
	REG_S	s6, CALLFRAME_SIZ+TF_REG_S6(sp)	# $22
	REG_S	s7, CALLFRAME_SIZ+TF_REG_S7(sp)	# $23
	REG_S	s8, CALLFRAME_SIZ+TF_REG_S8(sp)	# $30

	mfc0	t0, MIPS_COP_0_STATUS
	PTR_LA	ra, MIPSX(user_return)	# load delay
	or	t0, MIPS_SR_INT_IE	# enable interrupts
	j	_C_LABEL(ast)		# ast()
	mtc0	t0, MIPS_COP_0_STATUS	# enable interrupts (spl0)
	.set	at
END(MIPSX(user_intr))

/*
 * mipsN_systemcall
 *
 * Save user context atop of kernel stack, then call syscall() to process
 * a system call.
 * The context can be manipulated alternatively via
 * curlwp->l_md.md_utf->tf_regs.
 */
NESTED_NOPROFILE(MIPSX(systemcall), CALLFRAME_SIZ, ra)
	.set	noat
	.mask	0x80000000, -4
	/*
	 * Save all the registers but kernel temporaries onto the stack.
	 */
	PTR_L	k1, CPUVAR(CURLWP)
	nop
	PTR_L	k0, L_PCB(k1)
	nop
	PTR_ADDU k0, USPACE - TF_SIZ - CALLFRAME_SIZ	# k0 = kernel stack top
	#REG_S	AT, CALLFRAME_SIZ+TF_REG_AST(k0)
	#.set	at
	REG_S	v0, CALLFRAME_SIZ+TF_REG_V0(k0)	# syscall #
	REG_S	v1, CALLFRAME_SIZ+TF_REG_V1(k0)	# used by syscall()
	mflo	v0
	REG_S	a0, CALLFRAME_SIZ+TF_REG_A0(k0)
	REG_S	a1, CALLFRAME_SIZ+TF_REG_A1(k0)
	REG_S	a2, CALLFRAME_SIZ+TF_REG_A2(k0)
	REG_S	a3, CALLFRAME_SIZ+TF_REG_A3(k0)
	move	a0, k1			# 1st arg is curlwp
	mfhi	v1
	mfc0	a1, MIPS_COP_0_STATUS	# 2nd arg is STATUS
	REG_S	s0, CALLFRAME_SIZ+TF_REG_S0(k0)
	REG_S	s1, CALLFRAME_SIZ+TF_REG_S1(k0)
	REG_S	s2, CALLFRAME_SIZ+TF_REG_S2(k0)
	REG_S	s3, CALLFRAME_SIZ+TF_REG_S3(k0)
	mfc0	a2, MIPS_COP_0_CAUSE	# 3rd arg is CAUSE
	REG_S	s4, CALLFRAME_SIZ+TF_REG_S4(k0)
	REG_S	s5, CALLFRAME_SIZ+TF_REG_S5(k0)
	REG_S	s6, CALLFRAME_SIZ+TF_REG_S6(k0)
	REG_S	s7, CALLFRAME_SIZ+TF_REG_S7(k0)
	_MFC0	a3, MIPS_COP_0_EXC_PC	# 4th arg is PC
	REG_S	t0, CALLFRAME_SIZ+TF_REG_T0(k0)
	REG_S	t1, CALLFRAME_SIZ+TF_REG_T1(k0)
	REG_S	t2, CALLFRAME_SIZ+TF_REG_T2(k0)
	REG_S	t3, CALLFRAME_SIZ+TF_REG_T3(k0)	# syscall saved gp for fork
	REG_S	ta0, CALLFRAME_SIZ+TF_REG_TA0(k0)
	REG_S	ta1, CALLFRAME_SIZ+TF_REG_TA1(k0)
	REG_S	ta2, CALLFRAME_SIZ+TF_REG_TA2(k0)
	REG_S	ta3, CALLFRAME_SIZ+TF_REG_TA3(k0)
	REG_S	t8, CALLFRAME_SIZ+TF_REG_T8(k0)	# will be MIPS_CURLWP
	REG_S	t9, CALLFRAME_SIZ+TF_REG_T9(k0)
	REG_S	gp, CALLFRAME_SIZ+TF_REG_GP(k0)
	REG_S	sp, CALLFRAME_SIZ+TF_REG_SP(k0)
	REG_S	s8, CALLFRAME_SIZ+TF_REG_S8(k0)
	REG_S	ra, CALLFRAME_SIZ+TF_REG_RA(k0)
	REG_S	a1, CALLFRAME_SIZ+TF_REG_SR(k0)
	REG_S	v0, CALLFRAME_SIZ+TF_REG_MULLO(k0)
	REG_S	v1, CALLFRAME_SIZ+TF_REG_MULHI(k0)
	REG_S	a3, CALLFRAME_SIZ+TF_REG_EPC(k0)
	PTR_L	t0, L_PROC(a0)		# curlwp->l_proc (used below)
	move	sp, k0			# switch to kernel SP
	move	MIPS_CURLWP, a0		# set curlwp reg
#ifdef __GP_SUPPORT__
	PTR_LA	gp, _C_LABEL(_gp)	# switch to kernel GP
#endif
#if defined(DDB) || defined(DEBUG) || defined(KGDB)
	move	ra, a3
	REG_S	ra, CALLFRAME_RA(sp)
#endif
	PTR_L	t9, P_MD_SYSCALL(t0)	# t9 = syscall
	/*
	 * Turn off FPU
	 */
#ifdef NOFPU
	li	t0, MIPS_SR_INT_IE
#else
	lui	t0, %hi(MIPS_SR_COP_1_BIT)
	and	t0, a1
	ori	t0, MIPS_SR_INT_IE	# turn on IEc, enable intr.
#endif
	xor	t0, a1			# turns off the FPU & ints on
	mtc0	t0, MIPS_COP_0_STATUS	# re-enable interrupts
/*
 * Call the system call handler.
 */
	jalr	t9
	nop
/*
 * Check pending asynchronous traps.
 */
	INT_L	v0, L_MD_ASTPENDING(MIPS_CURLWP)	# any pending ast?
	nop
	beqz	v0, MIPSX(user_return)	# no, skip ast processing
	nop
/*
 * We have pending asynchronous traps; all the state is already saved.
 */
	lui	ra, %hi(MIPSX(user_return))	# return directly to user return
	j	_C_LABEL(ast)
	PTR_ADDIU ra, %lo(MIPSX(user_return))	# return directly to user return
	.set	at
END(MIPSX(systemcall))

/*----------------------------------------------------------------------------
 *
 * R3000 TLB exception handlers
 *
 *----------------------------------------------------------------------------
 */

/*----------------------------------------------------------------------------
 *
 * mipsN_kern_tlb_miss --
 *
 *	Handle a TLB miss exception from kernel mode in kernel space.
 *	The BaddVAddr, Context, and EntryHi registers contain the failed
 *	virtual address.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
LEAF_NOPROFILE(MIPSX(kern_tlb_miss))
	.set	noat
	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# get the fault address
	li	k1, VM_MIN_KERNEL_ADDRESS	# compute index
	PTR_SUBU k0, k1
	INT_L	k1, _C_LABEL(Sysmapsize)	# index within range?
	PTR_SRL	k0, PGSHIFT
	sltu	k1, k0, k1
	/*
	 * If we are beyond the bounds of Sysmap, let trap panic for us.
	 */
	beqz	k1, _C_LABEL(MIPSX(kern_gen_exception))	# full trap processing
	nop				# - delay slot -
	PTR_L	k1, _C_LABEL(Sysmap)
	PTR_SLL	k0, 2			# compute offset from index
	PTR_ADDU k1, k0
	INT_L	k0, 0(k1)		# get PTE entry
	_MFC0	k1, MIPS_COP_0_EXC_PC	# get return address
	mtc0	k0, MIPS_COP_0_TLB_LOW	# save PTE entry
	and	k0, MIPS1_PG_V		# check for valid PTE entry
	beqz	k0, _C_LABEL(MIPSX(kern_gen_exception))	# PTE invalid
	nop
	tlbwr				# write random TLB
	j	k1
	rfe				# return to kernel mode (delay slot)
END(MIPSX(kern_tlb_miss))

#if 0
/*----------------------------------------------------------------------------
 *
 * mipsN_tlb_invalid_exception --
 *
 *	Handle a TLB modified exception.
 *	The BaddVAddr, Context, and EntryHi registers contain the failed
 *	virtual address.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
LEAF_NOPROFILE(MIPSX(tlb_mod_exception))
	.set	noat
	tlbp				# find the TLB entry
	mfc0	k0, MIPS_COP_0_TLB_LOW	# get the physical address
	mfc0	k1, MIPS_COP_0_TLB_INDEX	# check to be sure its valid
	or	k0, k0, MIPS1_TLB_DIRTY_BIT	# update TLB
	blt	k1, zero, 4f		# not found!!!
	mtc0	k0, MIPS_COP_0_TLB_LOW
	li	k1, MIPS_KSEG0_START
	PTR_SUBU k0, k1
	srl	k0, k0, MIPS1_TLB_PHYS_PAGE_SHIFT
	PTR_L	k1, pmap_attributes	# DANGER! DANGER!
	PTR_ADDU k0, k1
	lbu	k1, 0(k0)		# fetch old value
	nop
	or	k1, k1, 1		# set modified bit
	sb	k1, 0(k0)		# save new value
	_MFC0	k0, MIPS_COP_0_EXC_PC	# get return address
	nop
	j	k0
	rfe
4:
	break	0			# panic
	.set	at
END(MIPSX(tlb_mod_exception))
#endif

/*
 * Mark where code entered from exception handler jumptable
 * ends, for stack traceback code.
 */
	.globl	_C_LABEL(MIPSX(exceptionentry_end))
_C_LABEL(MIPSX(exceptionentry_end)):

/*--------------------------------------------------------------------------
 *
 * mipsN_tlb_set_asid --
 *
 *	Write the given pid into the TLB pid reg.
 *
 *	void mipsN_tlb_set_asid(uint32_t pid)
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	PID set in the entry hi register.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MIPSX(tlb_set_asid))
	sll	a0, MIPS1_TLB_PID_SHIFT	# put PID in right spot
	mtc0	a0, MIPS_COP_0_TLB_HI	# Write the hi reg value
	j	ra
	nop
END(MIPSX(tlb_set_asid))

/*--------------------------------------------------------------------------
 *
 * mipsN_tlb_update --
 *
 *	Update the TLB if highreg is found; otherwise, do nothing.
 *
 *	int mipsN_tlb_update(vaddr_t va, register_t lowreg)
 *
 * Results:
 *	< 0 if skipped, >= 0 if updated
 *
 * Side effects:
 *	None.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MIPSX(tlb_update))
	mfc0	v1, MIPS_COP_0_STATUS		# save the status register
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
	nop
	mfc0	t0, MIPS_COP_0_TLB_HI		# save current PID
	nop
	mtc0	a0, MIPS_COP_0_TLB_HI		# set entryhi (va + asid)
	nop
	tlbp					# probe the existence
	mfc0	v0, MIPS_COP_0_TLB_INDEX	# see what we got
	mtc0	a1, MIPS_COP_0_TLB_LOW		# set new entrylo
	bltz	v0, 2f				# index < 0 => !found
	nop
	tlbwi					# update slot found
2:
	mtc0	t0, MIPS_COP_0_TLB_HI		# restore current PID
	j	ra				# return probe result in v0
	mtc0	v1, MIPS_COP_0_STATUS		# (delay slot) restore SR
END(MIPSX(tlb_update))

/*--------------------------------------------------------------------------
 *
 * mipsN_tlb_read_indexed --
 *
 *	Read the TLB entry.
 *
 *	void mipsN_tlb_read_indexed(register_t entry, struct tlbmask *tlb)
 *
 * Results:
 *	tlb will contain the TLB entry found (tlb_lo1/tlb_mask will be 0,
 *	since the R3000 has neither a second lo register nor a page mask).
 *
 *--------------------------------------------------------------------------
 */
LEAF(MIPSX(tlb_read_indexed))
	mfc0	v1, MIPS_COP_0_STATUS		# Save the status register.
	mtc0	zero, MIPS_COP_0_STATUS		# Disable interrupts
	mfc0	t0, MIPS_COP_0_TLB_HI		# Get current PID

	sll	a0, MIPS1_TLB_INDEX_SHIFT	# entry # into Index reg format
	mtc0	a0, MIPS_COP_0_TLB_INDEX	# Set the index register
	nop
	tlbr					# Read from the TLB
	mfc0	t2, MIPS_COP_0_TLB_HI		# fetch the hi entry
	mfc0	t3, MIPS_COP_0_TLB_LOW		# fetch the low entry

	mtc0	t0, MIPS_COP_0_TLB_HI		# Restore proper PID
						# (before touching memory)
	mtc0	v1, MIPS_COP_0_STATUS		# Restore the status register

	PTR_S	t2, TLBMASK_HI(a1)
	INT_S	t3, TLBMASK_LO0(a1)
	INT_S	zero, TLBMASK_LO1(a1)
	j	ra
	INT_S	zero, TLBMASK_MASK(a1)		# (delay slot)
END(MIPSX(tlb_read_indexed))

/*--------------------------------------------------------------------------
 *
 * mipsN_tlb_write_indexed --
 *
 *	Write the TLB entry.
 *
 *	void mipsN_tlb_write_indexed(size_t entry, struct tlbmask *tlb)
 *
 * Results:
 *	None.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MIPSX(tlb_write_indexed))
	PTR_L	t2, TLBMASK_HI(a1)		# fetch the hi entry
	INT_L	t3, TLBMASK_LO0(a1)		# fetch the low entry
	mfc0	v1, MIPS_COP_0_STATUS		# Save the status register.
	mtc0	zero, MIPS_COP_0_STATUS		# Disable interrupts
	mfc0	t0, MIPS_COP_0_TLB_HI		# Get current PID

	sll	a0, MIPS1_TLB_INDEX_SHIFT	# entry # into Index reg format
	mtc0	a0, MIPS_COP_0_TLB_INDEX	# Set the index register
	nop
	mtc0	t2, MIPS_COP_0_TLB_HI
	mtc0	t3, MIPS_COP_0_TLB_LOW

	tlbwi					# Write to the TLB entry

	mtc0	t0, MIPS_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MIPS_COP_0_STATUS		# Restore the status register
END(MIPSX(tlb_write_indexed))

/*
 * void mipsN_tlb_invalidate_addr(vaddr_t va)
 *
 *	Invalidate a TLB entry for given virtual address if found in TLB.
 */
LEAF(MIPSX(tlb_invalidate_addr))
	mfc0	v1, MIPS_COP_0_STATUS		# save status register
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
	mfc0	t0, MIPS_COP_0_TLB_HI		# save current PID
	nop

	mtc0	a0, MIPS_COP_0_TLB_HI		# look for addr & PID
	nop
	tlbp					# probe the entry in question
	mfc0	a0, MIPS_COP_0_TLB_INDEX	# see what we got
	li	t1, MIPS_KSEG0_START		# load invalid address
	bltz	a0, 1f				# index < 0 then skip
	nop
	mtc0	t1, MIPS_COP_0_TLB_HI		# make entryHi invalid
	mtc0	zero, MIPS_COP_0_TLB_LOW	# zero out entryLo
	nop
	tlbwi					# overwrite the matching slot
1:
	mtc0	t0, MIPS_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MIPS_COP_0_STATUS		# restore the status register
END(MIPSX(tlb_invalidate_addr))

/*
 * void mipsN_tlb_invalidate_asids(uint32_t asid_lo, uint32_t asid_hi)
 *
 *	Invalidate TLB entries belonging to asids [asid_lo, asid_hi)
 *	leaving entries for kernel space marked global intact.
 */
LEAF(MIPSX(tlb_invalidate_asids))
	mfc0	t3, MIPS_COP_0_TLB_HI		# save EntryHi
	mfc0	v1, MIPS_COP_0_STATUS		# save status register
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts

	INT_L	t2, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES
	li	t1, MIPS1_TLB_FIRST_RAND_ENTRY << MIPS1_TLB_INDEX_SHIFT
	li	v0, MIPS_KSEG0_START		# invalid address
	sll	t2, MIPS1_TLB_INDEX_SHIFT	# limit in Index reg format

	# do {} while (t1 < t2)
1:
	mtc0	t1, MIPS_COP_0_TLB_INDEX	# set index
	nop
	tlbr					# obtain an entry
	mfc0	ta0, MIPS_COP_0_TLB_LOW
	nop
	and	ta0, ta0, MIPS1_PG_G		# check to see it has G bit
	bnez	ta0, 2f				# global (kernel): keep it
	nop

	mfc0	ta0, MIPS_COP_0_TLB_HI		# get va and ASID
	nop
	and	ta0, MIPS1_TLB_PID		# mask off ASID
	srl	ta0, MIPS1_TLB_PID_SHIFT
	sltu	ta1, ta0, a0			# asid < asid_lo?
	bnez	ta1, 2f				# yes, next tlb entry
	nop
	sltu	ta2, ta0, a1			# asid < asid_hi?
	beqz	ta2, 2f				# no, next tlb entry
	nop

	mtc0	v0, MIPS_COP_0_TLB_HI		# make entryHi invalid
	mtc0	zero, MIPS_COP_0_TLB_LOW	# zero out entryLo
	nop
	tlbwi					# invalidate the TLB entry
2:
	addu	t1, t1, 1 << MIPS1_TLB_INDEX_SHIFT # increment index
	bne	t1, t2, 1b
	nop

	mtc0	t3, MIPS_COP_0_TLB_HI		# restore entryHi

	j	ra				# new TLBpid will be set soon
	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
END(MIPSX(tlb_invalidate_asids))

/*
 * void mipsN_tlb_invalidate_all(void)
 *
 *	Invalidate TLB entirely.
 */
LEAF(MIPSX(tlb_invalidate_all))
	INT_L	a0, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES

	mfc0	v1, MIPS_COP_0_STATUS		# save the status register.
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts

	mfc0	t0, MIPS_COP_0_TLB_HI		# save current PID
	li	t1, MIPS_KSEG0_START		# invalid address
	mtc0	t1, MIPS_COP_0_TLB_HI		# make entryHi invalid
	mtc0	zero, MIPS_COP_0_TLB_LOW	# zero out entryLo

	move	t1, zero
	sll	a0, MIPS1_TLB_INDEX_SHIFT	# limit in Index reg format

	# do {} while (t1 < a0)
1:
	mtc0	t1, MIPS_COP_0_TLB_INDEX	# set TLBindex
	addu	t1, t1, 1 << MIPS1_TLB_INDEX_SHIFT # increment index
	bne	t1, a0, 1b
	tlbwi					# (delay slot) invalidate entry
	# note: the tlbwi in the delay slot runs for every iteration,
	# including the final (branch-not-taken) one.

	mtc0	t0, MIPS_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
END(MIPSX(tlb_invalidate_all))

/*
 * u_int mipsN_tlb_record_asids(u_long *bitmap, uint32_t asid_mask)
 *
 *	Scan the random part of the TLB looking at non-global entries and
 *	record each ASID in use into the bitmap.  Additionally, return the
 *	number of new unique ASIDs encountered.
1329 */ 1330LEAF(MIPSX(tlb_record_asids)) 1331 mfc0 a3, MIPS_COP_0_TLB_HI # save EntryHi 1332 li ta0, MIPS1_TLB_FIRST_RAND_ENTRY << MIPS1_TLB_INDEX_SHIFT 1333 INT_L ta1, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES 1334 move ta2, zero 1335 li ta3, 1 1336 sll ta1, MIPS1_TLB_INDEX_SHIFT 1337 1338 mfc0 v1, MIPS_COP_0_STATUS # save status register 1339 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts 1340 1341 move v0, zero # start at zero ASIDs 1342 1343 # do {} while (ta0 < ta1) 13441: 1345 mtc0 ta0, MIPS_COP_0_TLB_INDEX # set index 1346 nop 1347 tlbr # obtain an entry 1348 mfc0 t0, MIPS_COP_0_TLB_LOW 1349 nop 1350 and t0, MIPS1_PG_G # check to see it has G bit 1351 bnez t0, 4f 1352 nop 1353 1354 mfc0 t0, MIPS_COP_0_TLB_HI # get va and ASID 1355 nop 1356 and t0, MIPS1_TLB_PID 1357 srl t0, MIPS1_TLB_PID_SHIFT # shift to low bits 1358 and t0, a1 # focus on asid_mask 1359 1360 srl a2, t0, 3 + LONG_SCALESHIFT # drop low 5 bits 1361 sll a2, LONG_SCALESHIFT # make an index for the bitmap 1362 sllv t0, ta3, t0 # t0 is mask (ta3 == 1) 1363 1364 PTR_ADDU a2, a0 # index into the bitmap 1365 beq a2, ta2, 3f # is the desired cell loaded? 1366 nop # yes, don't reload it 1367 beqz ta2, 2f # have we ever loaded it? 1368 nop # nope, so don't save it 1369 1370 LONG_S t2, 0(ta2) # save the updated value. 13712: 1372 LONG_L t2, 0(a2) # and load it 1373 move ta2, a2 # remember the new cell's addr 13743: 1375 and t1, t2, t0 # see if this asid was recorded 1376 sltu t1, t1, ta3 # t1 = t1 < 1 (aka t1 == 0) 1377 addu v0, t1 # v0 += t1 1378 or t2, t0 # or in the new ASID bits 1379 13804: 1381 addu ta0, 1 << MIPS1_TLB_INDEX_SHIFT # increment TLB entry # 1382 bne ta0, ta1, 1b # keep lookup if not limit 1383 nop 1384 1385 beqz ta2, 5f # do we have a cell to write? 1386 nop # nope, nothing. 1387 1388 LONG_S t2, 0(ta2) # save the updated value. 
13895: 1390 mtc0 a3, MIPS_COP_0_TLB_HI # restore entryHi 1391 1392 j ra # new TLBpid will be set soon 1393 mtc0 v1, MIPS_COP_0_STATUS # restore status register 1394END(MIPSX(tlb_record_asids)) 1395 1396/*-------------------------------------------------------------------------- 1397 * 1398 * mipsN_tlb_enter -- 1399 * 1400 * Entr the 1401 * 1402 * mipsN_tlb_enter(size_t tlb_index, vaddr_t va_asid, uint32_t tlb_lo) 1403 * 1404 * Results: 1405 * None. 1406 * 1407 * Side effects: 1408 * None. 1409 * 1410 *-------------------------------------------------------------------------- 1411 */ 1412LEAF(MIPSX(tlb_enter)) 1413 mfc0 t0, MIPS_COP_0_TLB_HI # save current PID 1414 nop 1415 mtc0 a0, MIPS_COP_0_TLB_HI # set entryhi 1416 nop 1417 tlbp # probe the existence 1418 mfc0 v0, MIPS_COP_0_TLB_INDEX # see what we got 1419 nop 1420 bltz v0, 2f # index < 0 => !found 1421 nop 1422 beq v0, a0, 3f 1423 nop 1424 li t1, MIPS_KSEG0_START 1425 mtc0 t1, MIPS_COP_0_TLB_HI # set invalid address 1426 nop 1427 mtc0 zero, MIPS_COP_0_TLB_LOW # set invalid entrylo 1428 nop 1429 tlbwi # invalidate the old entry 1430 1431 mtc0 a1, MIPS_COP_0_TLB_HI # set entryhi 1432 nop 14332: 1434 mtc0 a0, MIPS_COP_0_TLB_INDEX # set the new location 1435 nop 14363: 1437 mtc0 a2, MIPS_COP_0_TLB_LOW # set new entrylo 1438 nop 1439 tlbwi # update the existing one 14403: 1441 j ra 1442 mtc0 t0, MIPS_COP_0_TLB_HI # restore current PID 1443END(MIPSX(tlb_enter)) 1444 1445/*---------------------------------------------------------------------------- 1446 * 1447 * R3000 trampolines and context resume 1448 * 1449 *---------------------------------------------------------------------------- 1450 */ 1451 1452/*---------------------------------------------------------------------------- 1453 * 1454 * mipsN_lwp_trampoline 1455 * 1456 * Special arrangement for a process about to go user mode right after 1457 * fork() system call. 
When the first CPU tick is scheduled to run the 1458 * forked child, it starts running from here. Then, a service function 1459 * is called with one argument supplied to complete final preparations, 1460 * and the process returns to user mode as if the fork() system call is 1461 * handled in a normal way. No need to save any registers although this 1462 * calls another. 1463 *---------------------------------------------------------------------------- 1464 */ 1465LEAF(MIPSX(lwp_trampoline)) 1466 PTR_ADDU sp, -CALLFRAME_SIZ 1467 1468 # Call lwp_startup(), with args from cpu_switchto()/cpu_setfunc() 1469 move a0, v0 1470 jal _C_LABEL(lwp_startup) 1471 move a1, MIPS_CURLWP 1472 1473 # Call the routine specified by cpu_setfunc() 1474 jalr s0 1475 move a0, s1 1476 1477 # Return to user (won't happen if a kernel thread) 1478 .set noat 1479MIPSX(user_return): 1480 REG_L s0, CALLFRAME_SIZ+TF_REG_S0(sp) # $16 1481 REG_L s1, CALLFRAME_SIZ+TF_REG_S1(sp) # $17 1482 REG_L s2, CALLFRAME_SIZ+TF_REG_S2(sp) # $18 1483 REG_L s3, CALLFRAME_SIZ+TF_REG_S3(sp) # $19 1484 REG_L s4, CALLFRAME_SIZ+TF_REG_S4(sp) # $20 1485 REG_L s5, CALLFRAME_SIZ+TF_REG_S5(sp) # $21 1486 REG_L s6, CALLFRAME_SIZ+TF_REG_S6(sp) # $22 1487 REG_L s7, CALLFRAME_SIZ+TF_REG_S7(sp) # $23 1488 REG_L s8, CALLFRAME_SIZ+TF_REG_S8(sp) # $30 1489MIPSX(user_intr_return): 1490 REG_L a0, CALLFRAME_SIZ+TF_REG_SR(sp) 1491 REG_L t0, CALLFRAME_SIZ+TF_REG_MULLO(sp) 1492 REG_L t1, CALLFRAME_SIZ+TF_REG_MULHI(sp) 1493 mtc0 a0, MIPS_COP_0_STATUS # this should disable interrupts 1494 mtlo t0 1495 mthi t1 1496 move k1, sp 1497 REG_L AT, TF_BASE+TF_REG_AST(sp) 1498 REG_L k0, CALLFRAME_SIZ+TF_REG_EPC(k1) 1499 REG_L AT, CALLFRAME_SIZ+TF_REG_AST(k1) 1500 REG_L v0, CALLFRAME_SIZ+TF_REG_V0(k1) 1501 REG_L v1, CALLFRAME_SIZ+TF_REG_V1(k1) 1502 REG_L a0, CALLFRAME_SIZ+TF_REG_A0(k1) 1503 REG_L a1, CALLFRAME_SIZ+TF_REG_A1(k1) 1504 REG_L a2, CALLFRAME_SIZ+TF_REG_A2(k1) 1505 REG_L a3, CALLFRAME_SIZ+TF_REG_A3(k1) 1506 REG_L t0, 
CALLFRAME_SIZ+TF_REG_T0(k1) 1507 REG_L t1, CALLFRAME_SIZ+TF_REG_T1(k1) 1508 REG_L t2, CALLFRAME_SIZ+TF_REG_T2(k1) 1509 REG_L t3, CALLFRAME_SIZ+TF_REG_T3(k1) 1510 REG_L ta0, CALLFRAME_SIZ+TF_REG_TA0(k1) 1511 REG_L ta1, CALLFRAME_SIZ+TF_REG_TA1(k1) 1512 REG_L ta2, CALLFRAME_SIZ+TF_REG_TA2(k1) 1513 REG_L ta3, CALLFRAME_SIZ+TF_REG_TA3(k1) 1514 REG_L t8, CALLFRAME_SIZ+TF_REG_T8(k1) 1515 REG_L t9, CALLFRAME_SIZ+TF_REG_T9(k1) 1516 REG_L gp, CALLFRAME_SIZ+TF_REG_GP(k1) 1517 REG_L ra, CALLFRAME_SIZ+TF_REG_RA(k1) 1518 REG_L sp, CALLFRAME_SIZ+TF_REG_SP(k1) 1519 nop 1520 j k0 1521 rfe 1522 .set at 1523END(MIPSX(lwp_trampoline)) 1524 1525/* 1526 * Like lwp_trampoline, but do not call lwp_startup 1527 */ 1528LEAF(MIPSX(setfunc_trampoline)) 1529 PTR_ADDU sp, -CALLFRAME_SIZ 1530 1531 # Call the routine specified by cpu_setfunc() 1532 PTR_LA ra, MIPSX(user_return) 1533 jr s0 1534 move a0, s1 1535END(MIPSX(setfunc_trampoline)) 1536 1537/* 1538 * void mipsN_cpu_switch_resume(struct lwp *newlwp) 1539 * 1540 * Wiredown the USPACE of newproc with TLB entry#0 and #1. Check 1541 * if target USPACE is already refered by any TLB entry before 1542 * doing that, and make sure TBIS(them) in the case. 
1543 */ 1544LEAF_NOPROFILE(MIPSX(cpu_switch_resume)) 1545 INT_L a1, L_MD_UPTE_0(a0) # a1 = upte[0] 1546 INT_L a2, L_MD_UPTE_1(a0) # a2 = upte[1] 1547 PTR_L s0, L_PCB(a0) # va = l->l_addr 1548 li s2, VM_MIN_KERNEL_ADDRESS 1549 blt s0, s2, resume 1550 nop 1551 1552 mfc0 t3, MIPS_COP_0_TLB_HI # save PID 1553 nop 1554 mtc0 s0, MIPS_COP_0_TLB_HI # VPN = va 1555 nop 1556 tlbp # probe 1st VPN 1557 mfc0 s1, MIPS_COP_0_TLB_INDEX 1558 nop 1559 bltz s1, entry0set 1560 li s1, MIPS_KSEG0_START # found, then 1561 mtc0 s1, MIPS_COP_0_TLB_HI 1562 mtc0 zero, MIPS_COP_0_TLB_LOW 1563 nop 1564 tlbwi # TBIS(va) 1565 nop 1566 mtc0 s0, MIPS_COP_0_TLB_HI # set 1st VPN again 1567entry0set: 1568 mtc0 zero, MIPS_COP_0_TLB_INDEX # TLB index #0 1569 ori a1, a1, MIPS1_PG_G 1570 mtc0 a1, MIPS_COP_0_TLB_LOW # 1st PFN w/ PG_G 1571 nop 1572 tlbwi # set TLB entry #0 1573 1574 addu s0, s0, PAGE_SIZE 1575 mtc0 s0, MIPS_COP_0_TLB_HI # VPN = va+PAGE_SIZE 1576 nop 1577 tlbp # probe 2nd VPN 1578 mfc0 s1, MIPS_COP_0_TLB_INDEX 1579 nop 1580 bltz s1, entry1set 1581 li s1, MIPS_KSEG0_START # found, then 1582 mtc0 s1, MIPS_COP_0_TLB_HI 1583 mtc0 zero, MIPS_COP_0_TLB_LOW 1584 nop 1585 tlbwi # TBIS(va+PAGE_SIZE) 1586 nop 1587 mtc0 s0, MIPS_COP_0_TLB_HI # set 2nd VPN again 1588entry1set: 1589 li s1, 1 << MIPS1_TLB_INDEX_SHIFT 1590 mtc0 s1, MIPS_COP_0_TLB_INDEX # TLB index #1 1591 ori a2, a2, MIPS1_PG_G 1592 mtc0 a2, MIPS_COP_0_TLB_LOW # 2nd PFN w/ PG_G 1593 nop 1594 tlbwi # set TLB entry #1 1595 nop 1596 mfc0 t3, MIPS_COP_0_TLB_HI # restore PID 1597 1598resume: 1599 j ra 1600 nop 1601END(MIPSX(cpu_switch_resume)) 1602 1603/*---------------------------------------------------------------------------- 1604 * 1605 * R3000 cache sizing and flushing code. 
 *
 *----------------------------------------------------------------------------
 */
#ifndef ENABLE_MIPS_TX3900
/*
 * void mipsN_wbflush(void)
 *
 *	Drain processor's write buffer, normally used to ensure any I/O
 *	register write operations are done before subsequent manipulations.
 *
 *	Some hardware implementations have a WB chip independent from the CPU
 *	core, and the CU0 (Coprocessor Usability #0) bit of the CP0 status
 *	register is wired to indicate the writebuffer condition.  This code
 *	busy-loops while the CU0 bit indicates a false condition.
 *
 *	On other hardware, where the writebuffer logic is implemented in a
 *	system controller ASIC chip, the wbflush operation would be done
 *	differently.
 */
LEAF(MIPSX(wbflush))
	nop					# pipeline padding before the
	nop					# coprocessor condition test
	nop
	nop
1:	bc0f	1b				# spin until CP0 cond = true
	nop					# (write buffer drained)
	j	ra
	nop
END(MIPSX(wbflush))
#else /* !ENABLE_MIPS_TX3900 */
/*
 * The differences between R3900 and R3000.
 * 1. Cache system
 *	Physical-index physical-tag
 *	fixed line-size
 *	refill-size 4/8/16/32 words (set in config register)
 *	TX3912
 *		Write-through
 *		I-cache 4KB/16B direct mapped (256line)
 *		D-cache 1KB/4B 2-way sa (128line)
 *		Cache snoop
 *	TX3922
 *		Write-through/write-back (set in config register)
 *		I-cache 16KB/16B 2-way sa
 *		D-cache 8KB/16B 2-way sa
 *		Cache snoop
 *
 * 2. Coprocessor1
 *	2.1 cache operation.
 *		R3900 uses MIPSIII cache op like method.
 *	2.2 R3900 specific CP0 register.
 *		(mips/include/r3900regs.h overrides cpuregs.h)
 *	2.3 # of TLB entries
 *		TX3912 32 entries
 *		TX3922 64 entries
 *
 * 3. System address map
 *	kseg2 0xff000000-0xfffeffff is reserved.
 *	(mips/include/vmparam.h)
 *
 * + If both MIPS1 and ENABLE_MIPS_TX3900 are defined, a kernel for the
 *   R3900 is generated.  If only MIPS1 is defined, no R3900 features are
 *   included.
 * + The R3920 core has a write-back mode, but it is always disabled in
 *   NetBSD.
 */

/* Return the raw value of the R3900 CP0 config register. */
LEAF_NOPROFILE(tx3900_cp0_config_read)
	mfc0	v0, R3900_COP_0_CONFIG
	j	ra
	nop
END(tx3900_cp0_config_read)

/*
 * void mipsN_wbflush(void)  (TX3900 variant)
 *
 * The R3900 understands the MIPS-II sync instruction, which drains
 * the write buffer directly.
 */
LEAF(MIPSX(wbflush))
	.set	push
	.set	mips2
	sync
	.set	pop
	j	ra
	nop
END(MIPSX(wbflush))
#endif /* !ENABLE_MIPS_TX3900 */

	.rdata

/*
 * Per-CPU-family operations vector; consumed by the MD mips startup
 * code when an R3000-class CPU is detected.
 */
	.globl	_C_LABEL(MIPSX(locore_vec))
_C_LABEL(MIPSX(locore_vec)):
	PTR_WORD _C_LABEL(MIPSX(cpu_switch_resume))
	PTR_WORD _C_LABEL(MIPSX(lwp_trampoline))
	PTR_WORD _C_LABEL(MIPSX(setfunc_trampoline))
	PTR_WORD _C_LABEL(MIPSX(wbflush))		# wbflush
	PTR_WORD _C_LABEL(MIPSX(tlb_set_asid))
	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_asids))
	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_addr))
	PTR_WORD _C_LABEL(nullop)			# tlb_invalidate_globals
	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_all))
	PTR_WORD _C_LABEL(MIPSX(tlb_record_asids))
	PTR_WORD _C_LABEL(MIPSX(tlb_update))
	PTR_WORD _C_LABEL(MIPSX(tlb_enter))
	PTR_WORD _C_LABEL(MIPSX(tlb_read_indexed))
	PTR_WORD _C_LABEL(MIPSX(tlb_write_indexed))

	.globl	_C_LABEL(MIPSX(locoresw))
_C_LABEL(MIPSX(locoresw)):
	PTR_WORD _C_LABEL(MIPSX(wbflush))	# lsw_wbflush
	PTR_WORD _C_LABEL(nullop)		# lsw_cpu_idle
	PTR_WORD _C_LABEL(nullop)		# lsw_send_ipi
	PTR_WORD _C_LABEL(nullop)		# lsw_cpu_offline_md
	PTR_WORD _C_LABEL(nullop)		# lsw_cpu_init
	PTR_WORD _C_LABEL(nullop)		# lsw_cpu_run
	PTR_WORD _C_LABEL(nullop)		# lsw_bus_error

/*
 * Exception dispatch table, indexed by the Cause register ExcCode
 * (0-31); kernel-mode handlers first, then user-mode handlers.
 */
MIPSX(excpt_sw):
	####
	#### The kernel exception handlers.
	####
	PTR_WORD _C_LABEL(MIPSX(kern_intr))		# 0 external interrupt
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 1 TLB modification
	PTR_WORD _C_LABEL(MIPSX(kern_tlb_miss))		# 2 TLB miss (LW/I-fetch)
	PTR_WORD _C_LABEL(MIPSX(kern_tlb_miss))		# 3 TLB miss (SW)
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 4 address error (LW/I-fetch)
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 5 address error (SW)
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 6 bus error (I-fetch)
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 7 bus error (load or store)
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 8 system call
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 9 breakpoint
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 10 reserved instruction
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 11 coprocessor unusable
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 12 arithmetic overflow
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 13 r3k reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 14 r3k reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 15 r3k reserved
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 16 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 17 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 18 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 19 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 20 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 21 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 22 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 23 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 24 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 25 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 26 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 27 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 28 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 29 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 30 never happens w/ MIPS1
	PTR_WORD _C_LABEL(MIPSX(kern_gen_exception))	# 31 never happens w/ MIPS1
	#####
	##### The user exception handlers.
	#####
	PTR_WORD _C_LABEL(MIPSX(user_intr))		# 0
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 1
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 2
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 3
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 4
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 5
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 6
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 7
	PTR_WORD _C_LABEL(MIPSX(systemcall))		# 8
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 9
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 10
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 11
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 12
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 13
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 14
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 15
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 16
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 17
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 18
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 19
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 20
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 21
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 22
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 23
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 24
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 25
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 26
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 27
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 28
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 29
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 30
	PTR_WORD _C_LABEL(MIPSX(user_gen_exception))	# 31