/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2004 Mips Technologies, Inc
 * Copyright (C) 2008 Kevin D. Kissell
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_proc.h>

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */
unsigned long irq_hwmask[NR_IRQS];

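/*
 * The LOCK/UNLOCK_MT_PRA and LOCK/UNLOCK_CORE_PRA macros below bracket
 * critical sections with interrupts disabled and with multithreading
 * (DMT) or multi-VPE execution (DVPE) inhibited, respectively. They
 * expect "flags" and "mtflags" locals to be declared by the caller.
 */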
#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)

/*
 * Data structures purely associated with SMTC parallelism
 */

/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4

struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;

/*
 * Number of FPU contexts for each VPE
 */

static int smtc_nconf1[MAX_SMTC_VPES];

/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status;

/* Boot command line configuration overrides */
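/*
 * "vpe0tcs=<n>" bounds the number of TCs bound to VPE 0,
 * "ipibufs=<n>" overrides the size of the IPI message buffer pool,
 * "nostlb" inhibits use of the shared TLB, and "asidmask=<mask>"
 * overrides the default ASID mask (it must be a contiguous low-order
 * bit mask, 0x1 through 0xff).
 */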

static int vpe0limit;
static int ipibuffers;
static int nostlb;
static int asidmask;
unsigned long smtc_asid_mask = 0xff;

static int __init vpe0tcs(char *str)
{
	get_option(&str, &vpe0limit);

	return 1;
}

static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}

__setup("vpe0tcs=", vpe0tcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}

__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);

static int imstuckcount[MAX_SMTC_VPES][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
static int vpemask[MAX_SMTC_VPES][8] = {
	{0, 0, 1, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~MVPCONTROL_VPC);
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
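						/*
						 * Config1 bits 30:25 (MMU Size)
						 * hold the TLB entry count - 1.
						 */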
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}

					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC);
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}


/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time(!).
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * cpu_possible_map and the logical/physical mappings.
 */

int __init smtc_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		set_cpu_possible(i, true);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);
#endif

	/* One of those TC's is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */

static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	static int cp1contexts[MAX_SMTC_VPES];

	/*
	 * Make a local copy of the available FPU contexts in order
	 * to keep track of TCs that can have one.
	 */
	if (tc == 1) {
		cp1contexts[0] = smtc_nconf1[0] - 1;
		cp1contexts[1] = smtc_nconf1[1];
	}

	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	/*
	 * TCContext gets an offset from the base of the IPIQ array
	 * to be used in low-level code to detect the presence of
	 * an active IPI queue.
	 */
	write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);

	/* Bind TC to VPE. */
	write_tc_c0_tcbind(vpe);

	/* In general, all TCs should have the same cpu_data indications. */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));

	/* Check to see if there is a FPU context available for this TC. */
	if (!cp1contexts[vpe])
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	else
		cp1contexts[vpe]--;

	/* Store the TC and VPE into the cpu_data structure. */
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;

	cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
}

/*
 * Tweak to get Count registers synced as closely as possible. The
 * value seems good for 34K-class cores.
 */

#define CP0_SKEW 8

void smtc_prepare_cpus(int cpus)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		IPIQ[i].resched_flag = 0; /* No reschedules queued initially */
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb) {
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	}
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

	/* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC);

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
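	/*
	 * Distribute the TCs as evenly as possible across the VPEs:
	 * each VPE gets ntc/nvpe TCs, and the remainder ("slop") is
	 * handed out one at a time to the lowest-numbered VPEs.
	 */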
	slop = ntc % nvpe;
	for (i = 0; i < nvpe; i++) {
		tcpervpe[i] = ntc / nvpe;
		if (slop) {
			if ((slop - i) > 0)
				tcpervpe[i]++;
		}
	}
	/* Handle command line override for VPE0 */
	if (vpe0limit > ntc)
		vpe0limit = ntc;
	if (vpe0limit > 0) {
		int slopslop;
		if (vpe0limit < tcpervpe[0]) {
			/* Reducing TC count - distribute to others */
			slop = tcpervpe[0] - vpe0limit;
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] += slop / (nvpe - 1);
				if (slopslop && ((slopslop - (i - 1) > 0)))
					tcpervpe[i]++;
			}
		} else if (vpe0limit > tcpervpe[0]) {
			/* Increasing TC count - steal from others */
			slop = vpe0limit - tcpervpe[0];
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] -= slop / (nvpe - 1);
				if (slopslop && ((slopslop - (i - 1) > 0)))
					tcpervpe[i]--;
			}
		}
	}

	/* Set up shared TLB */
	smtc_configure_tlb();

	for (tc = 0, vpe = 0; (vpe < nvpe) && (tc < ntc); vpe++) {
		/* Get number of CP1 contexts for each VPE. */
		if (tc == 0) {
			/*
			 * Do not call settc() for TC0 or the FPU context
			 * value will be incorrect. Besides, we know that
			 * we are TC0 anyway.
			 */
			smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() &
				VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
			if (nvpe == 2) {
				settc(1);
				smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() &
					VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
				settc(0);
			}
		}
		if (tcpervpe[vpe] == 0)
			continue;
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe[vpe]; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				if (vpe != 0) {
					/*
					 * Set MVP bit (possibly again). Do it
					 * here to catch CPUs that have no TCs
					 * bound to the VPE at reset. In that
					 * case, a TC must be bound to the VPE
					 * before we can set VPEControl[MVP]
					 */
					write_vpe_c0_vpeconf0(
						read_vpe_c0_vpeconf0() |
						VPECONF0_MVP);
				}
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (vpe != 0) {
			/*
			 * Allow this VPE to control others.
			 */
			write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
					      VPECONF0_MVP);

			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count() + CP0_SKEW);
			ehb();
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		set_cpu_possible(tc, false);
		set_cpu_present(tc, false);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);

	printk("\n");

	/* Set up coprocessor affinity CPU mask(s) */

#ifdef CONFIG_MIPS_MT_FPAFF
	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}
#endif

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	setup_cross_vpe_interrupts(nvpe);

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}

/*
 * Set up the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 */
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	unsigned long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)task_thread_info(idle));

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}

void smtc_init_secondary(void)
{
	local_irq_enable();
}

void smtc_smp_finish(void)
{
	int cpu = smp_processor_id();

	/*
	 * Lowest-numbered CPU per VPE starts a clock tick.
	 * Like per_cpu_trap_init() hack, this assumes that
	 * SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
	if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);

	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}

/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	unsigned int vpe = current_cpu_data.vpe_id;

	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * Support for IRQ affinity to TCs
 */

void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	/*
	 * If a "fast path" cache of quickly decodable affinity state
	 * is maintained, this is where it gets done, on a call up
	 * from the platform affinity code.
	 */
}

void smtc_forward_irq(unsigned int irq)
{
	int target;

	/*
	 * OK wise guy, now figure out how to get the IRQ
	 * to be serviced on an authorized "CPU".
	 *
	 * Ideally, to handle the situation where an IRQ has multiple
	 * eligible CPUs, we would maintain state per IRQ that would
	 * allow a fair distribution of service requests. Since the
	 * expected use model is any-or-only-one, for simplicity
	 * and efficiency, we just pick the easiest one to find.
	 */

	target = cpumask_first(irq_desc[irq].affinity);

	/*
	 * We depend on the platform code to have correctly processed
	 * IRQ affinity change requests to ensure that the IRQ affinity
	 * mask has been purged of bits corresponding to nonexistent and
	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
	 * connected to the physical interrupt input for the interrupt
	 * in question. Otherwise we have a nasty problem with interrupt
	 * mask management. This is best handled in non-performance-critical
	 * platform IRQ affinity setting code, to minimize interrupt-time
	 * checks.
	 */

	/* If no one is eligible, service locally */
	if (target >= NR_CPUS) {
		do_IRQ_no_affinity(irq);
		return;
	}

	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */

static void smtc_ipi_qdump(void)
{
	int i;
	struct smtc_ipi *temp;

	for (i = 0; i < NR_CPUS; i++) {
		pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
		temp = IPIQ[i].head;

		while (temp != IPIQ[i].tail) {
			pr_debug("%d %d %d: ", temp->type, temp->dest,
				 (int)temp->arg);
#ifdef SMTC_IPI_DEBUG
			pr_debug("%u %lu\n", temp->sender, temp->stamp);
#else
			pr_debug("\n");
#endif
			temp = temp->flink;
		}
	}
}

/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static inline int atomic_postincrement(atomic_t *v)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2				\n"
	"	addu	%1, %0, 1			\n"
	"	sc	%1, %2				\n"
	"	beqz	%1, 1b				\n"
	__WEAK_LLSC_MB
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "m" (v->counter)
	: "memory");

	return result;
}

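/*
 * Send an IPI of the given type to the target CPU (i.e. TC). If the
 * target is on another VPE, the message is queued and a cross-VPE
 * software interrupt is raised; otherwise we halt the target TC and
 * try to post the IPI directly, falling back to queueing it if the
 * TC is interrupt-exempt (IXMT).
 */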
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	unsigned long flags;
	int mtflags;
	unsigned long tcrestart;
	extern void r4k_wait_irqoff(void), __pastwait(void);
	int set_resched_flag = (type == LINUX_SMP_IPI &&
				action == SMP_RESCHEDULE_YOURSELF);

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
		return; /* There is a reschedule queued already */

	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		IPIQ[cpu].resched_flag |= set_resched_flag;
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * If we're in the irq-off version of the wait
			 * loop, we need to force exit from the wait and
			 * do a direct post of the IPI.
			 */
			if (cpu_wait == r4k_wait_irqoff) {
				tcrestart = read_tc_c0_tcrestart();
				if (tcrestart >= (unsigned long)r4k_wait_irqoff
				    && tcrestart < (unsigned long)__pastwait) {
					write_tc_c0_tcrestart(__pastwait);
					tcstatus &= ~TCSTATUS_IXMT;
					write_tc_c0_tcstatus(tcstatus);
					goto postdirect;
				}
			}
			/*
			 * Otherwise we queue the message for the target TC
			 * to pick up when he does a local_irq_restore()
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			IPIQ[cpu].resched_flag |= set_resched_flag;
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
postdirect:
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}

/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);
//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in the kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}

static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}

static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);

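/*
 * Invoked from ipi_decode() for SMTC_CLOCK_TICK messages: account the
 * timer interrupt and run this CPU's clockevent handler.
 */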
static void __irq_entry smtc_clock_tick_interrupt(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	int irq = MIPS_CPU_IRQ_BASE + 1;

	irq_enter();
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
	cd = &per_cpu(mips_clockevent_device, cpu);
	cd->event_handler(cd);
	irq_exit();
}

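/*
 * Decode and dispatch an IPI message. The descriptor is returned to
 * the free pool before the handler is invoked.
 */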
void ipi_decode(struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;

	smtc_ipi_nq(&freeIPIq, pipi);

	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		smtc_clock_tick_interrupt();
		break;

	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument %p\n", arg_copy);
			break;
		}
		break;
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
	case IRQ_AFFINITY_IPI:
		/*
		 * Accept a "forwarded" interrupt that was initially
		 * taken by a TC who doesn't have affinity for the IRQ.
		 */
		do_IRQ_no_affinity((int)arg_copy);
		break;
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}

/*
 * Similar to smtc_ipi_replay(), but invoked from context restore,
 * so it reuses the current exception frame rather than setting up a
 * new one with self_ipi.
 */
void deferred_smtc_ipi(void)
{
	int cpu = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 * If irq_disabled when this was called, then any IPIs queued
	 * after we test last will be taken on the next irq_enable/restore.
	 * If interrupts were enabled, then any IPIs added after the
	 * last test will be taken directly.
	 */
	while (IPIQ[cpu].head != NULL) {
		struct smtc_ipi_q *q = &IPIQ[cpu];
		struct smtc_ipi *pipi;
		unsigned long flags;

		/*
		 * It may be possible we'll come in with interrupts
		 * already enabled.
		 */
		local_irq_save(flags);
		spin_lock(&q->lock);
		pipi = __smtc_ipi_dq(q);
		spin_unlock(&q->lock);
		if (pipi != NULL) {
			if (pipi->type == LINUX_SMP_IPI &&
			    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
				IPIQ[cpu].resched_flag = 0;
			ipi_decode(pipi);
		}
		/*
		 * The use of the __raw_local restore isn't
		 * as obviously necessary here as in smtc_ipi_replay(),
		 * but it's more efficient, given that we're already
		 * running down the IPI queue.
		 */
		__raw_local_irq_restore(flags);
	}
}

/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell" hardware
 * interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;

static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	unsigned long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */
	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				if (pipi->type == LINUX_SMP_IPI &&
				    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
					IPIQ[cpu].resched_flag = 0;
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}

static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi = {
	.handler	= ipi_interrupt,
	.flags		= IRQF_DISABLED | IRQF_PERCPU,
	.name		= "SMTC_IPI"
};

static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
	if (nvpe < 1)
		return;

	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}

/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 */

/*
 * smtc_ipi_replay is called from raw_local_irq_restore
 */
void smtc_ipi_replay(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * We should be OK: If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt. The important thing
	 * is to do the last check for queued message *after* the
	 * re-enabling of interrupts.
	 */
	while (IPIQ[cpu].head != NULL) {
		struct smtc_ipi_q *q = &IPIQ[cpu];
		struct smtc_ipi *pipi;
		unsigned long flags;

		/*
		 * It's just possible we'll come in with interrupts
		 * already enabled.
		 */
		local_irq_save(flags);

		spin_lock(&q->lock);
		pipi = __smtc_ipi_dq(q);
		spin_unlock(&q->lock);
		/*
		 * But use a raw restore here to avoid recursion.
		 */
		__raw_local_irq_restore(flags);

		if (pipi) {
			self_ipi(pipi);
			smtc_cpu_stats[cpu].selfipis++;
		}
	}
}

EXPORT_SYMBOL(smtc_ipi_replay);

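/*
 * Invoked from the CPU idle loop. With CONFIG_SMTC_IDLE_HOOK_DEBUG it
 * also checks for TCs left with IXMT set or with IM bits stuck off,
 * and in all cases replays any deferred IPIs.
 */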
void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	int im;
	int flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768];	/* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = current_cpu_data.vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	smtc_ipi_replay();
}

void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("%d Recoveries of \"stolen\" FPU\n",
	       atomic_read(&smtc_fpu_recoveries));
}

/*
 * TLB management routines special to SMTC
 */

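/*
 * Pick a new ASID for "mm" on this CPU, skipping values that are still
 * live in any TC sharing this TLB. Other TCs may be halted briefly so
 * that their TCStatus ASID fields can be sampled.
 */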
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even though most callers will already have disabled
	 * them, let's be really careful...
	 */
	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		if (!((asid += ASID_INC) & ASID_MASK)) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for_each_online_cpu(i) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for_each_online_cpu(i) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}

/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */
void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}

/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */
void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}