/*
 * SN2 Platform specific SMP Support
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/nodemask.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/tlb.h>
#include <asm/numa.h>
#include <asm/hw_irq.h>
#include <asm/current.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>
#include <asm/sn/sn_feature_sets.h>

DEFINE_PER_CPU(struct ptc_stats, ptcstats);
DECLARE_PER_CPU(struct ptc_stats, ptcstats);

static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);

/* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */
static int sn2_flush_opt = 0;

extern unsigned long
sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
			       volatile unsigned long *, unsigned long,
			       volatile unsigned long *, unsigned long);
void
sn2_ptc_deadlock_recovery(short *, short, short, int,
			  volatile unsigned long *, unsigned long,
			  volatile unsigned long *, unsigned long);

/*
 * Note: some of the following is captured here to make debugging easier
 * (the macros make more sense if you see the debug patch - not posted)
 */
#define sn2_ptctest			0
#define local_node_uses_ptc_ga(sh1)	((sh1) ? 1 : 0)
#define max_active_pio(sh1)		((sh1) ? 32 : 7)
#define reset_max_active_on_deadlock()	1
#define PTC_LOCK(sh1)			((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)

struct ptc_stats {
	unsigned long ptc_l;
	unsigned long change_rid;
	unsigned long shub_ptc_flushes;
	unsigned long nodes_flushed;
	unsigned long deadlocks;
	unsigned long deadlocks2;
	unsigned long lock_itc_clocks;
	unsigned long shub_itc_clocks;
	unsigned long shub_itc_clocks_max;
	unsigned long shub_ptc_flushes_not_my_mm;
	unsigned long shub_ipi_flushes;
	unsigned long shub_ipi_flushes_itc_clocks;
};

/* Spin until all outstanding PIO writes from this cpu have drained. */
static inline unsigned long wait_piowc(void)
{
	volatile unsigned long *piows;
	unsigned long zeroval, ws;

	piows = pda->pio_write_status_addr;
	zeroval = pda->pio_write_status_val;
	do {
		cpu_relax();
	} while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
	return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
}
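
/*
 * Illustrative only (not a call site in this file): callers pair a PIO
 * write to a shub MMR with wait_piowc(); a nonzero return means the shub
 * flagged a write deadlock and the transaction must be recovered:
 *
 *	pio_phys_write_mmr(mmr, val);
 *	if (wait_piowc())
 *		... take the deadlock recovery path ...
 */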

/**
 * sn_migrate - SN-specific task migration actions
 * @task: Task being migrated to new CPU
 *
 * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
 * Context switching user threads which have memory-mapped MMIO may cause
 * PIOs to issue from separate CPUs, thus the PIO writes must be drained
 * from the previous CPU's Shub before execution resumes on the new CPU.
 */
void sn_migrate(struct task_struct *task)
{
	pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
	volatile unsigned long *adr = last_pda->pio_write_status_addr;
	unsigned long val = last_pda->pio_write_status_val;

	/* Drain PIO writes from old CPU's Shub */
	while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
			!= val))
		cpu_relax();
}

void sn_tlb_migrate_finish(struct mm_struct *mm)
{
	/* flush_tlb_mm is inefficient if there is more than one user of the mm */
	if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
		flush_tlb_mm(mm);
}

static void
sn2_ipi_flush_all_tlb(struct mm_struct *mm)
{
	unsigned long itc;

	itc = ia64_get_itc();
	smp_flush_tlb_cpumask(*mm_cpumask(mm));
	itc = ia64_get_itc() - itc;
	__get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
	__get_cpu_var(ptcstats).shub_ipi_flushes++;
}
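
/*
 * Note: sn_migrate() and sn_tlb_migrate_finish() are not called directly
 * from this file; they are expected to be wired up through the ia64
 * machine-vector hooks for sn2, roughly:
 *
 *	#define platform_migrate		sn_migrate
 *	#define platform_tlb_migrate_finish	sn_tlb_migrate_finish
 *
 * (see the sn2 machvec header; shown here for orientation only)
 */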

/**
 * sn2_global_tlb_purge - globally purge translation cache of virtual address range
 * @mm: mm_struct containing virtual address range
 * @start: start of virtual address range
 * @end: end of virtual address range
 * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
 *
 * Purges the translation caches of all processors of the given virtual address
 * range.
 *
 * Note:
 *	- cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
 *	- cpu_vm_mask is converted into a nodemask of the nodes containing the
 *	  cpus in cpu_vm_mask.
 *	- if only one bit is set in cpu_vm_mask & it is the current cpu & the
 *	  process is purging its own virtual address range, then only the
 *	  local TLB needs to be flushed. This flushing can be done using
 *	  ptc.l. This is the common case & avoids the global spinlock.
 *	- if multiple cpus have loaded the context, then flushing has to be
 *	  done with ptc.g/MMRs under protection of the global ptc_lock.
 */

void
sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
		     unsigned long end, unsigned long nbits)
{
	int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
	int mymm = (mm == current->active_mm && mm == current->mm);
	int use_cpu_ptcga;
	volatile unsigned long *ptc0, *ptc1;
	unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
	short nasids[MAX_NUMNODES], nix;
	nodemask_t nodes_flushed;
	int active, max_active, deadlock, flush_opt = sn2_flush_opt;

	if (flush_opt > 2) {
		sn2_ipi_flush_all_tlb(mm);
		return;
	}

	nodes_clear(nodes_flushed);
	i = 0;

	for_each_cpu(cpu, mm_cpumask(mm)) {
		cnode = cpu_to_node(cpu);
		node_set(cnode, nodes_flushed);
		lcpu = cpu;
		i++;
	}

	if (i == 0)
		return;

	preempt_disable();

	if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) {
		do {
			ia64_ptcl(start, nbits << 2);
			start += (1UL << nbits);
		} while (start < end);
		ia64_srlz_i();
		__get_cpu_var(ptcstats).ptc_l++;
		preempt_enable();
		return;
	}

	if (atomic_read(&mm->mm_users) == 1 && mymm) {
		flush_tlb_mm(mm);
		__get_cpu_var(ptcstats).change_rid++;
		preempt_enable();
		return;
	}

	if (flush_opt == 2) {
		sn2_ipi_flush_all_tlb(mm);
		preempt_enable();
		return;
	}

	itc = ia64_get_itc();
	nix = 0;
	for_each_node_mask(cnode, nodes_flushed)
		nasids[nix++] = cnodeid_to_nasid(cnode);

	rr_value = (mm->context << 3) | REGION_NUMBER(start);

	shub1 = is_shub1();
	if (shub1) {
		data0 = (1UL << SH1_PTC_0_A_SHFT) |
			(nbits << SH1_PTC_0_PS_SHFT) |
			(rr_value << SH1_PTC_0_RID_SHFT) |
			(1UL << SH1_PTC_0_START_SHFT);
		ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
		ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
	} else {
		data0 = (1UL << SH2_PTC_A_SHFT) |
			(nbits << SH2_PTC_PS_SHFT) |
			(1UL << SH2_PTC_START_SHFT);
		ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
			(rr_value << SH2_PTC_RID_SHFT));
		ptc1 = NULL;
	}

	mynasid = get_nasid();
	use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
	max_active = max_active_pio(shub1);

	itc = ia64_get_itc();
	spin_lock_irqsave(PTC_LOCK(shub1), flags);
	itc2 = ia64_get_itc();

	__get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
	__get_cpu_var(ptcstats).shub_ptc_flushes++;
	__get_cpu_var(ptcstats).nodes_flushed += nix;
	if (!mymm)
		__get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;

	if (use_cpu_ptcga && !mymm) {
		old_rr = ia64_get_rr(start);
		ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
		ia64_srlz_d();
	}

	wait_piowc();
	do {
		if (shub1)
			data1 = start | (1UL << SH1_PTC_1_START_SHFT);
		else
			data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
		deadlock = 0;
		active = 0;
		for (ibegin = 0, i = 0; i < nix; i++) {
			nasid = nasids[i];
			if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
				ia64_ptcga(start, nbits << 2);
				ia64_srlz_i();
			} else {
				ptc0 = CHANGE_NASID(nasid, ptc0);
				if (ptc1)
					ptc1 = CHANGE_NASID(nasid, ptc1);
				pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
				active++;
			}
			if (active >= max_active || i == (nix - 1)) {
				if ((deadlock = wait_piowc())) {
					if (flush_opt == 1)
						goto done;
					sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
					if (reset_max_active_on_deadlock())
						max_active = 1;
				}
				active = 0;
				ibegin = i + 1;
			}
		}
		start += (1UL << nbits);
	} while (start < end);

done:
	itc2 = ia64_get_itc() - itc2;
	__get_cpu_var(ptcstats).shub_itc_clocks += itc2;
	if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
		__get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;

	if (old_rr) {
		ia64_set_rr(start, old_rr);
		ia64_srlz_d();
	}

	spin_unlock_irqrestore(PTC_LOCK(shub1), flags);

	if (flush_opt == 1 && deadlock) {
		__get_cpu_var(ptcstats).deadlocks++;
		sn2_ipi_flush_all_tlb(mm);
	}

	preempt_enable();
}
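
/*
 * Summary of the purge paths taken by sn2_global_tlb_purge() above,
 * in the order they are tested:
 *
 *	flush_opt > 2			- always flush via IPIs
 *	one cpu, local, own mm		- ptc.l loop, no lock taken
 *	single user of the mm		- flush_tlb_mm() (allocate new RID)
 *	flush_opt == 2			- flush via IPIs
 *	otherwise			- ptc.g/MMR writes under PTC_LOCK(),
 *					  with deadlock recovery as needed
 */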

/*
 * sn2_ptc_deadlock_recovery
 *
 * Recover from PTC deadlock conditions. Recovery requires stepping through
 * each TLB flush transaction. The recovery sequence is somewhat tricky & is
 * coded in assembly language.
 */

void
sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
			  volatile unsigned long *ptc0, unsigned long data0,
			  volatile unsigned long *ptc1, unsigned long data1)
{
	short nasid, i;
	unsigned long *piows, zeroval, n;

	__get_cpu_var(ptcstats).deadlocks++;

	piows = (unsigned long *) pda->pio_write_status_addr;
	zeroval = pda->pio_write_status_val;

	for (i = ib; i <= ie; i++) {
		nasid = nasids[i];
		if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
			continue;
		ptc0 = CHANGE_NASID(nasid, ptc0);
		if (ptc1)
			ptc1 = CHANGE_NASID(nasid, ptc1);

		n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
		__get_cpu_var(ptcstats).deadlocks2 += n;
	}
}

/**
 * sn_send_IPI_phys - send an IPI to a Nasid and slice
 * @nasid: nasid to receive the interrupt (may be outside partition)
 * @physid: physical cpuid to receive the interrupt.
 * @vector: command to send
 * @delivery_mode: delivery mechanism
 *
 * Sends an IPI (interprocessor interrupt) to the processor specified by
 * @physid
 *
 * @delivery_mode can be one of the following
 *
 * %IA64_IPI_DM_INT - pend an interrupt
 * %IA64_IPI_DM_PMI - pend a PMI
 * %IA64_IPI_DM_NMI - pend an NMI
 * %IA64_IPI_DM_INIT - pend an INIT interrupt
 */
void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
{
	long val;
	unsigned long flags = 0;
	volatile long *p;

	p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
	val = (1UL << SH_IPI_INT_SEND_SHFT) |
		(physid << SH_IPI_INT_PID_SHFT) |
		((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
		((long)vector << SH_IPI_INT_IDX_SHFT) |
		(0x000feeUL << SH_IPI_INT_BASE_SHFT);

	mb();
	if (enable_shub_wars_1_1()) {
		/* SHub 1.1 workaround: serialize the MMR write with PTC traffic */
		spin_lock_irqsave(&sn2_global_ptc_lock, flags);
	}
	pio_phys_write_mmr(p, val);
	if (enable_shub_wars_1_1()) {
		wait_piowc();
		spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
	}
}

EXPORT_SYMBOL(sn_send_IPI_phys);
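
/*
 * Illustrative usage (hypothetical values): pend a normal interrupt for a
 * given @vector on the cpu with physical id @physid living on @nasid:
 *
 *	sn_send_IPI_phys(nasid, physid, vector, IA64_IPI_DM_INT);
 */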

/**
 * sn2_send_IPI - send an IPI to a processor
 * @cpuid: target of the IPI
 * @vector: command to send
 * @delivery_mode: delivery mechanism
 * @redirect: redirect the IPI?
 *
 * Sends an IPI (InterProcessor Interrupt) to the processor specified by
 * @cpuid.  @vector specifies the command to send, while @delivery_mode can
 * be one of the following
 *
 * %IA64_IPI_DM_INT - pend an interrupt
 * %IA64_IPI_DM_PMI - pend a PMI
 * %IA64_IPI_DM_NMI - pend an NMI
 * %IA64_IPI_DM_INIT - pend an INIT interrupt
 */
void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
{
	long physid;
	int nasid;

	physid = cpu_physical_id(cpuid);
	nasid = cpuid_to_nasid(cpuid);

	/* the following is used only when starting cpus at boot time */
	if (unlikely(nasid == -1))
		ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);

	sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * sn_cpu_disable_allowed - Determine if a CPU can be disabled.
 * @cpu: CPU that is requested to be disabled.
 *
 * CPU disable is only allowed on SHub2 systems running with a PROM
 * that supports CPU disable. It is not permitted to disable the boot processor.
 */
bool sn_cpu_disable_allowed(int cpu)
{
	if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) {
		if (cpu != 0)
			return true;

		printk(KERN_WARNING
		       "Disabling the boot processor is not allowed.\n");
	} else
		printk(KERN_WARNING
		       "CPU disable is not supported on this system.\n");

	return false;
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PROC_FS

#define PTC_BASENAME	"sgi_sn/ptc_statistics"

static void *sn2_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < nr_cpu_ids)
		return offset;
	return NULL;
}

static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < nr_cpu_ids)
		return offset;
	return NULL;
}

static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
{
}

static int sn2_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *) data;

	if (!cpu) {
		seq_printf(file,
			   "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_flushes ipi_nsec\n");
		seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
	}

	if (cpu < nr_cpu_ids && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
			   stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
			   stat->deadlocks,
			   1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
			   1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
			   1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
			   stat->shub_ptc_flushes_not_my_mm,
			   stat->deadlocks2,
			   stat->shub_ipi_flushes,
			   1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
	}
	return 0;
}

static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data)
{
	int cpu;
	char optstr[64];

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	sn2_flush_opt = simple_strtoul(optstr, NULL, 0);

	for_each_online_cpu(cpu)
		memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats));

	return count;
}
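
/*
 * Usage example from user space (illustrative): select the "always IPI"
 * flush algorithm (any value > 2), which also clears the per-cpu counters,
 * then read the statistics back:
 *
 *	# echo 3 > /proc/sgi_sn/ptc_statistics
 *	# cat /proc/sgi_sn/ptc_statistics
 */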

static const struct seq_operations sn2_ptc_seq_ops = {
	.start = sn2_ptc_seq_start,
	.next = sn2_ptc_seq_next,
	.stop = sn2_ptc_seq_stop,
	.show = sn2_ptc_seq_show
};

static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &sn2_ptc_seq_ops);
}

static const struct file_operations proc_sn2_ptc_operations = {
	.open = sn2_ptc_proc_open,
	.read = seq_read,
	.write = sn2_ptc_proc_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static struct proc_dir_entry *proc_sn2_ptc;

static int __init sn2_ptc_init(void)
{
	if (!ia64_platform_is("sn2"))
		return 0;

	proc_sn2_ptc = proc_create(PTC_BASENAME, 0444,
				   NULL, &proc_sn2_ptc_operations);
	if (!proc_sn2_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n", PTC_BASENAME);
		return -EINVAL;
	}
	spin_lock_init(&sn2_global_ptc_lock);
	return 0;
}

static void __exit sn2_ptc_exit(void)
{
	remove_proc_entry(PTC_BASENAME, NULL);
}

module_init(sn2_ptc_init);
module_exit(sn2_ptc_exit);
#endif /* CONFIG_PROC_FS */