/*
 * File:	mca.c
 * Purpose:	Generic MCA handling layer
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch <Matt_Domsch@dell.com>
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall <jenna.s.hall@intel.com>
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis <frederick.v.lewis@intel.com>
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
 *
 * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
 *
 * Copyright (C) 2006 FUJITSU LIMITED
 * Copyright (C) Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *
 * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
 *	      Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
 *	      added min save state dump, added INIT handler.
 *
 * 2001-01-03 Fred Lewis <frederick.v.lewis@intel.com>
 *	      Added setup of CMCI and CPEI IRQs, logging of corrected platform
 *	      errors, completed code for logging of corrected & uncorrected
 *	      machine check errors, and updated for conformance with Nov. 2000
 *	      revision of the SAL 3.0 spec.
 *
 * 2002-01-04 Jenna Hall <jenna.s.hall@intel.com>
 *	      Aligned MCA stack to 16 bytes, added platform vs. CPU error flag,
 *	      set SAL default return values, changed error record structure to
 *	      linked list, added init call to sal_get_state_info_size().
 *
 * 2002-03-25 Matt Domsch <Matt_Domsch@dell.com>
 *	      GUID cleanups.
 *
 * 2003-04-15 David Mosberger-Tang <davidm@hpl.hp.com>
 *	      Added INIT backtrace support.
 *
 * 2003-12-08 Keith Owens <kaos@sgi.com>
 *	      smp_call_function() must not be called from interrupt context
 *	      (can deadlock on tasklist_lock).
 *	      Use keventd to call smp_call_function().
 *
 * 2004-02-01 Keith Owens <kaos@sgi.com>
 *	      Avoid deadlock when using printk() for MCA and INIT records.
 *	      Delete all record printing code, moved to salinfo_decode in user
 *	      space.  Mark variables and functions static where possible.
 *	      Delete dead variables and functions.  Reorder to remove the need
 *	      for forward declarations and to consolidate related code.
 *
 * 2005-08-12 Keith Owens <kaos@sgi.com>
 *	      Convert MCA/INIT handlers to use per event stacks and SAL/OS
 *	      state.
 *
 * 2005-10-07 Keith Owens <kaos@sgi.com>
 *	      Add notify_die() hooks.
 *
 * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *	      Add printing support for MCA/INIT.
 *
 * 2007-04-27 Russ Anderson <rja@sgi.com>
 *	      Support multiple cpus going through OS_MCA in the same event.
 */
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/kexec.h>

#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/tlb.h>

#include "mca_drv.h"
#include "entry.h"

#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif

#define NOTIFY_INIT(event, regs, arg, spin)				\
do {									\
	if ((notify_die((event), "INIT", (regs), (arg), 0, 0)		\
			== NOTIFY_STOP) && ((spin) == 1))		\
		ia64_mca_spin(__func__);				\
} while (0)

#define NOTIFY_MCA(event, regs, arg, spin)				\
do {									\
	if ((notify_die((event), "MCA", (regs), (arg), 0, 0)		\
			== NOTIFY_STOP) && ((spin) == 1))		\
		ia64_mca_spin(__func__);				\
} while (0)

/* Used by mca_asm.S */
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	   /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
DEFINE_PER_CPU(u64, ia64_mca_tr_reload);   /* Flag for TR reload */

unsigned long __per_cpu_mca[NR_CPUS];

/* In mca_asm.S */
extern void ia64_os_init_dispatch_monarch (void);
extern void ia64_os_init_dispatch_slave (void);

static int monarch_cpu = -1;

static ia64_mc_info_t ia64_mc_info;

#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CPE_HISTORY_LENGTH    5
#define CMC_HISTORY_LENGTH    5

#ifdef CONFIG_ACPI
static struct timer_list cpe_poll_timer;
#endif
static struct timer_list cmc_poll_timer;
/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play w/ timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

static int mca_init __initdata;

/*
 * limited and delayed printing support for MCA/INIT handler
 */

#define mprintk(fmt...) ia64_mca_printk(fmt)
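
/*
 * Overview of the scheme below: messages generated in MCA/INIT context are
 * pushed through ia64_mca_printk() into the fixed-size ring buffer mlogbuf,
 * because calling printk() directly from a machine check handler can
 * deadlock.  The buffer is drained later from normal context by
 * ia64_mlogbuf_dump(), or flushed synchronously by ia64_mlogbuf_finish()
 * when the system is going down and delayed printing can no longer be
 * relied upon.
 */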
#define MLOGBUF_SIZE (512+256*NR_CPUS)
#define MLOGBUF_MSGMAX 256
static char mlogbuf[MLOGBUF_SIZE];
static DEFINE_SPINLOCK(mlogbuf_wlock);	/* mca context only */
static DEFINE_SPINLOCK(mlogbuf_rlock);	/* normal context only */
static unsigned long mlogbuf_start;
static unsigned long mlogbuf_end;
static unsigned int mlogbuf_finished = 0;
static unsigned long mlogbuf_timestamp = 0;

static int loglevel_save = -1;
#define BREAK_LOGLEVEL(__console_loglevel)		\
	oops_in_progress = 1;				\
	if (loglevel_save < 0)				\
		loglevel_save = __console_loglevel;	\
	__console_loglevel = 15;

#define RESTORE_LOGLEVEL(__console_loglevel)		\
	if (loglevel_save >= 0) {			\
		__console_loglevel = loglevel_save;	\
		loglevel_save = -1;			\
	}						\
	mlogbuf_finished = 0;				\
	oops_in_progress = 0;

/*
 * Push messages into buffer, print them later if not urgent.
 */
void ia64_mca_printk(const char *fmt, ...)
{
	va_list args;
	int printed_len;
	char temp_buf[MLOGBUF_MSGMAX];
	char *p;

	va_start(args, fmt);
	printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
	va_end(args);

	/* Copy the output into mlogbuf */
	if (oops_in_progress) {
		/* mlogbuf was abandoned, use printk directly instead. */
		printk("%s", temp_buf);
	} else {
		spin_lock(&mlogbuf_wlock);
		for (p = temp_buf; *p; p++) {
			unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
			if (next != mlogbuf_start) {
				mlogbuf[mlogbuf_end] = *p;
				mlogbuf_end = next;
			} else {
				/* buffer full */
				break;
			}
		}
		mlogbuf[mlogbuf_end] = '\0';
		spin_unlock(&mlogbuf_wlock);
	}
}
EXPORT_SYMBOL(ia64_mca_printk);

/*
 * Print buffered messages.
 * NOTE: call this after returning to normal context (e.g. from salinfod).
 */
void ia64_mlogbuf_dump(void)
{
	char temp_buf[MLOGBUF_MSGMAX];
	char *p;
	unsigned long index;
	unsigned long flags;
	unsigned int printed_len;

	/* Get output from mlogbuf */
	while (mlogbuf_start != mlogbuf_end) {
		temp_buf[0] = '\0';
		p = temp_buf;
		printed_len = 0;

		spin_lock_irqsave(&mlogbuf_rlock, flags);

		index = mlogbuf_start;
		while (index != mlogbuf_end) {
			*p = mlogbuf[index];
			index = (index + 1) % MLOGBUF_SIZE;
			if (!*p)
				break;
			p++;
			if (++printed_len >= MLOGBUF_MSGMAX - 1)
				break;
		}
		*p = '\0';
		if (temp_buf[0])
			printk("%s", temp_buf);
		mlogbuf_start = index;

		mlogbuf_timestamp = 0;
		spin_unlock_irqrestore(&mlogbuf_rlock, flags);
	}
}
EXPORT_SYMBOL(ia64_mlogbuf_dump);

/*
 * Call this if the system is going down or if messages must be flushed to
 * the console immediately (e.g. recovery failed, a crash dump is about to
 * be invoked, a long-wait rendezvous, etc.).
 * NOTE: this should be called from the monarch.
 */
static void ia64_mlogbuf_finish(int wait)
{
	BREAK_LOGLEVEL(console_loglevel);

	spin_lock_init(&mlogbuf_rlock);
	ia64_mlogbuf_dump();
	printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
		"MCA/INIT might be dodgy or fail.\n");

	if (!wait)
		return;

	/* wait for console */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);

	mlogbuf_finished = 1;
}
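
/*
 * Ring arithmetic used above, by example: one slot is sacrificed so that
 * "full" ((mlogbuf_end + 1) % MLOGBUF_SIZE == mlogbuf_start) is
 * distinguishable from "empty" (mlogbuf_end == mlogbuf_start); when the
 * buffer fills, ia64_mca_printk() silently drops the tail of the message
 * rather than block in MCA context.
 */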

/*
 * Print buffered messages from INIT context.
 */
static void ia64_mlogbuf_dump_from_init(void)
{
	if (mlogbuf_finished)
		return;

	if (mlogbuf_timestamp &&
	    time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
			"and the system seems to be messed up.\n");
		ia64_mlogbuf_finish(0);
		return;
	}

	if (!spin_trylock(&mlogbuf_rlock)) {
		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
			"Generated messages other than stack dump will be "
			"buffered to mlogbuf and will be printed later.\n");
		printk(KERN_ERR "INIT: If messages are not printed after "
			"this INIT, wait 30 sec and assert INIT again.\n");
		if (!mlogbuf_timestamp)
			mlogbuf_timestamp = jiffies;
		return;
	}
	spin_unlock(&mlogbuf_rlock);
	ia64_mlogbuf_dump();
}

static inline void
ia64_mca_spin(const char *func)
{
	if (monarch_cpu == smp_processor_id())
		ia64_mlogbuf_finish(0);
	mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
	while (1)
		cpu_relax();
}

/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES	4	/* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
	spinlock_t	isl_lock;
	int		isl_index;
	unsigned long	isl_count;
	ia64_err_rec_t	*isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

#define IA64_LOG_ALLOCATE(it, size) \
	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size); \
	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)    (1 - ia64_state_log[it].isl_index)
#define IA64_LOG_INDEX_INC(it) \
	{ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
	ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
	ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
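
/*
 * The two buffers per record type implement simple double buffering: SAL
 * data is always read into the "next" buffer, then IA64_LOG_INDEX_INC()
 * flips isl_index so that buffer becomes "current" while the old current
 * one is recycled.  E.g. with isl_index == 0, NEXT is isl_log[0] and CURR
 * is isl_log[1]; after the flip the just-filled isl_log[0] is CURR.  This
 * lets a nested MCA grab a fresh buffer while the previous record is
 * still being consumed.
 */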

/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs   :	info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs  :	None
 */
static void __init
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	// SAL will tell us the maximum size of any error record of this type
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* alloc_bootmem() doesn't like zero-sized allocations! */
		return;

	// set up OS data structures to hold error info
	IA64_LOG_ALLOCATE(sal_info_type, max_size);
	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 * Inputs   :	info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *		irq_safe    whether you can use printk at this point
 * Outputs  :	size        (total record length)
 *		*buffer     (ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t     *log_buffer;
	u64                         total_len = 0;
	unsigned long               s;

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	if (total_len) {
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);
		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
				       __func__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}

static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
	u8 *buffer;
	sal_log_record_header_t *rh;
	u64 size;
	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
#ifdef IA64_MCA_DEBUG_INFO
	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
	if (!size)
		return;

	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

	if (irq_safe)
		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
			smp_processor_id(),
			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

	/* Clear logs from corrected errors in case there's no user-level logger */
	rh = (sal_log_record_header_t *)buffer;
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(sal_info_type);
}

/*
 * search_mca_table
 *	See if the MCA surfaced in an instruction range
 *	that has been tagged as recoverable.
 *
 * Inputs
 *	first	First address range to check
 *	last	Last address range to check
 *	ip	Instruction pointer, address we are looking for
 *
 * Return value:
 *	1 on Success (in the table) / 0 on Failure (not in the table)
 */
int
search_mca_table (const struct mca_table_entry *first,
		  const struct mca_table_entry *last,
		  unsigned long ip)
{
	const struct mca_table_entry *curr;
	u64 curr_start, curr_end;

	curr = first;
	while (curr <= last) {
		curr_start = (u64) &curr->start_addr + curr->start_addr;
		curr_end = (u64) &curr->end_addr + curr->end_addr;

		if ((ip >= curr_start) && (ip <= curr_end)) {
			return 1;
		}
		curr++;
	}
	return 0;
}
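
/*
 * Note on the address arithmetic above: each mca_table_entry stores its
 * start/end addresses as offsets relative to the field holding them (hence
 * "(u64)&curr->start_addr + curr->start_addr"), so the table stays valid
 * wherever the kernel image is loaded.
 */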

/* Given an address, look for it in the mca tables. */
int mca_recover_range(unsigned long addr)
{
	extern struct mca_table_entry __start___mca_table[];
	extern struct mca_table_entry __stop___mca_table[];

	return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
}
EXPORT_SYMBOL_GPL(mca_recover_range);

#ifdef CONFIG_ACPI

int cpe_vector = -1;
int ia64_cpe_irq = -1;

static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
{
	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
out:
	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

	return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */
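
/*
 * Storm detection in ia64_mca_cpe_int_handler() above: every interrupt
 * records a jiffies timestamp in cpe_history[]; once CPE_HISTORY_LENGTH
 * interrupts land within one second (HZ), the vector is disabled and the
 * driver falls back to timer-driven polling, so a flood of corrected
 * platform errors cannot livelock the cpu.
 */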

#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *	Register the corrected platform error vector with SAL.
 *
 * Inputs
 *	cpev	Corrected Platform Error Vector number
 *
 * Outputs
 *	None
 */
void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __func__, cpev);
}
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_cmc_vector_setup
 *
 *	Set up the corrected machine check vector register in the processor.
 *	(The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	None
 *
 * Outputs
 *	None
 */
void __cpuinit
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;	/* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
		       __func__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *	Mask the corrected machine check vector register in the processor.
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *	Unmask the corrected machine check vector register in the processor.
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
}

/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu.
 *
 * Inputs  :	cpuid
 * Outputs :	None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wakeup all the slave cpus which have rendez'ed previously.
 *
 * Inputs  :	None
 * Outputs :	None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for_each_online_cpu(cpu) {
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}

}

/*
 * ia64_mca_rendez_interrupt_handler
 *
 *	This is the handler used to put slave processors into a spin loop
 *	while the monarch processor does the MCA handling, and later to
 *	wake each slave up once the monarch is done.  The state
 *	IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed
 *	in SAL.  The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
 *	the cpu has come out of the OS rendezvous.
 *
 * Inputs  :	None
 * Outputs :	None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
{
	unsigned long flags;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = NULL, .monarch_cpu = &monarch_cpu };

	/* Mask all interrupts */
	local_irq_save(flags);

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);

	/* Wait for the monarch cpu to exit. */
	while (monarch_cpu != -1)
		cpu_relax();	/* spin until monarch leaves */

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 * Inputs  :	wakeup_irq  (Wakeup-interrupt bit)
 *		arg	    (Interrupt handler specific argument)
 * Outputs :	None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg)
{
	return IRQ_HANDLED;
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,struct ia64_sal_os_state*)
	= NULL;

int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);


static inline void
copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
{
	u64 fslot, tslot, nat;
	*tr = *fr;
	fslot = ((unsigned long)fr >> 3) & 63;
	tslot = ((unsigned long)tr >> 3) & 63;
	*tnat &= ~(1UL << tslot);
	nat = (fnat >> fslot) & 1;
	*tnat |= (nat << tslot);
}
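
/*
 * copy_reg() moves one register value together with its NaT bit.  The NaT
 * bit index for an 8-byte spill slot is bits 3..8 of the slot's address
 * ((addr >> 3) & 63), the same convention the hardware uses for ar.unat
 * and RNAT; e.g. a register spilled 0x18 bytes into an aligned 512-byte
 * window owns bit 3.  The bit is cleared at the target slot and then set
 * from the source slot's value.
 */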

/* Change the comm field on the MCA/INIT task to include the pid that
 * was interrupted, it makes for easier debugging.  If that pid was 0
 * (swapper or nested MCA/INIT) then use the start of the previous comm
 * field suffixed with its cpu.
 */

static void
ia64_mca_modify_comm(const struct task_struct *previous_current)
{
	char *p, comm[sizeof(current->comm)];
	if (previous_current->pid)
		snprintf(comm, sizeof(comm), "%s %d",
			current->comm, previous_current->pid);
	else {
		int l;
		if ((p = strchr(previous_current->comm, ' ')))
			l = p - previous_current->comm;
		else
			l = strlen(previous_current->comm);
		snprintf(comm, sizeof(comm), "%s %*s %d",
			current->comm, l, previous_current->comm,
			task_thread_info(previous_current)->cpu);
	}
	memcpy(current->comm, comm, sizeof(current->comm));
}

static void
finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
		unsigned long *nat)
{
	const pal_min_state_area_t *ms = sos->pal_min_state;
	const u64 *bank;

	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
	 * pmsa_{xip,xpsr,xfs}
	 */
	if (ia64_psr(regs)->ic) {
		regs->cr_iip = ms->pmsa_iip;
		regs->cr_ipsr = ms->pmsa_ipsr;
		regs->cr_ifs = ms->pmsa_ifs;
	} else {
		regs->cr_iip = ms->pmsa_xip;
		regs->cr_ipsr = ms->pmsa_xpsr;
		regs->cr_ifs = ms->pmsa_xfs;

		sos->iip = ms->pmsa_iip;
		sos->ipsr = ms->pmsa_ipsr;
		sos->ifs = ms->pmsa_ifs;
	}
	regs->pr = ms->pmsa_pr;
	regs->b0 = ms->pmsa_br0;
	regs->ar_rsc = ms->pmsa_rsc;
	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
	if (ia64_psr(regs)->bn)
		bank = ms->pmsa_bank1_gr;
	else
		bank = ms->pmsa_bank0_gr;
	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
}
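
/*
 * Indexing note for finish_pt_regs() above: pmsa_gr[] holds the static
 * registers starting at r1, so rN lives in pmsa_gr[N-1].  The banked
 * registers r16-r31 are taken from pmsa_bank0_gr[] or pmsa_bank1_gr[]
 * according to psr.bn at the time of the event, hence the bank[N-16]
 * indexing.
 */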

/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state, it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */

static struct task_struct *
ia64_mca_modify_original_stack(struct pt_regs *regs,
		const struct switch_stack *sw,
		struct ia64_sal_os_state *sos,
		const char *type)
{
	char *p;
	ia64_va va;
	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
	const pal_min_state_area_t *ms = sos->pal_min_state;
	struct task_struct *previous_current;
	struct pt_regs *old_regs;
	struct switch_stack *old_sw;
	unsigned size = sizeof(struct pt_regs) +
		sizeof(struct switch_stack) + 16;
	unsigned long *old_bspstore, *old_bsp;
	unsigned long *new_bspstore, *new_bsp;
	unsigned long old_unat, old_rnat, new_rnat, nat;
	u64 slots, loadrs = regs->loadrs;
	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
	u64 ar_bspstore = regs->ar_bspstore;
	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
	const char *msg;
	int cpu = smp_processor_id();

	previous_current = curr_task(cpu);
	set_curr_task(cpu, current);
	if ((p = strchr(current->comm, ' ')))
		*p = '\0';

	/* Best effort attempt to cope with MCA/INIT delivered while in
	 * physical mode.
	 */
	regs->cr_ipsr = ms->pmsa_ipsr;
	if (ia64_psr(regs)->dt == 0) {
		va.l = r12;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r12 = va.l;
		}
		va.l = r13;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r13 = va.l;
		}
	}
	if (ia64_psr(regs)->rt == 0) {
		va.l = ar_bspstore;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bspstore = va.l;
		}
		va.l = ar_bsp;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bsp = va.l;
		}
	}

	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
	 * have been copied to the old stack, the old stack may fail the
	 * validation tests below.  So ia64_old_stack() must restore the dirty
	 * registers from the new stack.  The old and new bspstore probably
	 * have different alignments, so loadrs calculated on the old bsp
	 * cannot be used to restore from the new bsp.  Calculate a suitable
	 * loadrs for the new stack and save it in the new pt_regs, where
	 * ia64_old_stack() can get it.
	 */
	old_bspstore = (unsigned long *)ar_bspstore;
	old_bsp = (unsigned long *)ar_bsp;
	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
	new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
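	/* Units, for reference: loadrs keeps the byte size of the dirty RBS
	 * partition ((bsp - bspstore) slots * 8) in bits 16 and up, which is
	 * why ar_bsp was reconstructed above as ar_bspstore + (loadrs >> 16).
	 */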

	/* Verify the previous stack state before we change it */
	if (user_mode(regs)) {
		msg = "occurred in user space";
		/* previous_current is guaranteed to be valid when the task was
		 * in user space, so ...
		 */
		ia64_mca_modify_comm(previous_current);
		goto no_mod;
	}

	if (r13 != sos->prev_IA64_KR_CURRENT) {
		msg = "inconsistent previous current and r13";
		goto no_mod;
	}

	if (!mca_recover_range(ms->pmsa_iip)) {
		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent r12 and r13";
			goto no_mod;
		}
		if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bspstore and r13";
			goto no_mod;
		}
		va.p = old_bspstore;
		if (va.f.reg < 5) {
			msg = "old_bspstore is in the wrong region";
			goto no_mod;
		}
		if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bsp and r13";
			goto no_mod;
		}
		size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
		if (ar_bspstore + size > r12) {
			msg = "no room for blocked state";
			goto no_mod;
		}
	}

	ia64_mca_modify_comm(previous_current);

	/* Make the original task look blocked.  First stack a struct pt_regs,
	 * describing the state at the time of interrupt.  mca_asm.S built a
	 * partial pt_regs, copy it and fill in the blanks using minstate.
	 */
	p = (char *)r12 - sizeof(*regs);
	old_regs = (struct pt_regs *)p;
	memcpy(old_regs, regs, sizeof(*regs));
	old_regs->loadrs = loadrs;
	old_unat = old_regs->ar_unat;
	finish_pt_regs(old_regs, sos, &old_unat);

	/* Next stack a struct switch_stack.  mca_asm.S built a partial
	 * switch_stack, copy it and fill in the blanks using pt_regs and
	 * minstate.
	 *
	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
	 * ar.pfs is set to 0.
	 *
	 * unwind.c::unw_unwind() does special processing for interrupt frames.
	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
	 * switch_stack on the original stack so it will unwind correctly when
	 * unwind.c reads pt_regs.
	 *
	 * thread.ksp is updated to point to the synthesized switch_stack.
	 */
	p -= sizeof(struct switch_stack);
	old_sw = (struct switch_stack *)p;
	memcpy(old_sw, sw, sizeof(*sw));
	old_sw->caller_unat = old_unat;
	old_sw->ar_fpsr = old_regs->ar_fpsr;
	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
	old_sw->b0 = (u64)ia64_leave_kernel;
	old_sw->b1 = ms->pmsa_br1;
	old_sw->ar_pfs = 0;
	old_sw->ar_unat = old_unat;
	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
	previous_current->thread.ksp = (u64)p - 16;

	/* Finally copy the original stack's registers back to its RBS.
	 * Registers from ar.bspstore through ar.bsp at the time of the event
	 * are in the current RBS, copy them back to the original stack.  The
	 * copy must be done register by register because the original bspstore
	 * and the current one have different alignments, so the saved RNAT
	 * data occurs at different places.
	 *
	 * mca_asm does cover, so the old_bsp already includes all registers at
	 * the time of MCA/INIT.  It also does flushrs, so all registers before
	 * this function have been written to backing store on the MCA/INIT
	 * stack.
	 */
	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
	old_rnat = regs->ar_rnat;
	while (slots--) {
		if (ia64_rse_is_rnat_slot(new_bspstore)) {
			new_rnat = ia64_get_rnat(new_bspstore++);
		}
		if (ia64_rse_is_rnat_slot(old_bspstore)) {
			*old_bspstore++ = old_rnat;
			old_rnat = 0;
		}
		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
		*old_bspstore++ = *new_bspstore++;
	}
	old_sw->ar_bspstore = (unsigned long)old_bspstore;
	old_sw->ar_rnat = old_rnat;

	sos->prev_task = previous_current;
	return previous_current;

no_mod:
	mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
		smp_processor_id(), type, msg);
	old_unat = regs->ar_unat;
	finish_pt_regs(regs, sos, &old_unat);
	return previous_current;
}

/* The monarch/slave interaction is based on monarch_cpu and requires that all
 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
 * not entered rendezvous yet then wait a bit.  The assumption is that any
 * slave that has not rendezvoused after a reasonable time is never going to do
 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
 * interrupt, as well as cpus that receive the INIT slave event.
 */

static void
ia64_wait_for_slaves(int monarch, const char *type)
{
	int c, i, wait;

	/*
	 * wait 5 seconds total for slaves (arbitrary)
	 */
	for (i = 0; i < 5000; i++) {
		wait = 0;
		for_each_online_cpu(c) {
			if (c == monarch)
				continue;
			if (ia64_mc_info.imi_rendez_checkin[c]
					== IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
				udelay(1000);		/* short wait */
				wait = 1;
				break;
			}
		}
		if (!wait)
			goto all_in;
	}

	/*
	 * Some slave(s) may be dead.  Print buffered messages immediately.
	 */
	ia64_mlogbuf_finish(0);
	mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
			mprintk(" %d", c);
	}
	mprintk("\n");
	return;

all_in:
	mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
	return;
}

/* mca_insert_tr
 *
 *	Switch the rid when a TR is reloaded, if needed!
 *	iord: 1: itr, 2: dtr
 *
 */
static void mca_insert_tr(u64 iord)
{

	int i;
	u64 old_rr;
	struct ia64_tr_entry *p;
	unsigned long psr;
	int cpu = smp_processor_id();

	if (!ia64_idtrs[cpu])
		return;

	psr = ia64_clear_ic();
	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
		/* itr entries occupy the first half of ia64_idtrs[cpu],
		 * dtr entries the second half; walk entry i of this type.
		 */
		p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX + i;
		if (p->pte & 0x1) {
			old_rr = ia64_get_rr(p->ifa);
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, p->rr);
				ia64_srlz_d();
			}
			ia64_ptr(iord, p->ifa, p->itir >> 2);
			ia64_srlz_i();
			if (iord & 0x1) {
				ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (iord & 0x2) {
				ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, old_rr);
				ia64_srlz_d();
			}
		}
	}
	ia64_set_psr(psr);
}

/*
 * ia64_mca_handler
 *
 *	This is the uncorrectable machine check handler called from the OS_MCA
 *	dispatch code which is in turn called from SAL_CHECK().
 *	This is the place where the core of the OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.  This handler code is supposed to be run only on the
 *	monarch processor.  Once the monarch is done with MCA handling
 *	further MCA logging is enabled by clearing logs.
 *	Monarch also has the duty of sending wakeup-IPIs to pull the
 *	slave processors out of rendezvous spinloop.
 *
 *	If multiple processors call into OS_MCA, the first will become
 *	the monarch.  Subsequent cpus will be recorded in the mca_cpu
 *	bitmask.  After the first monarch has processed its MCA, it
 *	will wake up the next cpu in the mca_cpu bitmask and then go
 *	into the rendezvous loop.  When all processors have serviced
 *	their MCA, the last monarch frees up the rest of the processors.
 */
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	int recover, cpu = smp_processor_id();
	struct task_struct *previous_current;
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
	static atomic_t mca_count;
	static cpumask_t mca_cpu;

	if (atomic_add_return(1, &mca_count) == 1) {
		monarch_cpu = cpu;
		sos->monarch = 1;
	} else {
		cpu_set(cpu, mca_cpu);
		sos->monarch = 0;
	}
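	/* The atomic counter above is a first-in-wins monarch election: the
	 * first cpu into OS_MCA becomes monarch, every later arrival is
	 * parked in the mca_cpu mask and will be woken one at a time when
	 * the current monarch finishes.
	 */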
	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");

	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
	if (sos->monarch) {
		ia64_wait_for_slaves(cpu, "MCA");

		/* Wakeup all the processors which are spinning in the
		 * rendezvous loop.  They will leave SAL, then spin in the OS
		 * with interrupts disabled until this monarch cpu leaves the
		 * MCA handler.  That gets control back to the OS so we can
		 * backtrace the other cpus, backtrace when spinning in SAL
		 * does not work.
		 */
		ia64_mca_wakeup_all();
	} else {
		while (cpu_isset(cpu, mca_cpu))
			cpu_relax();	/* spin until monarch wakes us */
	}

	NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* MCA error recovery */
	recover = (ia64_mca_ucmc_extension
		&& ia64_mca_ucmc_extension(
			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
			sos));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	} else {
		/* Dump buffered message to console */
		ia64_mlogbuf_finish(1);
	}

	if (__get_cpu_var(ia64_mca_tr_reload)) {
		mca_insert_tr(0x1); /*Reload dynamic itrs*/
		mca_insert_tr(0x2); /*Reload dynamic dtrs*/
	}

	NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);

	if (atomic_dec_return(&mca_count) > 0) {
		int i;

		/* wake up the next monarch cpu,
		 * and put this cpu in the rendez loop.
		 */
		for_each_online_cpu(i) {
			if (cpu_isset(i, mca_cpu)) {
				monarch_cpu = i;
				cpu_clear(i, mca_cpu);	/* wake next cpu */
				while (monarch_cpu != -1)
					cpu_relax();	/* spin until last cpu leaves */
				set_curr_task(cpu, previous_current);
				ia64_mc_info.imi_rendez_checkin[cpu]
						= IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
				return;
			}
		}
	}
	set_curr_task(cpu, previous_current);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	monarch_cpu = -1;	/* This frees the slaves and previous monarchs */
}

static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);

/*
 * ia64_mca_cmc_int_handler
 *
 *	This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			/* If we're being hit with CMC interrupts, we won't
			 * ever execute the schedule_work() below.  Need to
			 * disable CMC interrupts on this processor now.
			 */
			ia64_mca_cmc_vector_disable(NULL);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
out:
	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);

	return IRQ_HANDLED;
}

/*
 * ia64_mca_cmc_int_caller
 *
 *	Triggered by sw interrupt from CMC polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 * Outputs
 *	handled
 */
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

	ia64_mca_cmc_int_handler(cmc_irq, arg);

	/* cpumask_next() returns the next online cpu strictly after the
	 * one passed in, so pass cpuid itself, not cpuid+1.
	 */
	cpuid = cpumask_next(cpuid, cpu_online_mask);

	if (cpuid < nr_cpu_ids) {
		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
			schedule_work(&cmc_enable_work);
			cmc_polling_enabled = 0;

		} else {

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 * ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs  : dummy(unused)
 * Outputs : None
 *
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
	/* Trigger a CMC interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}
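
/*
 * Both pollers work as an IPI cascade: the poll timer fires on one cpu,
 * which sends IA64_CMCP_VECTOR (or IA64_CPEP_VECTOR below) to the first
 * online cpu; each cpu logs its own records and forwards the IPI to the
 * next online cpu.  The last cpu in the chain compares the record count
 * against start_count to decide whether to re-arm the timer or switch
 * back to interrupt mode.
 */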

/*
 * ia64_mca_cpe_int_caller
 *
 *	Triggered by sw interrupt from CPE polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 * Outputs
 *	handled
 */
#ifdef CONFIG_ACPI

static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
{
	static int start_count = -1;
	static int poll_time = MIN_CPE_POLL_INTERVAL;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

	ia64_mca_cpe_int_handler(cpe_irq, arg);

	/* next online cpu strictly after this one, as in the CMC caller */
	cpuid = cpumask_next(cpuid, cpu_online_mask);

	if (cpuid < nr_cpu_ids) {
		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, back off or return to interrupt mode.
		 */
		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
		} else if (cpe_vector < 0) {
			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
		} else {
			poll_time = MIN_CPE_POLL_INTERVAL;

			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
			cpe_poll_enabled = 0;
		}

		if (cpe_poll_enabled)
			mod_timer(&cpe_poll_timer, jiffies + poll_time);
		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 * ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs), trigger interrupt
 *	on first cpu, from there it will trickle through all the cpus.
 *
 * Inputs  : dummy(unused)
 * Outputs : None
 *
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
	/* Trigger a CPE interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

#endif /* CONFIG_ACPI */

static int
default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
{
	int c;
	struct task_struct *g, *t;
	if (val != DIE_INIT_MONARCH_PROCESS)
		return NOTIFY_DONE;
#ifdef CONFIG_KEXEC
	if (atomic_read(&kdump_in_progress))
		return NOTIFY_DONE;
#endif

	BREAK_LOGLEVEL(console_loglevel);
	ia64_mlogbuf_dump_from_init();

	printk(KERN_ERR "Processes interrupted by INIT -");
	for_each_online_cpu(c) {
		struct ia64_sal_os_state *s;
		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
		g = s->prev_task;
		if (g) {
			if (g->pid)
				printk(" %d", g->pid);
			else
				printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
		}
	}
	printk("\n\n");
	if (read_trylock(&tasklist_lock)) {
		do_each_thread (g, t) {
			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL);
		} while_each_thread (g, t);
		read_unlock(&tasklist_lock);
	}
	RESTORE_LOGLEVEL(console_loglevel);
	return NOTIFY_DONE;
}

/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_os_init_dispatch
 *
 * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state
 * for this event.  This code is used for both monarch and slave INIT events,
 * see sos->monarch.
 *
 * All INIT events switch to the INIT stack and change the previous process to
 * blocked status.  If one of the INIT events is the monarch then we are
 * probably processing the nmi button/command.  Use the monarch cpu to dump all
 * the processes.  The slave INIT events all spin until the monarch cpu
 * returns.  We can also get INIT slave events for MCA, in which case the MCA
 * process is the monarch.
 */

void
ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
		  struct ia64_sal_os_state *sos)
{
	static atomic_t slaves;
	static atomic_t monarchs;
	struct task_struct *previous_current;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu };

	NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);

	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
		sos->proc_state_param, cpu, sos->monarch);
	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
	sos->os_status = IA64_INIT_RESUME;

	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
			__func__, cpu);
		atomic_dec(&slaves);
		sos->monarch = 1;
	}

	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
			__func__, cpu);
		atomic_dec(&monarchs);
		sos->monarch = 0;
	}

	if (!sos->monarch) {
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;

#ifdef CONFIG_KEXEC
		while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
			udelay(1000);
#else
		while (monarch_cpu == -1)
			cpu_relax();	/* spin until monarch enters */
#endif

		NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
		NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);

#ifdef CONFIG_KEXEC
		while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
			udelay(1000);
#else
		while (monarch_cpu != -1)
			cpu_relax();	/* spin until monarch leaves */
#endif

		NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);

		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
		set_curr_task(cpu, previous_current);
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
		atomic_dec(&slaves);
		return;
	}

	monarch_cpu = cpu;
	NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);

	/*
	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000),
	 * INIT can be generated via the BMC's command-line interface, but
	 * since the console is on the same serial line, the user will need
	 * some time to switch out of the BMC before the dump begins.
	 */
	mprintk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
	ia64_wait_for_slaves(cpu, "INIT");
	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
	 * to default_monarch_init_process() above and just print all the
	 * tasks.
	 */
	NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
	NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);

	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
	atomic_dec(&monarchs);
	set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
	return;
}

static int __init
ia64_mca_disable_cpe_polling(char *str)
{
	cpe_poll_enabled = 0;
	return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);

static struct irqaction cmci_irqaction = {
	.handler =	ia64_mca_cmc_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
	.handler =	ia64_mca_cmc_int_caller,
	.flags =	IRQF_DISABLED,
	.name =		"cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
	.handler =	ia64_mca_rendez_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
	.handler =	ia64_mca_wakeup_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
	.handler =	ia64_mca_cpe_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
	.handler =	ia64_mca_cpe_int_caller,
	.flags =	IRQF_DISABLED,
	.name =		"cpe_poll"
};
#endif /* CONFIG_ACPI */

/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
 * these stacks can never sleep, they cannot return from the kernel to user
 * space, they do not appear in a normal ps listing.  So there is no need to
 * format most of the fields.
 */

static void __cpuinit
format_mca_init_stack(void *mca_data, unsigned long offset,
		const char *type, int cpu)
{
	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
	struct thread_info *ti;
	memset(p, 0, KERNEL_STACK_SIZE);
	ti = task_thread_info(p);
	ti->flags = _TIF_MCA_INIT;
	ti->preempt_count = 1;
	ti->task = p;
	ti->cpu = cpu;
	p->stack = ti;
	p->state = TASK_UNINTERRUPTIBLE;
	cpu_set(cpu, p->cpus_allowed);
	INIT_LIST_HEAD(&p->tasks);
	p->parent = p->real_parent = p->group_leader = p;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	strncpy(p->comm, type, sizeof(p->comm)-1);
}

/* Caller prevents this from being called after init */
static void * __init_refok mca_bootmem(void)
{
	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
			       KERNEL_STACK_SIZE, 0);
}
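
/*
 * Layout assumed by format_mca_init_stack() above: each MCA/INIT stack is
 * an ordinary KERNEL_STACK_SIZE area with the pseudo task_struct at its
 * base and the thread_info reachable through task_thread_info(), so
 * "current" and the unwinder treat these handlers like normal, if
 * unschedulable, tasks.
 */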
/* Do per-CPU MCA-related initialization. */
void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
{
	void *pal_vaddr;
	void *data;
	long sz = sizeof(struct ia64_mca_cpu);
	int cpu = smp_processor_id();
	static int first_time = 1;

	/*
	 * The structure will already be allocated if this cpu has been
	 * online, then offlined.
	 */
	if (__per_cpu_mca[cpu]) {
		data = __va(__per_cpu_mca[cpu]);
	} else {
		if (first_time) {
			data = mca_bootmem();
			first_time = 0;
		} else
			data = (void *)__get_free_pages(GFP_KERNEL,
							get_order(sz));
		if (!data)
			panic("Could not allocate MCA memory for cpu %d\n",
					cpu);
	}
	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
		"MCA", cpu);
	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
		"INIT", cpu);
	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);

	/*
	 * Stash away a copy of the PTE needed to map the per-CPU page.
	 * We may need it during MCA recovery.
	 */
	__get_cpu_var(ia64_mca_per_cpu_pte) =
		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));

	/*
	 * Also, stash away a copy of the PAL address and the PTE
	 * needed to map it.
	 */
	pal_vaddr = efi_get_pal_addr();
	if (!pal_vaddr)
		return;
	__get_cpu_var(ia64_mca_pal_base) =
		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
							      PAGE_KERNEL));
}

static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!cmc_polling_enabled)
		ia64_mca_cmc_vector_enable(NULL);
	local_irq_restore(flags);
}

static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
				      unsigned long action,
				      void *hcpu)
{
	int hotcpu = (unsigned long) hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
					 NULL, 0);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mca_cpu_notifier __cpuinitdata = {
	.notifier_call = mca_cpu_callback
};
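/*
 * mca_cpu_notifier is hooked up in ia64_mca_late_init() below.  When a cpu
 * comes online while the system is in interrupt mode (cmc_polling_enabled
 * clear), ia64_mca_cmc_vector_adjust() re-enables the CMC vector on that
 * cpu; in polling mode the vector is left alone.
 */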
/*
 * ia64_mca_init
 *
 *	Do all the system level mca specific initialization.
 *
 *	1. Register spinloop and wakeup request interrupt vectors
 *
 *	2. Register OS_MCA handler entry point
 *
 *	3. Register OS_INIT handler entry point
 *
 *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 *	Note that this initialization is done very early before some kernel
 *	services are available.
 *
 *	Inputs  : None
 *
 *	Outputs : None
 */
void __init
ia64_mca_init(void)
{
	ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
	int i;
	long rc;
	struct ia64_sal_retval isrv;
	unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
	static struct notifier_block default_init_monarch_nb = {
		.notifier_call = default_monarch_init_process,
		.priority = 0,	/* we need to be notified last */
	};

	IA64_MCA_DEBUG("%s: begin\n", __func__);

	/* Clear the Rendez checkin flag for all cpus */
	for (i = 0; i < NR_CPUS; i++)
		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

	/*
	 * Register the rendezvous spinloop and wakeup mechanism with SAL
	 */

	/* Register the rendezvous interrupt vector with SAL */
	while (1) {
		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
					SAL_MC_PARAM_MECHANISM_INT,
					IA64_MCA_RENDEZ_VECTOR,
					timeout,
					SAL_MC_PARAM_RZ_ALWAYS);
		rc = isrv.status;
		if (rc == 0)
			break;
		if (rc == -2) {
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
			NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
		       "with SAL (status %ld)\n", rc);
		return;
	}

	/* Register the wakeup interrupt vector with SAL */
	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
				SAL_MC_PARAM_MECHANISM_INT,
				IA64_MCA_WAKEUP_VECTOR,
				0, 0);
	rc = isrv.status;
	if (rc) {
		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);

	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
	ia64_mc_info.imi_mca_handler_size = 0;	/* size 0 disables SAL checksumming of the handler */

	/* Register the os mca handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				       ia64_mc_info.imi_mca_handler,
				       ia64_tpa(mca_hldlr_ptr->gp),
				       ia64_mc_info.imi_mca_handler_size,
				       0, 0, 0)))
	{
		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

	ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp);
	ia64_mc_info.imi_monarch_init_handler_size = 0;
	ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
	ia64_mc_info.imi_slave_init_handler_size = 0;

	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
		       ia64_mc_info.imi_monarch_init_handler);

	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_slave_init_handler_size)))
	{
		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
		       "(status %ld)\n", rc);
		return;
	}
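	/*
	 * default_monarch_init_process runs at priority 0, i.e. after every
	 * other DIE_INIT_MONARCH_PROCESS consumer on the die notifier chain;
	 * if nobody else handles the event, it is the fallback that just
	 * prints all tasks (see the INIT monarch handling above).
	 */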
with SAL " 2015 "(status %ld)\n", rc); 2016 return; 2017 } 2018 if (register_die_notifier(&default_init_monarch_nb)) { 2019 printk(KERN_ERR "Failed to register default monarch INIT process\n"); 2020 return; 2021 } 2022 2023 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__); 2024 2025 /* 2026 * Configure the CMCI/P vector and handler. Interrupts for CMC are 2027 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). 2028 */ 2029 register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction); 2030 register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction); 2031 ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */ 2032 2033 /* Setup the MCA rendezvous interrupt vector */ 2034 register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction); 2035 2036 /* Setup the MCA wakeup interrupt vector */ 2037 register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction); 2038 2039#ifdef CONFIG_ACPI 2040 /* Setup the CPEI/P handler */ 2041 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); 2042#endif 2043 2044 /* Initialize the areas set aside by the OS to buffer the 2045 * platform/processor error states for MCA/INIT/CMC 2046 * handling. 2047 */ 2048 ia64_log_init(SAL_INFO_TYPE_MCA); 2049 ia64_log_init(SAL_INFO_TYPE_INIT); 2050 ia64_log_init(SAL_INFO_TYPE_CMC); 2051 ia64_log_init(SAL_INFO_TYPE_CPE); 2052 2053 mca_init = 1; 2054 printk(KERN_INFO "MCA related initialization done\n"); 2055} 2056 2057/* 2058 * ia64_mca_late_init 2059 * 2060 * Opportunity to setup things that require initialization later 2061 * than ia64_mca_init. Setup a timer to poll for CPEs if the 2062 * platform doesn't support an interrupt driven mechanism. 2063 * 2064 * Inputs : None 2065 * Outputs : Status 2066 */ 2067static int __init 2068ia64_mca_late_init(void) 2069{ 2070 if (!mca_init) 2071 return 0; 2072 2073 register_hotcpu_notifier(&mca_cpu_notifier); 2074 2075 /* Setup the CMCI/P vector and handler */ 2076 init_timer(&cmc_poll_timer); 2077 cmc_poll_timer.function = ia64_mca_cmc_poll; 2078 2079 /* Unmask/enable the vector */ 2080 cmc_polling_enabled = 0; 2081 schedule_work(&cmc_enable_work); 2082 2083 IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__); 2084 2085#ifdef CONFIG_ACPI 2086 /* Setup the CPEI/P vector and handler */ 2087 cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI); 2088 init_timer(&cpe_poll_timer); 2089 cpe_poll_timer.function = ia64_mca_cpe_poll; 2090 2091 { 2092 struct irq_desc *desc; 2093 unsigned int irq; 2094 2095 if (cpe_vector >= 0) { 2096 /* If platform supports CPEI, enable the irq. */ 2097 irq = local_vector_to_irq(cpe_vector); 2098 if (irq > 0) { 2099 cpe_poll_enabled = 0; 2100 desc = irq_desc + irq; 2101 desc->status |= IRQ_PER_CPU; 2102 setup_irq(irq, &mca_cpe_irqaction); 2103 ia64_cpe_irq = irq; 2104 ia64_mca_register_cpev(cpe_vector); 2105 IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", 2106 __func__); 2107 return 0; 2108 } 2109 printk(KERN_ERR "%s: Failed to find irq for CPE " 2110 "interrupt handler, vector %d\n", 2111 __func__, cpe_vector); 2112 } 2113 /* If platform doesn't support CPEI, get the timer going. */ 2114 if (cpe_poll_enabled) { 2115 ia64_mca_cpe_poll(0UL); 2116 IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__); 2117 } 2118 } 2119#endif 2120 2121 return 0; 2122} 2123 2124device_initcall(ia64_mca_late_init); 2125