1/* 2 * File: mca.c 3 * Purpose: Generic MCA handling layer 4 * 5 * Updated for latest kernel 6 * Copyright (C) 2003 Hewlett-Packard Co 7 * David Mosberger-Tang <davidm@hpl.hp.com> 8 * 9 * Copyright (C) 2002 Dell Inc. 10 * Copyright (C) Matt Domsch (Matt_Domsch@dell.com) 11 * 12 * Copyright (C) 2002 Intel 13 * Copyright (C) Jenna Hall (jenna.s.hall@intel.com) 14 * 15 * Copyright (C) 2001 Intel 16 * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com) 17 * 18 * Copyright (C) 2000 Intel 19 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com) 20 * 21 * Copyright (C) 1999, 2004 Silicon Graphics, Inc. 22 * Copyright (C) Vijay Chander(vijay@engr.sgi.com) 23 * 24 * 03/04/15 D. Mosberger Added INIT backtrace support. 25 * 02/03/25 M. Domsch GUID cleanups 26 * 27 * 02/01/04 J. Hall Aligned MCA stack to 16 bytes, added platform vs. CPU 28 * error flag, set SAL default return values, changed 29 * error record structure to linked list, added init call 30 * to sal_get_state_info_size(). 31 * 32 * 01/01/03 F. Lewis Added setup of CMCI and CPEI IRQs, logging of corrected 33 * platform errors, completed code for logging of 34 * corrected & uncorrected machine check errors, and 35 * updated for conformance with Nov. 2000 revision of the 36 * SAL 3.0 spec. 37 * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes, logging issues, 38 * added min save state dump, added INIT handler. 39 * 40 * 2003-12-08 Keith Owens <kaos@sgi.com> 41 * smp_call_function() must not be called from interrupt context (can 42 * deadlock on tasklist_lock). Use keventd to call smp_call_function(). 43 * 44 * 2004-02-01 Keith Owens <kaos@sgi.com> 45 * Avoid deadlock when using printk() for MCA and INIT records. 46 * Delete all record printing code, moved to salinfo_decode in user space. 47 * Mark variables and functions static where possible. 48 * Delete dead variables and functions. 49 * Reorder to remove the need for forward declarations and to consolidate 50 * related code. 51 * 52 * 2005-08-12 Keith Owens <kaos@sgi.com> 53 * Convert MCA/INIT handlers to use per event stacks and SAL/OS state. 54 * 55 * 2005-10-07 Keith Owens <kaos@sgi.com> 56 * Add notify_die() hooks. 57 * 58 * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> 59 * Add printing support for MCA/INIT. 60 */ 61#include <linux/types.h> 62#include <linux/init.h> 63#include <linux/sched.h> 64#include <linux/interrupt.h> 65#include <linux/irq.h> 66#include <linux/bootmem.h> 67#include <linux/acpi.h> 68#include <linux/timer.h> 69#include <linux/module.h> 70#include <linux/kernel.h> 71#include <linux/smp.h> 72#include <linux/workqueue.h> 73#include <linux/cpumask.h> 74#include <linux/kdebug.h> 75 76#include <asm/delay.h> 77#include <asm/machvec.h> 78#include <asm/meminit.h> 79#include <asm/page.h> 80#include <asm/ptrace.h> 81#include <asm/system.h> 82#include <asm/sal.h> 83#include <asm/mca.h> 84#include <asm/kexec.h> 85 86#include <asm/irq.h> 87#include <asm/hw_irq.h> 88 89#include "mca_drv.h" 90#include "entry.h" 91 92#if defined(IA64_MCA_DEBUG_INFO) 93# define IA64_MCA_DEBUG(fmt...) printk(fmt) 94#else 95# define IA64_MCA_DEBUG(fmt...) 96#endif 97 98/* Used by mca_asm.S */ 99u32 ia64_mca_serialize; 100DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ 101DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ 102DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ 103DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ 104 105unsigned long __per_cpu_mca[NR_CPUS]; 106 107/* In mca_asm.S */ 108extern void ia64_os_init_dispatch_monarch (void); 109extern void ia64_os_init_dispatch_slave (void); 110 111static int monarch_cpu = -1; 112 113static ia64_mc_info_t ia64_mc_info; 114 115#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */ 116#define MIN_CPE_POLL_INTERVAL (2*60*HZ) /* 2 minutes */ 117#define CMC_POLL_INTERVAL (1*60*HZ) /* 1 minute */ 118#define CPE_HISTORY_LENGTH 5 119#define CMC_HISTORY_LENGTH 5 120 121#ifdef CONFIG_ACPI 122static struct timer_list cpe_poll_timer; 123#endif 124static struct timer_list cmc_poll_timer; 125/* 126 * This variable tells whether we are currently in polling mode. 127 * Start with this in the wrong state so we won't play w/ timers 128 * before the system is ready. 129 */ 130static int cmc_polling_enabled = 1; 131 132/* 133 * Clearing this variable prevents CPE polling from getting activated 134 * in mca_late_init. Use it if your system doesn't provide a CPEI, 135 * but encounters problems retrieving CPE logs. This should only be 136 * necessary for debugging. 137 */ 138static int cpe_poll_enabled = 1; 139 140extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe); 141 142static int mca_init __initdata; 143 144/* 145 * limited & delayed printing support for MCA/INIT handler 146 */ 147 148#define mprintk(fmt...) ia64_mca_printk(fmt) 149 150#define MLOGBUF_SIZE (512+256*NR_CPUS) 151#define MLOGBUF_MSGMAX 256 152static char mlogbuf[MLOGBUF_SIZE]; 153static DEFINE_SPINLOCK(mlogbuf_wlock); /* mca context only */ 154static DEFINE_SPINLOCK(mlogbuf_rlock); /* normal context only */ 155static unsigned long mlogbuf_start; 156static unsigned long mlogbuf_end; 157static unsigned int mlogbuf_finished = 0; 158static unsigned long mlogbuf_timestamp = 0; 159 160static int loglevel_save = -1; 161#define BREAK_LOGLEVEL(__console_loglevel) \ 162 oops_in_progress = 1; \ 163 if (loglevel_save < 0) \ 164 loglevel_save = __console_loglevel; \ 165 __console_loglevel = 15; 166 167#define RESTORE_LOGLEVEL(__console_loglevel) \ 168 if (loglevel_save >= 0) { \ 169 __console_loglevel = loglevel_save; \ 170 loglevel_save = -1; \ 171 } \ 172 mlogbuf_finished = 0; \ 173 oops_in_progress = 0; 174 175/* 176 * Push messages into buffer, print them later if not urgent. 177 */ 178void ia64_mca_printk(const char *fmt, ...) 179{ 180 va_list args; 181 int printed_len; 182 char temp_buf[MLOGBUF_MSGMAX]; 183 char *p; 184 185 va_start(args, fmt); 186 printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args); 187 va_end(args); 188 189 /* Copy the output into mlogbuf */ 190 if (oops_in_progress) { 191 /* mlogbuf was abandoned, use printk directly instead. */ 192 printk(temp_buf); 193 } else { 194 spin_lock(&mlogbuf_wlock); 195 for (p = temp_buf; *p; p++) { 196 unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE; 197 if (next != mlogbuf_start) { 198 mlogbuf[mlogbuf_end] = *p; 199 mlogbuf_end = next; 200 } else { 201 /* buffer full */ 202 break; 203 } 204 } 205 mlogbuf[mlogbuf_end] = '\0'; 206 spin_unlock(&mlogbuf_wlock); 207 } 208} 209EXPORT_SYMBOL(ia64_mca_printk); 210 211/* 212 * Print buffered messages. 213 * NOTE: call this after returning normal context. (ex. from salinfod) 214 */ 215void ia64_mlogbuf_dump(void) 216{ 217 char temp_buf[MLOGBUF_MSGMAX]; 218 char *p; 219 unsigned long index; 220 unsigned long flags; 221 unsigned int printed_len; 222 223 /* Get output from mlogbuf */ 224 while (mlogbuf_start != mlogbuf_end) { 225 temp_buf[0] = '\0'; 226 p = temp_buf; 227 printed_len = 0; 228 229 spin_lock_irqsave(&mlogbuf_rlock, flags); 230 231 index = mlogbuf_start; 232 while (index != mlogbuf_end) { 233 *p = mlogbuf[index]; 234 index = (index + 1) % MLOGBUF_SIZE; 235 if (!*p) 236 break; 237 p++; 238 if (++printed_len >= MLOGBUF_MSGMAX - 1) 239 break; 240 } 241 *p = '\0'; 242 if (temp_buf[0]) 243 printk(temp_buf); 244 mlogbuf_start = index; 245 246 mlogbuf_timestamp = 0; 247 spin_unlock_irqrestore(&mlogbuf_rlock, flags); 248 } 249} 250EXPORT_SYMBOL(ia64_mlogbuf_dump); 251 252/* 253 * Call this if system is going to down or if immediate flushing messages to 254 * console is required. (ex. recovery was failed, crash dump is going to be 255 * invoked, long-wait rendezvous etc.) 256 * NOTE: this should be called from monarch. 257 */ 258static void ia64_mlogbuf_finish(int wait) 259{ 260 BREAK_LOGLEVEL(console_loglevel); 261 262 spin_lock_init(&mlogbuf_rlock); 263 ia64_mlogbuf_dump(); 264 printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, " 265 "MCA/INIT might be dodgy or fail.\n"); 266 267 if (!wait) 268 return; 269 270 /* wait for console */ 271 printk("Delaying for 5 seconds...\n"); 272 udelay(5*1000000); 273 274 mlogbuf_finished = 1; 275} 276 277/* 278 * Print buffered messages from INIT context. 279 */ 280static void ia64_mlogbuf_dump_from_init(void) 281{ 282 if (mlogbuf_finished) 283 return; 284 285 if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) { 286 printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT " 287 " and the system seems to be messed up.\n"); 288 ia64_mlogbuf_finish(0); 289 return; 290 } 291 292 if (!spin_trylock(&mlogbuf_rlock)) { 293 printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. " 294 "Generated messages other than stack dump will be " 295 "buffered to mlogbuf and will be printed later.\n"); 296 printk(KERN_ERR "INIT: If messages would not printed after " 297 "this INIT, wait 30sec and assert INIT again.\n"); 298 if (!mlogbuf_timestamp) 299 mlogbuf_timestamp = jiffies; 300 return; 301 } 302 spin_unlock(&mlogbuf_rlock); 303 ia64_mlogbuf_dump(); 304} 305 306static void inline 307ia64_mca_spin(const char *func) 308{ 309 if (monarch_cpu == smp_processor_id()) 310 ia64_mlogbuf_finish(0); 311 mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func); 312 while (1) 313 cpu_relax(); 314} 315/* 316 * IA64_MCA log support 317 */ 318#define IA64_MAX_LOGS 2 /* Double-buffering for nested MCAs */ 319#define IA64_MAX_LOG_TYPES 4 /* MCA, INIT, CMC, CPE */ 320 321typedef struct ia64_state_log_s 322{ 323 spinlock_t isl_lock; 324 int isl_index; 325 unsigned long isl_count; 326 ia64_err_rec_t *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */ 327} ia64_state_log_t; 328 329static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES]; 330 331#define IA64_LOG_ALLOCATE(it, size) \ 332 {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \ 333 (ia64_err_rec_t *)alloc_bootmem(size); \ 334 ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \ 335 (ia64_err_rec_t *)alloc_bootmem(size);} 336#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock) 337#define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s) 338#define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s) 339#define IA64_LOG_NEXT_INDEX(it) ia64_state_log[it].isl_index 340#define IA64_LOG_CURR_INDEX(it) 1 - ia64_state_log[it].isl_index 341#define IA64_LOG_INDEX_INC(it) \ 342 {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \ 343 ia64_state_log[it].isl_count++;} 344#define IA64_LOG_INDEX_DEC(it) \ 345 ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index 346#define IA64_LOG_NEXT_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])) 347#define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])) 348#define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count 349 350/* 351 * ia64_log_init 352 * Reset the OS ia64 log buffer 353 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE}) 354 * Outputs : None 355 */ 356static void __init 357ia64_log_init(int sal_info_type) 358{ 359 u64 max_size = 0; 360 361 IA64_LOG_NEXT_INDEX(sal_info_type) = 0; 362 IA64_LOG_LOCK_INIT(sal_info_type); 363 364 // SAL will tell us the maximum size of any error record of this type 365 max_size = ia64_sal_get_state_info_size(sal_info_type); 366 if (!max_size) 367 /* alloc_bootmem() doesn't like zero-sized allocations! */ 368 return; 369 370 // set up OS data structures to hold error info 371 IA64_LOG_ALLOCATE(sal_info_type, max_size); 372 memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size); 373 memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size); 374} 375 376/* 377 * ia64_log_get 378 * 379 * Get the current MCA log from SAL and copy it into the OS log buffer. 380 * 381 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE}) 382 * irq_safe whether you can use printk at this point 383 * Outputs : size (total record length) 384 * *buffer (ptr to error record) 385 * 386 */ 387static u64 388ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe) 389{ 390 sal_log_record_header_t *log_buffer; 391 u64 total_len = 0; 392 unsigned long s; 393 394 IA64_LOG_LOCK(sal_info_type); 395 396 /* Get the process state information */ 397 log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type); 398 399 total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer); 400 401 if (total_len) { 402 IA64_LOG_INDEX_INC(sal_info_type); 403 IA64_LOG_UNLOCK(sal_info_type); 404 if (irq_safe) { 405 IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. " 406 "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len); 407 } 408 *buffer = (u8 *) log_buffer; 409 return total_len; 410 } else { 411 IA64_LOG_UNLOCK(sal_info_type); 412 return 0; 413 } 414} 415 416static void 417ia64_mca_log_sal_error_record(int sal_info_type) 418{ 419 u8 *buffer; 420 sal_log_record_header_t *rh; 421 u64 size; 422 int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA; 423#ifdef IA64_MCA_DEBUG_INFO 424 static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" }; 425#endif 426 427 size = ia64_log_get(sal_info_type, &buffer, irq_safe); 428 if (!size) 429 return; 430 431 salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe); 432 433 if (irq_safe) 434 IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n", 435 smp_processor_id(), 436 sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN"); 437 438 /* Clear logs from corrected errors in case there's no user-level logger */ 439 rh = (sal_log_record_header_t *)buffer; 440 if (rh->severity == sal_log_severity_corrected) 441 ia64_sal_clear_state_info(sal_info_type); 442} 443 444/* 445 * search_mca_table 446 * See if the MCA surfaced in an instruction range 447 * that has been tagged as recoverable. 448 * 449 * Inputs 450 * first First address range to check 451 * last Last address range to check 452 * ip Instruction pointer, address we are looking for 453 * 454 * Return value: 455 * 1 on Success (in the table)/ 0 on Failure (not in the table) 456 */ 457int 458search_mca_table (const struct mca_table_entry *first, 459 const struct mca_table_entry *last, 460 unsigned long ip) 461{ 462 const struct mca_table_entry *curr; 463 u64 curr_start, curr_end; 464 465 curr = first; 466 while (curr <= last) { 467 curr_start = (u64) &curr->start_addr + curr->start_addr; 468 curr_end = (u64) &curr->end_addr + curr->end_addr; 469 470 if ((ip >= curr_start) && (ip <= curr_end)) { 471 return 1; 472 } 473 curr++; 474 } 475 return 0; 476} 477 478/* Given an address, look for it in the mca tables. */ 479int mca_recover_range(unsigned long addr) 480{ 481 extern struct mca_table_entry __start___mca_table[]; 482 extern struct mca_table_entry __stop___mca_table[]; 483 484 return search_mca_table(__start___mca_table, __stop___mca_table-1, addr); 485} 486EXPORT_SYMBOL_GPL(mca_recover_range); 487 488#ifdef CONFIG_ACPI 489 490int cpe_vector = -1; 491int ia64_cpe_irq = -1; 492 493static irqreturn_t 494ia64_mca_cpe_int_handler (int cpe_irq, void *arg) 495{ 496 static unsigned long cpe_history[CPE_HISTORY_LENGTH]; 497 static int index; 498 static DEFINE_SPINLOCK(cpe_history_lock); 499 500 IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", 501 __FUNCTION__, cpe_irq, smp_processor_id()); 502 503 /* SAL spec states this should run w/ interrupts enabled */ 504 local_irq_enable(); 505 506 spin_lock(&cpe_history_lock); 507 if (!cpe_poll_enabled && cpe_vector >= 0) { 508 509 int i, count = 1; /* we know 1 happened now */ 510 unsigned long now = jiffies; 511 512 for (i = 0; i < CPE_HISTORY_LENGTH; i++) { 513 if (now - cpe_history[i] <= HZ) 514 count++; 515 } 516 517 IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH); 518 if (count >= CPE_HISTORY_LENGTH) { 519 520 cpe_poll_enabled = 1; 521 spin_unlock(&cpe_history_lock); 522 disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR)); 523 524 /* 525 * Corrected errors will still be corrected, but 526 * make sure there's a log somewhere that indicates 527 * something is generating more than we can handle. 528 */ 529 printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n"); 530 531 mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL); 532 533 /* lock already released, get out now */ 534 goto out; 535 } else { 536 cpe_history[index++] = now; 537 if (index == CPE_HISTORY_LENGTH) 538 index = 0; 539 } 540 } 541 spin_unlock(&cpe_history_lock); 542out: 543 /* Get the CPE error record and log it */ 544 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE); 545 546 return IRQ_HANDLED; 547} 548 549#endif /* CONFIG_ACPI */ 550 551#ifdef CONFIG_ACPI 552/* 553 * ia64_mca_register_cpev 554 * 555 * Register the corrected platform error vector with SAL. 556 * 557 * Inputs 558 * cpev Corrected Platform Error Vector number 559 * 560 * Outputs 561 * None 562 */ 563static void __init 564ia64_mca_register_cpev (int cpev) 565{ 566 /* Register the CPE interrupt vector with SAL */ 567 struct ia64_sal_retval isrv; 568 569 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0); 570 if (isrv.status) { 571 printk(KERN_ERR "Failed to register Corrected Platform " 572 "Error interrupt vector with SAL (status %ld)\n", isrv.status); 573 return; 574 } 575 576 IA64_MCA_DEBUG("%s: corrected platform error " 577 "vector %#x registered\n", __FUNCTION__, cpev); 578} 579#endif /* CONFIG_ACPI */ 580 581/* 582 * ia64_mca_cmc_vector_setup 583 * 584 * Setup the corrected machine check vector register in the processor. 585 * (The interrupt is masked on boot. ia64_mca_late_init unmask this.) 586 * This function is invoked on a per-processor basis. 587 * 588 * Inputs 589 * None 590 * 591 * Outputs 592 * None 593 */ 594void __cpuinit 595ia64_mca_cmc_vector_setup (void) 596{ 597 cmcv_reg_t cmcv; 598 599 cmcv.cmcv_regval = 0; 600 cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */ 601 cmcv.cmcv_vector = IA64_CMC_VECTOR; 602 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); 603 604 IA64_MCA_DEBUG("%s: CPU %d corrected " 605 "machine check vector %#x registered.\n", 606 __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR); 607 608 IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n", 609 __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV)); 610} 611 612/* 613 * ia64_mca_cmc_vector_disable 614 * 615 * Mask the corrected machine check vector register in the processor. 616 * This function is invoked on a per-processor basis. 617 * 618 * Inputs 619 * dummy(unused) 620 * 621 * Outputs 622 * None 623 */ 624static void 625ia64_mca_cmc_vector_disable (void *dummy) 626{ 627 cmcv_reg_t cmcv; 628 629 cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV); 630 631 cmcv.cmcv_mask = 1; /* Mask/disable interrupt */ 632 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); 633 634 IA64_MCA_DEBUG("%s: CPU %d corrected " 635 "machine check vector %#x disabled.\n", 636 __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector); 637} 638 639/* 640 * ia64_mca_cmc_vector_enable 641 * 642 * Unmask the corrected machine check vector register in the processor. 643 * This function is invoked on a per-processor basis. 644 * 645 * Inputs 646 * dummy(unused) 647 * 648 * Outputs 649 * None 650 */ 651static void 652ia64_mca_cmc_vector_enable (void *dummy) 653{ 654 cmcv_reg_t cmcv; 655 656 cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV); 657 658 cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */ 659 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); 660 661 IA64_MCA_DEBUG("%s: CPU %d corrected " 662 "machine check vector %#x enabled.\n", 663 __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector); 664} 665 666/* 667 * ia64_mca_cmc_vector_disable_keventd 668 * 669 * Called via keventd (smp_call_function() is not safe in interrupt context) to 670 * disable the cmc interrupt vector. 671 */ 672static void 673ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) 674{ 675 on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0); 676} 677 678/* 679 * ia64_mca_cmc_vector_enable_keventd 680 * 681 * Called via keventd (smp_call_function() is not safe in interrupt context) to 682 * enable the cmc interrupt vector. 683 */ 684static void 685ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused) 686{ 687 on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0); 688} 689 690/* 691 * ia64_mca_wakeup 692 * 693 * Send an inter-cpu interrupt to wake-up a particular cpu 694 * and mark that cpu to be out of rendez. 695 * 696 * Inputs : cpuid 697 * Outputs : None 698 */ 699static void 700ia64_mca_wakeup(int cpu) 701{ 702 platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0); 703 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; 704 705} 706 707/* 708 * ia64_mca_wakeup_all 709 * 710 * Wakeup all the cpus which have rendez'ed previously. 711 * 712 * Inputs : None 713 * Outputs : None 714 */ 715static void 716ia64_mca_wakeup_all(void) 717{ 718 int cpu; 719 720 /* Clear the Rendez checkin flag for all cpus */ 721 for_each_online_cpu(cpu) { 722 if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE) 723 ia64_mca_wakeup(cpu); 724 } 725 726} 727 728/* 729 * ia64_mca_rendez_interrupt_handler 730 * 731 * This is handler used to put slave processors into spinloop 732 * while the monarch processor does the mca handling and later 733 * wake each slave up once the monarch is done. 734 * 735 * Inputs : None 736 * Outputs : None 737 */ 738static irqreturn_t 739ia64_mca_rendez_int_handler(int rendez_irq, void *arg) 740{ 741 unsigned long flags; 742 int cpu = smp_processor_id(); 743 struct ia64_mca_notify_die nd = 744 { .sos = NULL, .monarch_cpu = &monarch_cpu }; 745 746 /* Mask all interrupts */ 747 local_irq_save(flags); 748 if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(), 749 (long)&nd, 0, 0) == NOTIFY_STOP) 750 ia64_mca_spin(__FUNCTION__); 751 752 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE; 753 /* Register with the SAL monarch that the slave has 754 * reached SAL 755 */ 756 ia64_sal_mc_rendez(); 757 758 if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(), 759 (long)&nd, 0, 0) == NOTIFY_STOP) 760 ia64_mca_spin(__FUNCTION__); 761 762 /* Wait for the monarch cpu to exit. */ 763 while (monarch_cpu != -1) 764 cpu_relax(); /* spin until monarch leaves */ 765 766 if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(), 767 (long)&nd, 0, 0) == NOTIFY_STOP) 768 ia64_mca_spin(__FUNCTION__); 769 770 /* Enable all interrupts */ 771 local_irq_restore(flags); 772 return IRQ_HANDLED; 773} 774 775/* 776 * ia64_mca_wakeup_int_handler 777 * 778 * The interrupt handler for processing the inter-cpu interrupt to the 779 * slave cpu which was spinning in the rendez loop. 780 * Since this spinning is done by turning off the interrupts and 781 * polling on the wakeup-interrupt bit in the IRR, there is 782 * nothing useful to be done in the handler. 783 * 784 * Inputs : wakeup_irq (Wakeup-interrupt bit) 785 * arg (Interrupt handler specific argument) 786 * Outputs : None 787 * 788 */ 789static irqreturn_t 790ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg) 791{ 792 return IRQ_HANDLED; 793} 794 795/* Function pointer for extra MCA recovery */ 796int (*ia64_mca_ucmc_extension) 797 (void*,struct ia64_sal_os_state*) 798 = NULL; 799 800int 801ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)) 802{ 803 if (ia64_mca_ucmc_extension) 804 return 1; 805 806 ia64_mca_ucmc_extension = fn; 807 return 0; 808} 809 810void 811ia64_unreg_MCA_extension(void) 812{ 813 if (ia64_mca_ucmc_extension) 814 ia64_mca_ucmc_extension = NULL; 815} 816 817EXPORT_SYMBOL(ia64_reg_MCA_extension); 818EXPORT_SYMBOL(ia64_unreg_MCA_extension); 819 820 821static inline void 822copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat) 823{ 824 u64 fslot, tslot, nat; 825 *tr = *fr; 826 fslot = ((unsigned long)fr >> 3) & 63; 827 tslot = ((unsigned long)tr >> 3) & 63; 828 *tnat &= ~(1UL << tslot); 829 nat = (fnat >> fslot) & 1; 830 *tnat |= (nat << tslot); 831} 832 833/* Change the comm field on the MCA/INT task to include the pid that 834 * was interrupted, it makes for easier debugging. If that pid was 0 835 * (swapper or nested MCA/INIT) then use the start of the previous comm 836 * field suffixed with its cpu. 837 */ 838 839static void 840ia64_mca_modify_comm(const struct task_struct *previous_current) 841{ 842 char *p, comm[sizeof(current->comm)]; 843 if (previous_current->pid) 844 snprintf(comm, sizeof(comm), "%s %d", 845 current->comm, previous_current->pid); 846 else { 847 int l; 848 if ((p = strchr(previous_current->comm, ' '))) 849 l = p - previous_current->comm; 850 else 851 l = strlen(previous_current->comm); 852 snprintf(comm, sizeof(comm), "%s %*s %d", 853 current->comm, l, previous_current->comm, 854 task_thread_info(previous_current)->cpu); 855 } 856 memcpy(current->comm, comm, sizeof(current->comm)); 857} 858 859/* On entry to this routine, we are running on the per cpu stack, see 860 * mca_asm.h. The original stack has not been touched by this event. Some of 861 * the original stack's registers will be in the RBS on this stack. This stack 862 * also contains a partial pt_regs and switch_stack, the rest of the data is in 863 * PAL minstate. 864 * 865 * The first thing to do is modify the original stack to look like a blocked 866 * task so we can run backtrace on the original task. Also mark the per cpu 867 * stack as current to ensure that we use the correct task state, it also means 868 * that we can do backtrace on the MCA/INIT handler code itself. 869 */ 870 871static struct task_struct * 872ia64_mca_modify_original_stack(struct pt_regs *regs, 873 const struct switch_stack *sw, 874 struct ia64_sal_os_state *sos, 875 const char *type) 876{ 877 char *p; 878 ia64_va va; 879 extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ 880 const pal_min_state_area_t *ms = sos->pal_min_state; 881 struct task_struct *previous_current; 882 struct pt_regs *old_regs; 883 struct switch_stack *old_sw; 884 unsigned size = sizeof(struct pt_regs) + 885 sizeof(struct switch_stack) + 16; 886 u64 *old_bspstore, *old_bsp; 887 u64 *new_bspstore, *new_bsp; 888 u64 old_unat, old_rnat, new_rnat, nat; 889 u64 slots, loadrs = regs->loadrs; 890 u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; 891 u64 ar_bspstore = regs->ar_bspstore; 892 u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16); 893 const u64 *bank; 894 const char *msg; 895 int cpu = smp_processor_id(); 896 897 previous_current = curr_task(cpu); 898 set_curr_task(cpu, current); 899 if ((p = strchr(current->comm, ' '))) 900 *p = '\0'; 901 902 /* Best effort attempt to cope with MCA/INIT delivered while in 903 * physical mode. 904 */ 905 regs->cr_ipsr = ms->pmsa_ipsr; 906 if (ia64_psr(regs)->dt == 0) { 907 va.l = r12; 908 if (va.f.reg == 0) { 909 va.f.reg = 7; 910 r12 = va.l; 911 } 912 va.l = r13; 913 if (va.f.reg == 0) { 914 va.f.reg = 7; 915 r13 = va.l; 916 } 917 } 918 if (ia64_psr(regs)->rt == 0) { 919 va.l = ar_bspstore; 920 if (va.f.reg == 0) { 921 va.f.reg = 7; 922 ar_bspstore = va.l; 923 } 924 va.l = ar_bsp; 925 if (va.f.reg == 0) { 926 va.f.reg = 7; 927 ar_bsp = va.l; 928 } 929 } 930 931 /* mca_asm.S ia64_old_stack() cannot assume that the dirty registers 932 * have been copied to the old stack, the old stack may fail the 933 * validation tests below. So ia64_old_stack() must restore the dirty 934 * registers from the new stack. The old and new bspstore probably 935 * have different alignments, so loadrs calculated on the old bsp 936 * cannot be used to restore from the new bsp. Calculate a suitable 937 * loadrs for the new stack and save it in the new pt_regs, where 938 * ia64_old_stack() can get it. 939 */ 940 old_bspstore = (u64 *)ar_bspstore; 941 old_bsp = (u64 *)ar_bsp; 942 slots = ia64_rse_num_regs(old_bspstore, old_bsp); 943 new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET); 944 new_bsp = ia64_rse_skip_regs(new_bspstore, slots); 945 regs->loadrs = (new_bsp - new_bspstore) * 8 << 16; 946 947 /* Verify the previous stack state before we change it */ 948 if (user_mode(regs)) { 949 msg = "occurred in user space"; 950 /* previous_current is guaranteed to be valid when the task was 951 * in user space, so ... 952 */ 953 ia64_mca_modify_comm(previous_current); 954 goto no_mod; 955 } 956 957 if (!mca_recover_range(ms->pmsa_iip)) { 958 if (r13 != sos->prev_IA64_KR_CURRENT) { 959 msg = "inconsistent previous current and r13"; 960 goto no_mod; 961 } 962 if ((r12 - r13) >= KERNEL_STACK_SIZE) { 963 msg = "inconsistent r12 and r13"; 964 goto no_mod; 965 } 966 if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) { 967 msg = "inconsistent ar.bspstore and r13"; 968 goto no_mod; 969 } 970 va.p = old_bspstore; 971 if (va.f.reg < 5) { 972 msg = "old_bspstore is in the wrong region"; 973 goto no_mod; 974 } 975 if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) { 976 msg = "inconsistent ar.bsp and r13"; 977 goto no_mod; 978 } 979 size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8; 980 if (ar_bspstore + size > r12) { 981 msg = "no room for blocked state"; 982 goto no_mod; 983 } 984 } 985 986 ia64_mca_modify_comm(previous_current); 987 988 /* Make the original task look blocked. First stack a struct pt_regs, 989 * describing the state at the time of interrupt. mca_asm.S built a 990 * partial pt_regs, copy it and fill in the blanks using minstate. 991 */ 992 p = (char *)r12 - sizeof(*regs); 993 old_regs = (struct pt_regs *)p; 994 memcpy(old_regs, regs, sizeof(*regs)); 995 /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use 996 * pmsa_{xip,xpsr,xfs} 997 */ 998 if (ia64_psr(regs)->ic) { 999 old_regs->cr_iip = ms->pmsa_iip; 1000 old_regs->cr_ipsr = ms->pmsa_ipsr; 1001 old_regs->cr_ifs = ms->pmsa_ifs; 1002 } else { 1003 old_regs->cr_iip = ms->pmsa_xip; 1004 old_regs->cr_ipsr = ms->pmsa_xpsr; 1005 old_regs->cr_ifs = ms->pmsa_xfs; 1006 } 1007 old_regs->pr = ms->pmsa_pr; 1008 old_regs->b0 = ms->pmsa_br0; 1009 old_regs->loadrs = loadrs; 1010 old_regs->ar_rsc = ms->pmsa_rsc; 1011 old_unat = old_regs->ar_unat; 1012 copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat); 1013 copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat); 1014 copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat); 1015 copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat); 1016 copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat); 1017 copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat); 1018 copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat); 1019 copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat); 1020 copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat); 1021 copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat); 1022 copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat); 1023 if (ia64_psr(old_regs)->bn) 1024 bank = ms->pmsa_bank1_gr; 1025 else 1026 bank = ms->pmsa_bank0_gr; 1027 copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat); 1028 copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat); 1029 copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat); 1030 copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat); 1031 copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat); 1032 copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat); 1033 copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat); 1034 copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat); 1035 copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat); 1036 copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat); 1037 copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat); 1038 copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat); 1039 copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat); 1040 copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat); 1041 copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat); 1042 copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat); 1043 1044 /* Next stack a struct switch_stack. mca_asm.S built a partial 1045 * switch_stack, copy it and fill in the blanks using pt_regs and 1046 * minstate. 1047 * 1048 * In the synthesized switch_stack, b0 points to ia64_leave_kernel, 1049 * ar.pfs is set to 0. 1050 * 1051 * unwind.c::unw_unwind() does special processing for interrupt frames. 1052 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate 1053 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs. Not 1054 * that this is documented, of course. Set PRED_NON_SYSCALL in the 1055 * switch_stack on the original stack so it will unwind correctly when 1056 * unwind.c reads pt_regs. 1057 * 1058 * thread.ksp is updated to point to the synthesized switch_stack. 1059 */ 1060 p -= sizeof(struct switch_stack); 1061 old_sw = (struct switch_stack *)p; 1062 memcpy(old_sw, sw, sizeof(*sw)); 1063 old_sw->caller_unat = old_unat; 1064 old_sw->ar_fpsr = old_regs->ar_fpsr; 1065 copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat); 1066 copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat); 1067 copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat); 1068 copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat); 1069 old_sw->b0 = (u64)ia64_leave_kernel; 1070 old_sw->b1 = ms->pmsa_br1; 1071 old_sw->ar_pfs = 0; 1072 old_sw->ar_unat = old_unat; 1073 old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL); 1074 previous_current->thread.ksp = (u64)p - 16; 1075 1076 /* Finally copy the original stack's registers back to its RBS. 1077 * Registers from ar.bspstore through ar.bsp at the time of the event 1078 * are in the current RBS, copy them back to the original stack. The 1079 * copy must be done register by register because the original bspstore 1080 * and the current one have different alignments, so the saved RNAT 1081 * data occurs at different places. 1082 * 1083 * mca_asm does cover, so the old_bsp already includes all registers at 1084 * the time of MCA/INIT. It also does flushrs, so all registers before 1085 * this function have been written to backing store on the MCA/INIT 1086 * stack. 1087 */ 1088 new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore)); 1089 old_rnat = regs->ar_rnat; 1090 while (slots--) { 1091 if (ia64_rse_is_rnat_slot(new_bspstore)) { 1092 new_rnat = ia64_get_rnat(new_bspstore++); 1093 } 1094 if (ia64_rse_is_rnat_slot(old_bspstore)) { 1095 *old_bspstore++ = old_rnat; 1096 old_rnat = 0; 1097 } 1098 nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL; 1099 old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore)); 1100 old_rnat |= (nat << ia64_rse_slot_num(old_bspstore)); 1101 *old_bspstore++ = *new_bspstore++; 1102 } 1103 old_sw->ar_bspstore = (unsigned long)old_bspstore; 1104 old_sw->ar_rnat = old_rnat; 1105 1106 sos->prev_task = previous_current; 1107 return previous_current; 1108 1109no_mod: 1110 printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", 1111 smp_processor_id(), type, msg); 1112 return previous_current; 1113} 1114 1115/* The monarch/slave interaction is based on monarch_cpu and requires that all 1116 * slaves have entered rendezvous before the monarch leaves. If any cpu has 1117 * not entered rendezvous yet then wait a bit. The assumption is that any 1118 * slave that has not rendezvoused after a reasonable time is never going to do 1119 * so. In this context, slave includes cpus that respond to the MCA rendezvous 1120 * interrupt, as well as cpus that receive the INIT slave event. 1121 */ 1122 1123static void 1124ia64_wait_for_slaves(int monarch, const char *type) 1125{ 1126 int c, wait = 0, missing = 0; 1127 for_each_online_cpu(c) { 1128 if (c == monarch) 1129 continue; 1130 if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) { 1131 udelay(1000); /* short wait first */ 1132 wait = 1; 1133 break; 1134 } 1135 } 1136 if (!wait) 1137 goto all_in; 1138 for_each_online_cpu(c) { 1139 if (c == monarch) 1140 continue; 1141 if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) { 1142 udelay(5*1000000); /* wait 5 seconds for slaves (arbitrary) */ 1143 if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) 1144 missing = 1; 1145 break; 1146 } 1147 } 1148 if (!missing) 1149 goto all_in; 1150 /* 1151 * Maybe slave(s) dead. Print buffered messages immediately. 1152 */ 1153 ia64_mlogbuf_finish(0); 1154 mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type); 1155 for_each_online_cpu(c) { 1156 if (c == monarch) 1157 continue; 1158 if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) 1159 mprintk(" %d", c); 1160 } 1161 mprintk("\n"); 1162 return; 1163 1164all_in: 1165 mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type); 1166 return; 1167} 1168 1169/* 1170 * ia64_mca_handler 1171 * 1172 * This is uncorrectable machine check handler called from OS_MCA 1173 * dispatch code which is in turn called from SAL_CHECK(). 1174 * This is the place where the core of OS MCA handling is done. 1175 * Right now the logs are extracted and displayed in a well-defined 1176 * format. This handler code is supposed to be run only on the 1177 * monarch processor. Once the monarch is done with MCA handling 1178 * further MCA logging is enabled by clearing logs. 1179 * Monarch also has the duty of sending wakeup-IPIs to pull the 1180 * slave processors out of rendezvous spinloop. 1181 */ 1182void 1183ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, 1184 struct ia64_sal_os_state *sos) 1185{ 1186 int recover, cpu = smp_processor_id(); 1187 struct task_struct *previous_current; 1188 struct ia64_mca_notify_die nd = 1189 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1190 1191 mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d " 1192 "monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch); 1193 1194 previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA"); 1195 monarch_cpu = cpu; 1196 if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0) 1197 == NOTIFY_STOP) 1198 ia64_mca_spin(__FUNCTION__); 1199 ia64_wait_for_slaves(cpu, "MCA"); 1200 1201 /* Wakeup all the processors which are spinning in the rendezvous loop. 1202 * They will leave SAL, then spin in the OS with interrupts disabled 1203 * until this monarch cpu leaves the MCA handler. That gets control 1204 * back to the OS so we can backtrace the other cpus, backtrace when 1205 * spinning in SAL does not work. 1206 */ 1207 ia64_mca_wakeup_all(); 1208 if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0) 1209 == NOTIFY_STOP) 1210 ia64_mca_spin(__FUNCTION__); 1211 1212 /* Get the MCA error record and log it */ 1213 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); 1214 1215 /* MCA error recovery */ 1216 recover = (ia64_mca_ucmc_extension 1217 && ia64_mca_ucmc_extension( 1218 IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA), 1219 sos)); 1220 1221 if (recover) { 1222 sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); 1223 rh->severity = sal_log_severity_corrected; 1224 ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); 1225 sos->os_status = IA64_MCA_CORRECTED; 1226 } else { 1227 /* Dump buffered message to console */ 1228 ia64_mlogbuf_finish(1); 1229#ifdef CONFIG_KEXEC 1230 atomic_set(&kdump_in_progress, 1); 1231 monarch_cpu = -1; 1232#endif 1233 } 1234 if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) 1235 == NOTIFY_STOP) 1236 ia64_mca_spin(__FUNCTION__); 1237 1238 set_curr_task(cpu, previous_current); 1239 monarch_cpu = -1; 1240} 1241 1242static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd); 1243static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd); 1244 1245/* 1246 * ia64_mca_cmc_int_handler 1247 * 1248 * This is corrected machine check interrupt handler. 1249 * Right now the logs are extracted and displayed in a well-defined 1250 * format. 1251 * 1252 * Inputs 1253 * interrupt number 1254 * client data arg ptr 1255 * 1256 * Outputs 1257 * None 1258 */ 1259static irqreturn_t 1260ia64_mca_cmc_int_handler(int cmc_irq, void *arg) 1261{ 1262 static unsigned long cmc_history[CMC_HISTORY_LENGTH]; 1263 static int index; 1264 static DEFINE_SPINLOCK(cmc_history_lock); 1265 1266 IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", 1267 __FUNCTION__, cmc_irq, smp_processor_id()); 1268 1269 /* SAL spec states this should run w/ interrupts enabled */ 1270 local_irq_enable(); 1271 1272 spin_lock(&cmc_history_lock); 1273 if (!cmc_polling_enabled) { 1274 int i, count = 1; /* we know 1 happened now */ 1275 unsigned long now = jiffies; 1276 1277 for (i = 0; i < CMC_HISTORY_LENGTH; i++) { 1278 if (now - cmc_history[i] <= HZ) 1279 count++; 1280 } 1281 1282 IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH); 1283 if (count >= CMC_HISTORY_LENGTH) { 1284 1285 cmc_polling_enabled = 1; 1286 spin_unlock(&cmc_history_lock); 1287 /* If we're being hit with CMC interrupts, we won't 1288 * ever execute the schedule_work() below. Need to 1289 * disable CMC interrupts on this processor now. 1290 */ 1291 ia64_mca_cmc_vector_disable(NULL); 1292 schedule_work(&cmc_disable_work); 1293 1294 /* 1295 * Corrected errors will still be corrected, but 1296 * make sure there's a log somewhere that indicates 1297 * something is generating more than we can handle. 1298 */ 1299 printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n"); 1300 1301 mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL); 1302 1303 /* lock already released, get out now */ 1304 goto out; 1305 } else { 1306 cmc_history[index++] = now; 1307 if (index == CMC_HISTORY_LENGTH) 1308 index = 0; 1309 } 1310 } 1311 spin_unlock(&cmc_history_lock); 1312out: 1313 /* Get the CMC error record and log it */ 1314 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC); 1315 1316 return IRQ_HANDLED; 1317} 1318 1319/* 1320 * ia64_mca_cmc_int_caller 1321 * 1322 * Triggered by sw interrupt from CMC polling routine. Calls 1323 * real interrupt handler and either triggers a sw interrupt 1324 * on the next cpu or does cleanup at the end. 1325 * 1326 * Inputs 1327 * interrupt number 1328 * client data arg ptr 1329 * Outputs 1330 * handled 1331 */ 1332static irqreturn_t 1333ia64_mca_cmc_int_caller(int cmc_irq, void *arg) 1334{ 1335 static int start_count = -1; 1336 unsigned int cpuid; 1337 1338 cpuid = smp_processor_id(); 1339 1340 /* If first cpu, update count */ 1341 if (start_count == -1) 1342 start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC); 1343 1344 ia64_mca_cmc_int_handler(cmc_irq, arg); 1345 1346 for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); 1347 1348 if (cpuid < NR_CPUS) { 1349 platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); 1350 } else { 1351 /* If no log record, switch out of polling mode */ 1352 if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) { 1353 1354 printk(KERN_WARNING "Returning to interrupt driven CMC handler\n"); 1355 schedule_work(&cmc_enable_work); 1356 cmc_polling_enabled = 0; 1357 1358 } else { 1359 1360 mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL); 1361 } 1362 1363 start_count = -1; 1364 } 1365 1366 return IRQ_HANDLED; 1367} 1368 1369/* 1370 * ia64_mca_cmc_poll 1371 * 1372 * Poll for Corrected Machine Checks (CMCs) 1373 * 1374 * Inputs : dummy(unused) 1375 * Outputs : None 1376 * 1377 */ 1378static void 1379ia64_mca_cmc_poll (unsigned long dummy) 1380{ 1381 /* Trigger a CMC interrupt cascade */ 1382 platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); 1383} 1384 1385/* 1386 * ia64_mca_cpe_int_caller 1387 * 1388 * Triggered by sw interrupt from CPE polling routine. Calls 1389 * real interrupt handler and either triggers a sw interrupt 1390 * on the next cpu or does cleanup at the end. 1391 * 1392 * Inputs 1393 * interrupt number 1394 * client data arg ptr 1395 * Outputs 1396 * handled 1397 */ 1398#ifdef CONFIG_ACPI 1399 1400static irqreturn_t 1401ia64_mca_cpe_int_caller(int cpe_irq, void *arg) 1402{ 1403 static int start_count = -1; 1404 static int poll_time = MIN_CPE_POLL_INTERVAL; 1405 unsigned int cpuid; 1406 1407 cpuid = smp_processor_id(); 1408 1409 /* If first cpu, update count */ 1410 if (start_count == -1) 1411 start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE); 1412 1413 ia64_mca_cpe_int_handler(cpe_irq, arg); 1414 1415 for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); 1416 1417 if (cpuid < NR_CPUS) { 1418 platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); 1419 } else { 1420 /* 1421 * If a log was recorded, increase our polling frequency, 1422 * otherwise, backoff or return to interrupt mode. 1423 */ 1424 if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) { 1425 poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2); 1426 } else if (cpe_vector < 0) { 1427 poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2); 1428 } else { 1429 poll_time = MIN_CPE_POLL_INTERVAL; 1430 1431 printk(KERN_WARNING "Returning to interrupt driven CPE handler\n"); 1432 enable_irq(local_vector_to_irq(IA64_CPE_VECTOR)); 1433 cpe_poll_enabled = 0; 1434 } 1435 1436 if (cpe_poll_enabled) 1437 mod_timer(&cpe_poll_timer, jiffies + poll_time); 1438 start_count = -1; 1439 } 1440 1441 return IRQ_HANDLED; 1442} 1443 1444/* 1445 * ia64_mca_cpe_poll 1446 * 1447 * Poll for Corrected Platform Errors (CPEs), trigger interrupt 1448 * on first cpu, from there it will trickle through all the cpus. 1449 * 1450 * Inputs : dummy(unused) 1451 * Outputs : None 1452 * 1453 */ 1454static void 1455ia64_mca_cpe_poll (unsigned long dummy) 1456{ 1457 /* Trigger a CPE interrupt cascade */ 1458 platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); 1459} 1460 1461#endif /* CONFIG_ACPI */ 1462 1463static int 1464default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data) 1465{ 1466 int c; 1467 struct task_struct *g, *t; 1468 if (val != DIE_INIT_MONARCH_PROCESS) 1469 return NOTIFY_DONE; 1470#ifdef CONFIG_KEXEC 1471 if (atomic_read(&kdump_in_progress)) 1472 return NOTIFY_DONE; 1473#endif 1474 1475 BREAK_LOGLEVEL(console_loglevel); 1476 ia64_mlogbuf_dump_from_init(); 1477 1478 printk(KERN_ERR "Processes interrupted by INIT -"); 1479 for_each_online_cpu(c) { 1480 struct ia64_sal_os_state *s; 1481 t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET); 1482 s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET); 1483 g = s->prev_task; 1484 if (g) { 1485 if (g->pid) 1486 printk(" %d", g->pid); 1487 else 1488 printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g); 1489 } 1490 } 1491 printk("\n\n"); 1492 if (read_trylock(&tasklist_lock)) { 1493 do_each_thread (g, t) { 1494 printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); 1495 show_stack(t, NULL); 1496 } while_each_thread (g, t); 1497 read_unlock(&tasklist_lock); 1498 } 1499 RESTORE_LOGLEVEL(console_loglevel); 1500 return NOTIFY_DONE; 1501} 1502 1503/* 1504 * C portion of the OS INIT handler 1505 * 1506 * Called from ia64_os_init_dispatch 1507 * 1508 * Inputs: pointer to pt_regs where processor info was saved. SAL/OS state for 1509 * this event. This code is used for both monarch and slave INIT events, see 1510 * sos->monarch. 1511 * 1512 * All INIT events switch to the INIT stack and change the previous process to 1513 * blocked status. If one of the INIT events is the monarch then we are 1514 * probably processing the nmi button/command. Use the monarch cpu to dump all 1515 * the processes. The slave INIT events all spin until the monarch cpu 1516 * returns. We can also get INIT slave events for MCA, in which case the MCA 1517 * process is the monarch. 1518 */ 1519 1520void 1521ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, 1522 struct ia64_sal_os_state *sos) 1523{ 1524 static atomic_t slaves; 1525 static atomic_t monarchs; 1526 struct task_struct *previous_current; 1527 int cpu = smp_processor_id(); 1528 struct ia64_mca_notify_die nd = 1529 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1530 1531 (void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0); 1532 1533 mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n", 1534 sos->proc_state_param, cpu, sos->monarch); 1535 salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0); 1536 1537 previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT"); 1538 sos->os_status = IA64_INIT_RESUME; 1539 1540 if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) { 1541 mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n", 1542 __FUNCTION__, cpu); 1543 atomic_dec(&slaves); 1544 sos->monarch = 1; 1545 } 1546 1547 if (sos->monarch && atomic_add_return(1, &monarchs) > 1) { 1548 mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n", 1549 __FUNCTION__, cpu); 1550 atomic_dec(&monarchs); 1551 sos->monarch = 0; 1552 } 1553 1554 if (!sos->monarch) { 1555 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT; 1556 while (monarch_cpu == -1) 1557 cpu_relax(); /* spin until monarch enters */ 1558 if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0) 1559 == NOTIFY_STOP) 1560 ia64_mca_spin(__FUNCTION__); 1561 if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0) 1562 == NOTIFY_STOP) 1563 ia64_mca_spin(__FUNCTION__); 1564 while (monarch_cpu != -1) 1565 cpu_relax(); /* spin until monarch leaves */ 1566 if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0) 1567 == NOTIFY_STOP) 1568 ia64_mca_spin(__FUNCTION__); 1569 mprintk("Slave on cpu %d returning to normal service.\n", cpu); 1570 set_curr_task(cpu, previous_current); 1571 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; 1572 atomic_dec(&slaves); 1573 return; 1574 } 1575 1576 monarch_cpu = cpu; 1577 if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0) 1578 == NOTIFY_STOP) 1579 ia64_mca_spin(__FUNCTION__); 1580 1581 /* 1582 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be 1583 * generated via the BMC's command-line interface, but since the console is on the 1584 * same serial line, the user will need some time to switch out of the BMC before 1585 * the dump begins. 1586 */ 1587 mprintk("Delaying for 5 seconds...\n"); 1588 udelay(5*1000000); 1589 ia64_wait_for_slaves(cpu, "INIT"); 1590 /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through 1591 * to default_monarch_init_process() above and just print all the 1592 * tasks. 1593 */ 1594 if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0) 1595 == NOTIFY_STOP) 1596 ia64_mca_spin(__FUNCTION__); 1597 if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0) 1598 == NOTIFY_STOP) 1599 ia64_mca_spin(__FUNCTION__); 1600 mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); 1601 atomic_dec(&monarchs); 1602 set_curr_task(cpu, previous_current); 1603 monarch_cpu = -1; 1604 return; 1605} 1606 1607static int __init 1608ia64_mca_disable_cpe_polling(char *str) 1609{ 1610 cpe_poll_enabled = 0; 1611 return 1; 1612} 1613 1614__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling); 1615 1616static struct irqaction cmci_irqaction = { 1617 .handler = ia64_mca_cmc_int_handler, 1618 .flags = IRQF_DISABLED, 1619 .name = "cmc_hndlr" 1620}; 1621 1622static struct irqaction cmcp_irqaction = { 1623 .handler = ia64_mca_cmc_int_caller, 1624 .flags = IRQF_DISABLED, 1625 .name = "cmc_poll" 1626}; 1627 1628static struct irqaction mca_rdzv_irqaction = { 1629 .handler = ia64_mca_rendez_int_handler, 1630 .flags = IRQF_DISABLED, 1631 .name = "mca_rdzv" 1632}; 1633 1634static struct irqaction mca_wkup_irqaction = { 1635 .handler = ia64_mca_wakeup_int_handler, 1636 .flags = IRQF_DISABLED, 1637 .name = "mca_wkup" 1638}; 1639 1640#ifdef CONFIG_ACPI 1641static struct irqaction mca_cpe_irqaction = { 1642 .handler = ia64_mca_cpe_int_handler, 1643 .flags = IRQF_DISABLED, 1644 .name = "cpe_hndlr" 1645}; 1646 1647static struct irqaction mca_cpep_irqaction = { 1648 .handler = ia64_mca_cpe_int_caller, 1649 .flags = IRQF_DISABLED, 1650 .name = "cpe_poll" 1651}; 1652#endif /* CONFIG_ACPI */ 1653 1654/* Minimal format of the MCA/INIT stacks. The pseudo processes that run on 1655 * these stacks can never sleep, they cannot return from the kernel to user 1656 * space, they do not appear in a normal ps listing. So there is no need to 1657 * format most of the fields. 1658 */ 1659 1660static void __cpuinit 1661format_mca_init_stack(void *mca_data, unsigned long offset, 1662 const char *type, int cpu) 1663{ 1664 struct task_struct *p = (struct task_struct *)((char *)mca_data + offset); 1665 struct thread_info *ti; 1666 memset(p, 0, KERNEL_STACK_SIZE); 1667 ti = task_thread_info(p); 1668 ti->flags = _TIF_MCA_INIT; 1669 ti->preempt_count = 1; 1670 ti->task = p; 1671 ti->cpu = cpu; 1672 p->stack = ti; 1673 p->state = TASK_UNINTERRUPTIBLE; 1674 cpu_set(cpu, p->cpus_allowed); 1675 INIT_LIST_HEAD(&p->tasks); 1676 p->parent = p->real_parent = p->group_leader = p; 1677 INIT_LIST_HEAD(&p->children); 1678 INIT_LIST_HEAD(&p->sibling); 1679 strncpy(p->comm, type, sizeof(p->comm)-1); 1680} 1681 1682/* Do per-CPU MCA-related initialization. */ 1683 1684void __cpuinit 1685ia64_mca_cpu_init(void *cpu_data) 1686{ 1687 void *pal_vaddr; 1688 static int first_time = 1; 1689 1690 if (first_time) { 1691 void *mca_data; 1692 int cpu; 1693 1694 first_time = 0; 1695 mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) 1696 * NR_CPUS + KERNEL_STACK_SIZE); 1697 mca_data = (void *)(((unsigned long)mca_data + 1698 KERNEL_STACK_SIZE - 1) & 1699 (-KERNEL_STACK_SIZE)); 1700 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1701 format_mca_init_stack(mca_data, 1702 offsetof(struct ia64_mca_cpu, mca_stack), 1703 "MCA", cpu); 1704 format_mca_init_stack(mca_data, 1705 offsetof(struct ia64_mca_cpu, init_stack), 1706 "INIT", cpu); 1707 __per_cpu_mca[cpu] = __pa(mca_data); 1708 mca_data += sizeof(struct ia64_mca_cpu); 1709 } 1710 } 1711 1712 /* 1713 * The MCA info structure was allocated earlier and its 1714 * physical address saved in __per_cpu_mca[cpu]. Copy that 1715 * address * to ia64_mca_data so we can access it as a per-CPU 1716 * variable. 1717 */ 1718 __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()]; 1719 1720 /* 1721 * Stash away a copy of the PTE needed to map the per-CPU page. 1722 * We may need it during MCA recovery. 1723 */ 1724 __get_cpu_var(ia64_mca_per_cpu_pte) = 1725 pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); 1726 1727 /* 1728 * Also, stash away a copy of the PAL address and the PTE 1729 * needed to map it. 1730 */ 1731 pal_vaddr = efi_get_pal_addr(); 1732 if (!pal_vaddr) 1733 return; 1734 __get_cpu_var(ia64_mca_pal_base) = 1735 GRANULEROUNDDOWN((unsigned long) pal_vaddr); 1736 __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr), 1737 PAGE_KERNEL)); 1738} 1739 1740/* 1741 * ia64_mca_init 1742 * 1743 * Do all the system level mca specific initialization. 1744 * 1745 * 1. Register spinloop and wakeup request interrupt vectors 1746 * 1747 * 2. Register OS_MCA handler entry point 1748 * 1749 * 3. Register OS_INIT handler entry point 1750 * 1751 * 4. Initialize MCA/CMC/INIT related log buffers maintained by the OS. 1752 * 1753 * Note that this initialization is done very early before some kernel 1754 * services are available. 1755 * 1756 * Inputs : None 1757 * 1758 * Outputs : None 1759 */ 1760void __init 1761ia64_mca_init(void) 1762{ 1763 ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch; 1764 ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave; 1765 ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; 1766 int i; 1767 s64 rc; 1768 struct ia64_sal_retval isrv; 1769 u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */ 1770 static struct notifier_block default_init_monarch_nb = { 1771 .notifier_call = default_monarch_init_process, 1772 .priority = 0/* we need to notified last */ 1773 }; 1774 1775 IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__); 1776 1777 /* Clear the Rendez checkin flag for all cpus */ 1778 for(i = 0 ; i < NR_CPUS; i++) 1779 ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; 1780 1781 /* 1782 * Register the rendezvous spinloop and wakeup mechanism with SAL 1783 */ 1784 1785 /* Register the rendezvous interrupt vector with SAL */ 1786 while (1) { 1787 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT, 1788 SAL_MC_PARAM_MECHANISM_INT, 1789 IA64_MCA_RENDEZ_VECTOR, 1790 timeout, 1791 SAL_MC_PARAM_RZ_ALWAYS); 1792 rc = isrv.status; 1793 if (rc == 0) 1794 break; 1795 if (rc == -2) { 1796 printk(KERN_INFO "Increasing MCA rendezvous timeout from " 1797 "%ld to %ld milliseconds\n", timeout, isrv.v0); 1798 timeout = isrv.v0; 1799 (void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0); 1800 continue; 1801 } 1802 printk(KERN_ERR "Failed to register rendezvous interrupt " 1803 "with SAL (status %ld)\n", rc); 1804 return; 1805 } 1806 1807 /* Register the wakeup interrupt vector with SAL */ 1808 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP, 1809 SAL_MC_PARAM_MECHANISM_INT, 1810 IA64_MCA_WAKEUP_VECTOR, 1811 0, 0); 1812 rc = isrv.status; 1813 if (rc) { 1814 printk(KERN_ERR "Failed to register wakeup interrupt with SAL " 1815 "(status %ld)\n", rc); 1816 return; 1817 } 1818 1819 IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__); 1820 1821 ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp); 1822 ia64_mc_info.imi_mca_handler_size = 0; 1823 1824 /* Register the os mca handler with SAL */ 1825 if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 1826 ia64_mc_info.imi_mca_handler, 1827 ia64_tpa(mca_hldlr_ptr->gp), 1828 ia64_mc_info.imi_mca_handler_size, 1829 0, 0, 0))) 1830 { 1831 printk(KERN_ERR "Failed to register OS MCA handler with SAL " 1832 "(status %ld)\n", rc); 1833 return; 1834 } 1835 1836 IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__, 1837 ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp)); 1838 1839 ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp); 1840 ia64_mc_info.imi_monarch_init_handler_size = 0; 1841 ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp); 1842 ia64_mc_info.imi_slave_init_handler_size = 0; 1843 1844 IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, 1845 ia64_mc_info.imi_monarch_init_handler); 1846 1847 /* Register the os init handler with SAL */ 1848 if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 1849 ia64_mc_info.imi_monarch_init_handler, 1850 ia64_tpa(ia64_getreg(_IA64_REG_GP)), 1851 ia64_mc_info.imi_monarch_init_handler_size, 1852 ia64_mc_info.imi_slave_init_handler, 1853 ia64_tpa(ia64_getreg(_IA64_REG_GP)), 1854 ia64_mc_info.imi_slave_init_handler_size))) 1855 { 1856 printk(KERN_ERR "Failed to register m/s INIT handlers with SAL " 1857 "(status %ld)\n", rc); 1858 return; 1859 } 1860 if (register_die_notifier(&default_init_monarch_nb)) { 1861 printk(KERN_ERR "Failed to register default monarch INIT process\n"); 1862 return; 1863 } 1864 1865 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__); 1866 1867 /* 1868 * Configure the CMCI/P vector and handler. Interrupts for CMC are 1869 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). 1870 */ 1871 register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction); 1872 register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction); 1873 ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */ 1874 1875 /* Setup the MCA rendezvous interrupt vector */ 1876 register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction); 1877 1878 /* Setup the MCA wakeup interrupt vector */ 1879 register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction); 1880 1881#ifdef CONFIG_ACPI 1882 /* Setup the CPEI/P handler */ 1883 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); 1884#endif 1885 1886 /* Initialize the areas set aside by the OS to buffer the 1887 * platform/processor error states for MCA/INIT/CMC 1888 * handling. 1889 */ 1890 ia64_log_init(SAL_INFO_TYPE_MCA); 1891 ia64_log_init(SAL_INFO_TYPE_INIT); 1892 ia64_log_init(SAL_INFO_TYPE_CMC); 1893 ia64_log_init(SAL_INFO_TYPE_CPE); 1894 1895 mca_init = 1; 1896 printk(KERN_INFO "MCA related initialization done\n"); 1897} 1898 1899/* 1900 * ia64_mca_late_init 1901 * 1902 * Opportunity to setup things that require initialization later 1903 * than ia64_mca_init. Setup a timer to poll for CPEs if the 1904 * platform doesn't support an interrupt driven mechanism. 1905 * 1906 * Inputs : None 1907 * Outputs : Status 1908 */ 1909static int __init 1910ia64_mca_late_init(void) 1911{ 1912 if (!mca_init) 1913 return 0; 1914 1915 /* Setup the CMCI/P vector and handler */ 1916 init_timer(&cmc_poll_timer); 1917 cmc_poll_timer.function = ia64_mca_cmc_poll; 1918 1919 /* Unmask/enable the vector */ 1920 cmc_polling_enabled = 0; 1921 schedule_work(&cmc_enable_work); 1922 1923 IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__); 1924 1925#ifdef CONFIG_ACPI 1926 /* Setup the CPEI/P vector and handler */ 1927 cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI); 1928 init_timer(&cpe_poll_timer); 1929 cpe_poll_timer.function = ia64_mca_cpe_poll; 1930 1931 { 1932 irq_desc_t *desc; 1933 unsigned int irq; 1934 1935 if (cpe_vector >= 0) { 1936 /* If platform supports CPEI, enable the irq. */ 1937 cpe_poll_enabled = 0; 1938 for (irq = 0; irq < NR_IRQS; ++irq) 1939 if (irq_to_vector(irq) == cpe_vector) { 1940 desc = irq_desc + irq; 1941 desc->status |= IRQ_PER_CPU; 1942 setup_irq(irq, &mca_cpe_irqaction); 1943 ia64_cpe_irq = irq; 1944 } 1945 ia64_mca_register_cpev(cpe_vector); 1946 IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__); 1947 } else { 1948 /* If platform doesn't support CPEI, get the timer going. */ 1949 if (cpe_poll_enabled) { 1950 ia64_mca_cpe_poll(0UL); 1951 IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__); 1952 } 1953 } 1954 } 1955#endif 1956 1957 return 0; 1958} 1959 1960device_initcall(ia64_mca_late_init); 1961