1/* 2 * Xen event channels 3 * 4 * Xen models interrupts with abstract event channels. Because each 5 * domain gets 1024 event channels, but NR_IRQ is not that large, we 6 * must dynamically map irqs<->event channels. The event channels 7 * interface with the rest of the kernel by defining a xen interrupt 8 * chip. When an event is recieved, it is mapped to an irq and sent 9 * through the normal interrupt processing path. 10 * 11 * There are four kinds of events which can be mapped to an event 12 * channel: 13 * 14 * 1. Inter-domain notifications. This includes all the virtual 15 * device events, since they're driven by front-ends in another domain 16 * (typically dom0). 17 * 2. VIRQs, typically used for timers. These are per-cpu events. 18 * 3. IPIs. 19 * 4. Hardware interrupts. Not supported at present. 20 * 21 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 22 */ 23 24#include <linux/linkage.h> 25#include <linux/interrupt.h> 26#include <linux/irq.h> 27#include <linux/module.h> 28#include <linux/string.h> 29#include <linux/bootmem.h> 30#include <linux/slab.h> 31 32#include <asm/desc.h> 33#include <asm/ptrace.h> 34#include <asm/irq.h> 35#include <asm/idle.h> 36#include <asm/sync_bitops.h> 37#include <asm/xen/hypercall.h> 38#include <asm/xen/hypervisor.h> 39 40#include <xen/xen.h> 41#include <xen/hvm.h> 42#include <xen/xen-ops.h> 43#include <xen/events.h> 44#include <xen/interface/xen.h> 45#include <xen/interface/event_channel.h> 46#include <xen/interface/hvm/hvm_op.h> 47#include <xen/interface/hvm/params.h> 48 49/* 50 * This lock protects updates to the following mapping and reference-count 51 * arrays. The lock does not need to be acquired to read the mapping tables. 52 */ 53static DEFINE_SPINLOCK(irq_mapping_update_lock); 54 55/* IRQ <-> VIRQ mapping. */ 56static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; 57 58/* IRQ <-> IPI mapping */ 59static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; 60 61/* Interrupt types. */ 62enum xen_irq_type { 63 IRQT_UNBOUND = 0, 64 IRQT_PIRQ, 65 IRQT_VIRQ, 66 IRQT_IPI, 67 IRQT_EVTCHN 68}; 69 70/* 71 * Packed IRQ information: 72 * type - enum xen_irq_type 73 * event channel - irq->event channel mapping 74 * cpu - cpu this event channel is bound to 75 * index - type-specific information: 76 * PIRQ - vector, with MSB being "needs EIO" 77 * VIRQ - virq number 78 * IPI - IPI vector 79 * EVTCHN - 80 */ 81struct irq_info 82{ 83 enum xen_irq_type type; /* type */ 84 unsigned short evtchn; /* event channel */ 85 unsigned short cpu; /* cpu bound */ 86 87 union { 88 unsigned short virq; 89 enum ipi_vector ipi; 90 struct { 91 unsigned short gsi; 92 unsigned short vector; 93 } pirq; 94 } u; 95}; 96 97static struct irq_info irq_info[NR_IRQS]; 98 99static int evtchn_to_irq[NR_EVENT_CHANNELS] = { 100 [0 ... NR_EVENT_CHANNELS-1] = -1 101}; 102struct cpu_evtchn_s { 103 unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG]; 104}; 105static struct cpu_evtchn_s *cpu_evtchn_mask_p; 106static inline unsigned long *cpu_evtchn_mask(int cpu) 107{ 108 return cpu_evtchn_mask_p[cpu].bits; 109} 110 111/* Xen will never allocate port zero for any purpose. */ 112#define VALID_EVTCHN(chn) ((chn) != 0) 113 114static struct irq_chip xen_dynamic_chip; 115static struct irq_chip xen_percpu_chip; 116 117/* Constructor for packed IRQ information. */ 118static struct irq_info mk_unbound_info(void) 119{ 120 return (struct irq_info) { .type = IRQT_UNBOUND }; 121} 122 123static struct irq_info mk_evtchn_info(unsigned short evtchn) 124{ 125 return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn, 126 .cpu = 0 }; 127} 128 129static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi) 130{ 131 return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn, 132 .cpu = 0, .u.ipi = ipi }; 133} 134 135static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq) 136{ 137 return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn, 138 .cpu = 0, .u.virq = virq }; 139} 140 141static struct irq_info mk_pirq_info(unsigned short evtchn, 142 unsigned short gsi, unsigned short vector) 143{ 144 return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn, 145 .cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } }; 146} 147 148/* 149 * Accessors for packed IRQ information. 150 */ 151static struct irq_info *info_for_irq(unsigned irq) 152{ 153 return &irq_info[irq]; 154} 155 156static unsigned int evtchn_from_irq(unsigned irq) 157{ 158 return info_for_irq(irq)->evtchn; 159} 160 161unsigned irq_from_evtchn(unsigned int evtchn) 162{ 163 return evtchn_to_irq[evtchn]; 164} 165EXPORT_SYMBOL_GPL(irq_from_evtchn); 166 167static enum ipi_vector ipi_from_irq(unsigned irq) 168{ 169 struct irq_info *info = info_for_irq(irq); 170 171 BUG_ON(info == NULL); 172 BUG_ON(info->type != IRQT_IPI); 173 174 return info->u.ipi; 175} 176 177static unsigned virq_from_irq(unsigned irq) 178{ 179 struct irq_info *info = info_for_irq(irq); 180 181 BUG_ON(info == NULL); 182 BUG_ON(info->type != IRQT_VIRQ); 183 184 return info->u.virq; 185} 186 187static unsigned gsi_from_irq(unsigned irq) 188{ 189 struct irq_info *info = info_for_irq(irq); 190 191 BUG_ON(info == NULL); 192 BUG_ON(info->type != IRQT_PIRQ); 193 194 return info->u.pirq.gsi; 195} 196 197static unsigned vector_from_irq(unsigned irq) 198{ 199 struct irq_info *info = info_for_irq(irq); 200 201 BUG_ON(info == NULL); 202 BUG_ON(info->type != IRQT_PIRQ); 203 204 return info->u.pirq.vector; 205} 206 207static enum xen_irq_type type_from_irq(unsigned irq) 208{ 209 return info_for_irq(irq)->type; 210} 211 212static unsigned cpu_from_irq(unsigned irq) 213{ 214 return info_for_irq(irq)->cpu; 215} 216 217static unsigned int cpu_from_evtchn(unsigned int evtchn) 218{ 219 int irq = evtchn_to_irq[evtchn]; 220 unsigned ret = 0; 221 222 if (irq != -1) 223 ret = cpu_from_irq(irq); 224 225 return ret; 226} 227 228static inline unsigned long active_evtchns(unsigned int cpu, 229 struct shared_info *sh, 230 unsigned int idx) 231{ 232 return (sh->evtchn_pending[idx] & 233 cpu_evtchn_mask(cpu)[idx] & 234 ~sh->evtchn_mask[idx]); 235} 236 237static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) 238{ 239 int irq = evtchn_to_irq[chn]; 240 241 BUG_ON(irq == -1); 242#ifdef CONFIG_SMP 243 cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); 244#endif 245 246 __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); 247 __set_bit(chn, cpu_evtchn_mask(cpu)); 248 249 irq_info[irq].cpu = cpu; 250} 251 252static void init_evtchn_cpu_bindings(void) 253{ 254#ifdef CONFIG_SMP 255 struct irq_desc *desc; 256 int i; 257 258 /* By default all event channels notify CPU#0. */ 259 for_each_irq_desc(i, desc) { 260 cpumask_copy(desc->affinity, cpumask_of(0)); 261 } 262#endif 263 264 memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s)); 265} 266 267static inline void clear_evtchn(int port) 268{ 269 struct shared_info *s = HYPERVISOR_shared_info; 270 sync_clear_bit(port, &s->evtchn_pending[0]); 271} 272 273static inline void set_evtchn(int port) 274{ 275 struct shared_info *s = HYPERVISOR_shared_info; 276 sync_set_bit(port, &s->evtchn_pending[0]); 277} 278 279static inline int test_evtchn(int port) 280{ 281 struct shared_info *s = HYPERVISOR_shared_info; 282 return sync_test_bit(port, &s->evtchn_pending[0]); 283} 284 285 286/** 287 * notify_remote_via_irq - send event to remote end of event channel via irq 288 * @irq: irq of event channel to send event to 289 * 290 * Unlike notify_remote_via_evtchn(), this is safe to use across 291 * save/restore. Notifications on a broken connection are silently 292 * dropped. 293 */ 294void notify_remote_via_irq(int irq) 295{ 296 int evtchn = evtchn_from_irq(irq); 297 298 if (VALID_EVTCHN(evtchn)) 299 notify_remote_via_evtchn(evtchn); 300} 301EXPORT_SYMBOL_GPL(notify_remote_via_irq); 302 303static void mask_evtchn(int port) 304{ 305 struct shared_info *s = HYPERVISOR_shared_info; 306 sync_set_bit(port, &s->evtchn_mask[0]); 307} 308 309static void unmask_evtchn(int port) 310{ 311 struct shared_info *s = HYPERVISOR_shared_info; 312 unsigned int cpu = get_cpu(); 313 314 BUG_ON(!irqs_disabled()); 315 316 /* Slow path (hypercall) if this is a non-local port. */ 317 if (unlikely(cpu != cpu_from_evtchn(port))) { 318 struct evtchn_unmask unmask = { .port = port }; 319 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); 320 } else { 321 struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); 322 323 sync_clear_bit(port, &s->evtchn_mask[0]); 324 325 /* 326 * The following is basically the equivalent of 327 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose 328 * the interrupt edge' if the channel is masked. 329 */ 330 if (sync_test_bit(port, &s->evtchn_pending[0]) && 331 !sync_test_and_set_bit(port / BITS_PER_LONG, 332 &vcpu_info->evtchn_pending_sel)) 333 vcpu_info->evtchn_upcall_pending = 1; 334 } 335 336 put_cpu(); 337} 338 339static int find_unbound_irq(void) 340{ 341 int irq; 342 struct irq_desc *desc; 343 344 for (irq = 0; irq < nr_irqs; irq++) { 345 desc = irq_to_desc(irq); 346 /* only 0->15 have init'd desc; handle irq > 16 */ 347 if (desc == NULL) 348 break; 349 if (desc->chip == &no_irq_chip) 350 break; 351 if (desc->chip != &xen_dynamic_chip) 352 continue; 353 if (irq_info[irq].type == IRQT_UNBOUND) 354 break; 355 } 356 357 if (irq == nr_irqs) 358 panic("No available IRQ to bind to: increase nr_irqs!\n"); 359 360 desc = irq_to_desc_alloc_node(irq, 0); 361 if (WARN_ON(desc == NULL)) 362 return -1; 363 364 dynamic_irq_init_keep_chip_data(irq); 365 366 return irq; 367} 368 369int bind_evtchn_to_irq(unsigned int evtchn) 370{ 371 int irq; 372 373 spin_lock(&irq_mapping_update_lock); 374 375 irq = evtchn_to_irq[evtchn]; 376 377 if (irq == -1) { 378 irq = find_unbound_irq(); 379 380 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 381 handle_edge_irq, "event"); 382 383 evtchn_to_irq[evtchn] = irq; 384 irq_info[irq] = mk_evtchn_info(evtchn); 385 } 386 387 spin_unlock(&irq_mapping_update_lock); 388 389 return irq; 390} 391EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); 392 393static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) 394{ 395 struct evtchn_bind_ipi bind_ipi; 396 int evtchn, irq; 397 398 spin_lock(&irq_mapping_update_lock); 399 400 irq = per_cpu(ipi_to_irq, cpu)[ipi]; 401 402 if (irq == -1) { 403 irq = find_unbound_irq(); 404 if (irq < 0) 405 goto out; 406 407 set_irq_chip_and_handler_name(irq, &xen_percpu_chip, 408 handle_percpu_irq, "ipi"); 409 410 bind_ipi.vcpu = cpu; 411 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, 412 &bind_ipi) != 0) 413 BUG(); 414 evtchn = bind_ipi.port; 415 416 evtchn_to_irq[evtchn] = irq; 417 irq_info[irq] = mk_ipi_info(evtchn, ipi); 418 per_cpu(ipi_to_irq, cpu)[ipi] = irq; 419 420 bind_evtchn_to_cpu(evtchn, cpu); 421 } 422 423 out: 424 spin_unlock(&irq_mapping_update_lock); 425 return irq; 426} 427 428 429static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) 430{ 431 struct evtchn_bind_virq bind_virq; 432 int evtchn, irq; 433 434 spin_lock(&irq_mapping_update_lock); 435 436 irq = per_cpu(virq_to_irq, cpu)[virq]; 437 438 if (irq == -1) { 439 bind_virq.virq = virq; 440 bind_virq.vcpu = cpu; 441 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, 442 &bind_virq) != 0) 443 BUG(); 444 evtchn = bind_virq.port; 445 446 irq = find_unbound_irq(); 447 448 set_irq_chip_and_handler_name(irq, &xen_percpu_chip, 449 handle_percpu_irq, "virq"); 450 451 evtchn_to_irq[evtchn] = irq; 452 irq_info[irq] = mk_virq_info(evtchn, virq); 453 454 per_cpu(virq_to_irq, cpu)[virq] = irq; 455 456 bind_evtchn_to_cpu(evtchn, cpu); 457 } 458 459 spin_unlock(&irq_mapping_update_lock); 460 461 return irq; 462} 463 464static void unbind_from_irq(unsigned int irq) 465{ 466 struct evtchn_close close; 467 int evtchn = evtchn_from_irq(irq); 468 469 spin_lock(&irq_mapping_update_lock); 470 471 if (VALID_EVTCHN(evtchn)) { 472 close.port = evtchn; 473 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 474 BUG(); 475 476 switch (type_from_irq(irq)) { 477 case IRQT_VIRQ: 478 per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) 479 [virq_from_irq(irq)] = -1; 480 break; 481 case IRQT_IPI: 482 per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) 483 [ipi_from_irq(irq)] = -1; 484 break; 485 default: 486 break; 487 } 488 489 /* Closed ports are implicitly re-bound to VCPU0. */ 490 bind_evtchn_to_cpu(evtchn, 0); 491 492 evtchn_to_irq[evtchn] = -1; 493 } 494 495 if (irq_info[irq].type != IRQT_UNBOUND) { 496 irq_info[irq] = mk_unbound_info(); 497 498 dynamic_irq_cleanup(irq); 499 } 500 501 spin_unlock(&irq_mapping_update_lock); 502} 503 504int bind_evtchn_to_irqhandler(unsigned int evtchn, 505 irq_handler_t handler, 506 unsigned long irqflags, 507 const char *devname, void *dev_id) 508{ 509 unsigned int irq; 510 int retval; 511 512 irq = bind_evtchn_to_irq(evtchn); 513 retval = request_irq(irq, handler, irqflags, devname, dev_id); 514 if (retval != 0) { 515 unbind_from_irq(irq); 516 return retval; 517 } 518 519 return irq; 520} 521EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); 522 523int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, 524 irq_handler_t handler, 525 unsigned long irqflags, const char *devname, void *dev_id) 526{ 527 unsigned int irq; 528 int retval; 529 530 irq = bind_virq_to_irq(virq, cpu); 531 retval = request_irq(irq, handler, irqflags, devname, dev_id); 532 if (retval != 0) { 533 unbind_from_irq(irq); 534 return retval; 535 } 536 537 return irq; 538} 539EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); 540 541int bind_ipi_to_irqhandler(enum ipi_vector ipi, 542 unsigned int cpu, 543 irq_handler_t handler, 544 unsigned long irqflags, 545 const char *devname, 546 void *dev_id) 547{ 548 int irq, retval; 549 550 irq = bind_ipi_to_irq(ipi, cpu); 551 if (irq < 0) 552 return irq; 553 554 irqflags |= IRQF_NO_SUSPEND; 555 retval = request_irq(irq, handler, irqflags, devname, dev_id); 556 if (retval != 0) { 557 unbind_from_irq(irq); 558 return retval; 559 } 560 561 return irq; 562} 563 564void unbind_from_irqhandler(unsigned int irq, void *dev_id) 565{ 566 free_irq(irq, dev_id); 567 unbind_from_irq(irq); 568} 569EXPORT_SYMBOL_GPL(unbind_from_irqhandler); 570 571void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) 572{ 573 int irq = per_cpu(ipi_to_irq, cpu)[vector]; 574 BUG_ON(irq < 0); 575 notify_remote_via_irq(irq); 576} 577 578irqreturn_t xen_debug_interrupt(int irq, void *dev_id) 579{ 580 struct shared_info *sh = HYPERVISOR_shared_info; 581 int cpu = smp_processor_id(); 582 int i; 583 unsigned long flags; 584 static DEFINE_SPINLOCK(debug_lock); 585 586 spin_lock_irqsave(&debug_lock, flags); 587 588 printk("vcpu %d\n ", cpu); 589 590 for_each_online_cpu(i) { 591 struct vcpu_info *v = per_cpu(xen_vcpu, i); 592 printk("%d: masked=%d pending=%d event_sel %08lx\n ", i, 593 (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask, 594 v->evtchn_upcall_pending, 595 v->evtchn_pending_sel); 596 } 597 printk("pending:\n "); 598 for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) 599 printk("%08lx%s", sh->evtchn_pending[i], 600 i % 8 == 0 ? "\n " : " "); 601 printk("\nmasks:\n "); 602 for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) 603 printk("%08lx%s", sh->evtchn_mask[i], 604 i % 8 == 0 ? "\n " : " "); 605 606 printk("\nunmasked:\n "); 607 for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) 608 printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i], 609 i % 8 == 0 ? "\n " : " "); 610 611 printk("\npending list:\n"); 612 for(i = 0; i < NR_EVENT_CHANNELS; i++) { 613 if (sync_test_bit(i, sh->evtchn_pending)) { 614 printk(" %d: event %d -> irq %d\n", 615 cpu_from_evtchn(i), i, 616 evtchn_to_irq[i]); 617 } 618 } 619 620 spin_unlock_irqrestore(&debug_lock, flags); 621 622 return IRQ_HANDLED; 623} 624 625static DEFINE_PER_CPU(unsigned, xed_nesting_count); 626 627/* 628 * Search the CPUs pending events bitmasks. For each one found, map 629 * the event number to an irq, and feed it into do_IRQ() for 630 * handling. 631 * 632 * Xen uses a two-level bitmap to speed searching. The first level is 633 * a bitset of words which contain pending event bits. The second 634 * level is a bitset of pending events themselves. 635 */ 636static void __xen_evtchn_do_upcall(void) 637{ 638 int cpu = get_cpu(); 639 struct shared_info *s = HYPERVISOR_shared_info; 640 struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); 641 unsigned count; 642 643 do { 644 unsigned long pending_words; 645 646 vcpu_info->evtchn_upcall_pending = 0; 647 648 if (__get_cpu_var(xed_nesting_count)++) 649 goto out; 650 651#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ 652 /* Clear master flag /before/ clearing selector flag. */ 653 wmb(); 654#endif 655 pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0); 656 while (pending_words != 0) { 657 unsigned long pending_bits; 658 int word_idx = __ffs(pending_words); 659 pending_words &= ~(1UL << word_idx); 660 661 while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) { 662 int bit_idx = __ffs(pending_bits); 663 int port = (word_idx * BITS_PER_LONG) + bit_idx; 664 int irq = evtchn_to_irq[port]; 665 struct irq_desc *desc; 666 667 if (irq != -1) { 668 desc = irq_to_desc(irq); 669 if (desc) 670 generic_handle_irq_desc(irq, desc); 671 } 672 } 673 } 674 675 BUG_ON(!irqs_disabled()); 676 677 count = __get_cpu_var(xed_nesting_count); 678 __get_cpu_var(xed_nesting_count) = 0; 679 } while (count != 1 || vcpu_info->evtchn_upcall_pending); 680 681out: 682 683 put_cpu(); 684} 685 686void xen_evtchn_do_upcall(struct pt_regs *regs) 687{ 688 struct pt_regs *old_regs = set_irq_regs(regs); 689 690 exit_idle(); 691 irq_enter(); 692 693 __xen_evtchn_do_upcall(); 694 695 irq_exit(); 696 set_irq_regs(old_regs); 697} 698 699void xen_hvm_evtchn_do_upcall(void) 700{ 701 __xen_evtchn_do_upcall(); 702} 703EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall); 704 705/* Rebind a new event channel to an existing irq. */ 706void rebind_evtchn_irq(int evtchn, int irq) 707{ 708 struct irq_info *info = info_for_irq(irq); 709 710 /* Make sure the irq is masked, since the new event channel 711 will also be masked. */ 712 disable_irq(irq); 713 714 spin_lock(&irq_mapping_update_lock); 715 716 /* After resume the irq<->evtchn mappings are all cleared out */ 717 BUG_ON(evtchn_to_irq[evtchn] != -1); 718 /* Expect irq to have been bound before, 719 so there should be a proper type */ 720 BUG_ON(info->type == IRQT_UNBOUND); 721 722 evtchn_to_irq[evtchn] = irq; 723 irq_info[irq] = mk_evtchn_info(evtchn); 724 725 spin_unlock(&irq_mapping_update_lock); 726 727 /* new event channels are always bound to cpu 0 */ 728 irq_set_affinity(irq, cpumask_of(0)); 729 730 /* Unmask the event channel. */ 731 enable_irq(irq); 732} 733 734/* Rebind an evtchn so that it gets delivered to a specific cpu */ 735static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) 736{ 737 struct evtchn_bind_vcpu bind_vcpu; 738 int evtchn = evtchn_from_irq(irq); 739 740 /* events delivered via platform PCI interrupts are always 741 * routed to vcpu 0 */ 742 if (!VALID_EVTCHN(evtchn) || 743 (xen_hvm_domain() && !xen_have_vector_callback)) 744 return -1; 745 746 /* Send future instances of this interrupt to other vcpu. */ 747 bind_vcpu.port = evtchn; 748 bind_vcpu.vcpu = tcpu; 749 750 /* 751 * If this fails, it usually just indicates that we're dealing with a 752 * virq or IPI channel, which don't actually need to be rebound. Ignore 753 * it, but don't do the xenlinux-level rebind in that case. 754 */ 755 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) 756 bind_evtchn_to_cpu(evtchn, tcpu); 757 758 return 0; 759} 760 761static int set_affinity_irq(unsigned irq, const struct cpumask *dest) 762{ 763 unsigned tcpu = cpumask_first(dest); 764 765 return rebind_irq_to_cpu(irq, tcpu); 766} 767 768int resend_irq_on_evtchn(unsigned int irq) 769{ 770 int masked, evtchn = evtchn_from_irq(irq); 771 struct shared_info *s = HYPERVISOR_shared_info; 772 773 if (!VALID_EVTCHN(evtchn)) 774 return 1; 775 776 masked = sync_test_and_set_bit(evtchn, s->evtchn_mask); 777 sync_set_bit(evtchn, s->evtchn_pending); 778 if (!masked) 779 unmask_evtchn(evtchn); 780 781 return 1; 782} 783 784static void enable_dynirq(unsigned int irq) 785{ 786 int evtchn = evtchn_from_irq(irq); 787 788 if (VALID_EVTCHN(evtchn)) 789 unmask_evtchn(evtchn); 790} 791 792static void disable_dynirq(unsigned int irq) 793{ 794 int evtchn = evtchn_from_irq(irq); 795 796 if (VALID_EVTCHN(evtchn)) 797 mask_evtchn(evtchn); 798} 799 800static void ack_dynirq(unsigned int irq) 801{ 802 int evtchn = evtchn_from_irq(irq); 803 804 move_native_irq(irq); 805 806 if (VALID_EVTCHN(evtchn)) 807 clear_evtchn(evtchn); 808} 809 810static int retrigger_dynirq(unsigned int irq) 811{ 812 int evtchn = evtchn_from_irq(irq); 813 struct shared_info *sh = HYPERVISOR_shared_info; 814 int ret = 0; 815 816 if (VALID_EVTCHN(evtchn)) { 817 int masked; 818 819 masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask); 820 sync_set_bit(evtchn, sh->evtchn_pending); 821 if (!masked) 822 unmask_evtchn(evtchn); 823 ret = 1; 824 } 825 826 return ret; 827} 828 829static void restore_cpu_virqs(unsigned int cpu) 830{ 831 struct evtchn_bind_virq bind_virq; 832 int virq, irq, evtchn; 833 834 for (virq = 0; virq < NR_VIRQS; virq++) { 835 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) 836 continue; 837 838 BUG_ON(virq_from_irq(irq) != virq); 839 840 /* Get a new binding from Xen. */ 841 bind_virq.virq = virq; 842 bind_virq.vcpu = cpu; 843 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, 844 &bind_virq) != 0) 845 BUG(); 846 evtchn = bind_virq.port; 847 848 /* Record the new mapping. */ 849 evtchn_to_irq[evtchn] = irq; 850 irq_info[irq] = mk_virq_info(evtchn, virq); 851 bind_evtchn_to_cpu(evtchn, cpu); 852 853 /* Ready for use. */ 854 unmask_evtchn(evtchn); 855 } 856} 857 858static void restore_cpu_ipis(unsigned int cpu) 859{ 860 struct evtchn_bind_ipi bind_ipi; 861 int ipi, irq, evtchn; 862 863 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { 864 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) 865 continue; 866 867 BUG_ON(ipi_from_irq(irq) != ipi); 868 869 /* Get a new binding from Xen. */ 870 bind_ipi.vcpu = cpu; 871 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, 872 &bind_ipi) != 0) 873 BUG(); 874 evtchn = bind_ipi.port; 875 876 /* Record the new mapping. */ 877 evtchn_to_irq[evtchn] = irq; 878 irq_info[irq] = mk_ipi_info(evtchn, ipi); 879 bind_evtchn_to_cpu(evtchn, cpu); 880 881 /* Ready for use. */ 882 unmask_evtchn(evtchn); 883 884 } 885} 886 887/* Clear an irq's pending state, in preparation for polling on it */ 888void xen_clear_irq_pending(int irq) 889{ 890 int evtchn = evtchn_from_irq(irq); 891 892 if (VALID_EVTCHN(evtchn)) 893 clear_evtchn(evtchn); 894} 895 896void xen_set_irq_pending(int irq) 897{ 898 int evtchn = evtchn_from_irq(irq); 899 900 if (VALID_EVTCHN(evtchn)) 901 set_evtchn(evtchn); 902} 903 904bool xen_test_irq_pending(int irq) 905{ 906 int evtchn = evtchn_from_irq(irq); 907 bool ret = false; 908 909 if (VALID_EVTCHN(evtchn)) 910 ret = test_evtchn(evtchn); 911 912 return ret; 913} 914 915/* Poll waiting for an irq to become pending. In the usual case, the 916 irq will be disabled so it won't deliver an interrupt. */ 917void xen_poll_irq(int irq) 918{ 919 evtchn_port_t evtchn = evtchn_from_irq(irq); 920 921 if (VALID_EVTCHN(evtchn)) { 922 struct sched_poll poll; 923 924 poll.nr_ports = 1; 925 poll.timeout = 0; 926 set_xen_guest_handle(poll.ports, &evtchn); 927 928 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) 929 BUG(); 930 } 931} 932 933void xen_irq_resume(void) 934{ 935 unsigned int cpu, irq, evtchn; 936 937 init_evtchn_cpu_bindings(); 938 939 /* New event-channel space is not 'live' yet. */ 940 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) 941 mask_evtchn(evtchn); 942 943 /* No IRQ <-> event-channel mappings. */ 944 for (irq = 0; irq < nr_irqs; irq++) 945 irq_info[irq].evtchn = 0; /* zap event-channel binding */ 946 947 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) 948 evtchn_to_irq[evtchn] = -1; 949 950 for_each_possible_cpu(cpu) { 951 restore_cpu_virqs(cpu); 952 restore_cpu_ipis(cpu); 953 } 954} 955 956static struct irq_chip xen_dynamic_chip __read_mostly = { 957 .name = "xen-dyn", 958 959 .disable = disable_dynirq, 960 .mask = disable_dynirq, 961 .unmask = enable_dynirq, 962 963 .ack = ack_dynirq, 964 .set_affinity = set_affinity_irq, 965 .retrigger = retrigger_dynirq, 966}; 967 968static struct irq_chip xen_percpu_chip __read_mostly = { 969 .name = "xen-percpu", 970 971 .disable = disable_dynirq, 972 .mask = disable_dynirq, 973 .unmask = enable_dynirq, 974 975 .ack = ack_dynirq, 976}; 977 978int xen_set_callback_via(uint64_t via) 979{ 980 struct xen_hvm_param a; 981 a.domid = DOMID_SELF; 982 a.index = HVM_PARAM_CALLBACK_IRQ; 983 a.value = via; 984 return HYPERVISOR_hvm_op(HVMOP_set_param, &a); 985} 986EXPORT_SYMBOL_GPL(xen_set_callback_via); 987 988#ifdef CONFIG_XEN_PVHVM 989/* Vector callbacks are better than PCI interrupts to receive event 990 * channel notifications because we can receive vector callbacks on any 991 * vcpu and we don't need PCI support or APIC interactions. */ 992void xen_callback_vector(void) 993{ 994 int rc; 995 uint64_t callback_via; 996 if (xen_have_vector_callback) { 997 callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK); 998 rc = xen_set_callback_via(callback_via); 999 if (rc) { 1000 printk(KERN_ERR "Request for Xen HVM callback vector" 1001 " failed.\n"); 1002 xen_have_vector_callback = 0; 1003 return; 1004 } 1005 printk(KERN_INFO "Xen HVM callback vector for event delivery is " 1006 "enabled\n"); 1007 /* in the restore case the vector has already been allocated */ 1008 if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors)) 1009 alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector); 1010 } 1011} 1012#else 1013void xen_callback_vector(void) {} 1014#endif 1015 1016void __init xen_init_IRQ(void) 1017{ 1018 int i; 1019 1020 cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s), 1021 GFP_KERNEL); 1022 BUG_ON(cpu_evtchn_mask_p == NULL); 1023 1024 init_evtchn_cpu_bindings(); 1025 1026 /* No event channels are 'live' right now. */ 1027 for (i = 0; i < NR_EVENT_CHANNELS; i++) 1028 mask_evtchn(i); 1029 1030 if (xen_hvm_domain()) { 1031 xen_callback_vector(); 1032 native_init_IRQ(); 1033 } else { 1034 irq_ctx_init(smp_processor_id()); 1035 } 1036} 1037