/******************************************************************************
 * xen_intr.c
 *
 * Xen event and interrupt services for x86 PV and HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/x86/xen/xen_intr.c 279325 2015-02-26 16:05:09Z royger $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/apicreg.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#include <machine/xen/synch_bitops.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>

#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <dev/xen/xenpci/xenpcivar.h>
#include <dev/pci/pcivar.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
	/**
	 * The last event channel bitmap section (level one bit) processed.
	 * This is used to ensure we scan all ports before
	 * servicing an already serviced port again.
	 */
	u_int last_processed_l1i;

	/**
	 * The last event channel processed within the event channel
	 * bitmap being scanned.
	 */
	u_int last_processed_l2i;

	/** Pointer to this CPU's interrupt statistics counter. */
	u_long *evtchn_intrcnt;

	/**
	 * A bitmap of ports that can be serviced from this CPU.
	 * A set bit means interrupt handling is enabled.
	 */
	u_long evtchn_enabled[sizeof(u_long) * 8];
};
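
/*
 * A sizing sketch (an editorial note, assuming the two-level event channel
 * ABI): evtchn_enabled holds sizeof(u_long) * 8 words of LONG_BIT bits each,
 * i.e. LONG_BIT * LONG_BIT bits in total.  On a 64-bit build that is
 * 64 * 64 = 4096 bits, matching NR_EVENT_CHANNELS, so one enable bit exists
 * for every possible event channel port:
 *
 *	_Static_assert(sizeof(((struct xen_intr_pcpu_data *)0)->evtchn_enabled)
 *	    * NBBY == NR_EVENT_CHANNELS, "one enable bit per port");
 */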

/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
	.last_processed_l1i = LONG_BIT - 1,
	.last_processed_l2i = LONG_BIT - 1
};

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define XEN_EEXIST		17 /* Xen "already exists" error */
#define XEN_ALLOCATE_VECTOR	0 /* Allocate a vector for this event channel */
#define XEN_INVALID_EVTCHN	0 /* Invalid event channel */

#define is_valid_evtchn(x)	((x) != XEN_INVALID_EVTCHN)

struct xenisrc {
	struct intsrc	xi_intsrc;
	enum evtchn_type xi_type;
	int		xi_cpu;		/* VCPU for delivery. */
	int		xi_vector;	/* Global isrc vector number. */
	evtchn_port_t	xi_port;
	int		xi_pirq;
	int		xi_virq;
	void		*xi_cookie;
	u_int		xi_close:1;	/* close on unbind? */
	u_int		xi_activehi:1;
	u_int		xi_edgetrigger:1;
	u_int		xi_masked:1;
};

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof(a[0]))

static void	xen_intr_suspend(struct pic *);
static void	xen_intr_resume(struct pic *, bool suspend_cancelled);
static void	xen_intr_enable_source(struct intsrc *isrc);
static void	xen_intr_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_eoi_source(struct intsrc *isrc);
static void	xen_intr_enable_intr(struct intsrc *isrc);
static void	xen_intr_disable_intr(struct intsrc *isrc);
static int	xen_intr_vector(struct intsrc *isrc);
static int	xen_intr_source_pending(struct intsrc *isrc);
static int	xen_intr_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);
static int	xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id);

static void	xen_intr_pirq_enable_source(struct intsrc *isrc);
static void	xen_intr_pirq_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_pirq_eoi_source(struct intsrc *isrc);
static void	xen_intr_pirq_enable_intr(struct intsrc *isrc);
static void	xen_intr_pirq_disable_intr(struct intsrc *isrc);
static int	xen_intr_pirq_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);

/**
 * PIC interface for all event channel port types except physical IRQs.
 */
struct pic xen_intr_pic = {
	.pic_enable_source = xen_intr_enable_source,
	.pic_disable_source = xen_intr_disable_source,
	.pic_eoi_source = xen_intr_eoi_source,
	.pic_enable_intr = xen_intr_enable_intr,
	.pic_disable_intr = xen_intr_disable_intr,
	.pic_vector = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_suspend = xen_intr_suspend,
	.pic_resume = xen_intr_resume,
	.pic_config_intr = xen_intr_config_intr,
	.pic_assign_cpu = xen_intr_assign_cpu
};

/**
 * PIC interface for all event channels representing
 * physical interrupt sources.
 */
struct pic xen_intr_pirq_pic = {
	.pic_enable_source = xen_intr_pirq_enable_source,
	.pic_disable_source = xen_intr_pirq_disable_source,
	.pic_eoi_source = xen_intr_pirq_eoi_source,
	.pic_enable_intr = xen_intr_pirq_enable_intr,
	.pic_disable_intr = xen_intr_pirq_disable_intr,
	.pic_vector = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_config_intr = xen_intr_pirq_config_intr,
	.pic_assign_cpu = xen_intr_assign_cpu
};

static struct mtx	 xen_intr_isrc_lock;
static int		 xen_intr_auto_vector_count;
static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
static u_long		*xen_intr_pirq_eoi_map;
static boolean_t	 xen_intr_pirq_eoi_map_enabled;

/*------------------------- Private Functions --------------------------------*/
/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static inline void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	clear_bit(port, pcpu->evtchn_enabled);
}

/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static inline void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	set_bit(port, pcpu->evtchn_enabled);
}
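
/*
 * A usage sketch: re-targeting a port's per-CPU delivery is a mask/unmask
 * pair on these bitmaps, exactly as xen_intr_assign_cpu() does further
 * below.  Neither call touches the global mask word in the shared info
 * page:
 *
 *	evtchn_cpu_mask_port(old_cpu, port);
 *	evtchn_cpu_unmask_port(new_cpu, port);
 */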

/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 */
static void
xen_intr_intrcnt_add(u_int cpu)
{
	char buf[MAXCOMLEN + 1];
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	if (pcpu->evtchn_intrcnt != NULL)
		return;

	snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
	intrcnt_add(buf, &pcpu->evtchn_intrcnt);
}

/**
 * Search for an already allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 *
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
{
	int isrc_idx;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));

	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
		struct xenisrc *isrc;
		u_int vector;

		vector = FIRST_EVTCHN_INT + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc != NULL
		 && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
			KASSERT(isrc->xi_intsrc.is_handlers == 0,
			    ("Free evtchn still has handlers"));
			isrc->xi_type = type;
			return (isrc);
		}
	}
	return (NULL);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \param type  The type of interrupt source to create.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type, int vector)
{
	static int warned;
	struct xenisrc *isrc;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

	if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("xen_intr_alloc: Event channels exhausted.\n");
		}
		return (NULL);
	}

	if (type != EVTCHN_TYPE_PIRQ) {
		vector = FIRST_EVTCHN_INT + xen_intr_auto_vector_count;
		xen_intr_auto_vector_count++;
	}

	KASSERT((intr_lookup_source(vector) == NULL),
	    ("Trying to use an already allocated vector"));

	mtx_unlock(&xen_intr_isrc_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_intsrc.is_pic =
	    (type == EVTCHN_TYPE_PIRQ) ? &xen_intr_pirq_pic : &xen_intr_pic;
	isrc->xi_vector = vector;
	isrc->xi_type = type;
	intr_register_source(&isrc->xi_intsrc);
	mtx_lock(&xen_intr_isrc_lock);

	return (isrc);
}

/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

	mtx_lock(&xen_intr_isrc_lock);
	if (isrc->xi_intsrc.is_handlers != 0) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EBUSY);
	}
	evtchn_mask_port(isrc->xi_port);
	evtchn_clear_port(isrc->xi_port);

	/* Rebind port to CPU 0. */
	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
	evtchn_cpu_unmask_port(0, isrc->xi_port);

	if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
		struct evtchn_close close = { .port = isrc->xi_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
	}

	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	isrc->xi_cpu = 0;
	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
	isrc->xi_port = 0;
	isrc->xi_cookie = NULL;
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
}

/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp       The returned Xen interrupt object associated with
 *                    the specified local port.
 * \param local_port  The event channel to bind.
 * \param type        The event channel type of local_port.
 * \param intr_owner  The device making this bind request.
 * \param filter      An interrupt filter handler.  Specify NULL
 *                    to always dispatch to the ithread handler.
 * \param handler     An interrupt ithread handler.  Optional (can
 *                    specify NULL) if all necessary event actions
 *                    are performed by filter.
 * \param arg         Argument to present to both filter and handler.
 * \param flags       Interrupt handler flags.  See sys/bus.h.
 * \param handlep     Pointer to an opaque handle used to manage this
 *                    registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, device_t intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	*isrcp = NULL;
	if (port_handlep == NULL) {
		device_printf(intr_owner,
		    "xen_intr_bind_isrc: Bad event handle\n");
		return (EINVAL);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_find_unused_isrc(type);
	if (isrc == NULL) {
		isrc = xen_intr_alloc_isrc(type, XEN_ALLOCATE_VECTOR);
		if (isrc == NULL) {
			mtx_unlock(&xen_intr_isrc_lock);
			return (ENOSPC);
		}
	}
	isrc->xi_port = local_port;
	xen_intr_port_to_isrc[local_port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	/* Assign the opaque handle (the event channel port) */
	*port_handlep = &isrc->xi_port;

#ifdef SMP
	if (type == EVTCHN_TYPE_PORT) {
		/*
		 * By default all interrupts are assigned to vCPU#0
		 * unless specified otherwise, so shuffle them to balance
		 * the interrupt load.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, intr_next_cpu());
	}
#endif

	if (filter == NULL && handler == NULL) {
		/*
		 * No filter/handler provided, leave the event channel
		 * masked and without a valid handler, the caller is
		 * in charge of setting that up.
		 */
		*isrcp = isrc;
		return (0);
	}

	error = xen_intr_add_handler(intr_owner, filter, handler, arg, flags,
	    *port_handlep);
	if (error != 0) {
		xen_intr_release_isrc(isrc);
		return (error);
	}
	*isrcp = isrc;
	return (0);
}

/**
 * Look up a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           currently exists.
 */
static struct xenisrc *
xen_intr_isrc(xen_intr_handle_t handle)
{
	evtchn_port_t port;

	if (handle == NULL)
		return (NULL);

	port = *(evtchn_port_t *)handle;
	if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
		return (NULL);

	return (xen_intr_port_to_isrc[port]);
}

/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
    u_int idx)
{
	return (sh->evtchn_pending[idx]
	      & ~sh->evtchn_mask[idx]
	      & pcpu->evtchn_enabled[idx]);
}
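
/*
 * A worked example (a sketch): a port number decomposes into a two-level
 * bitmap coordinate and back, which is how the upcall handler below
 * recovers port numbers:
 *
 *	l1i  = port / LONG_BIT;		// selector bit / section index
 *	l2i  = port % LONG_BIT;		// bit within the section
 *	port = (l1i * LONG_BIT) + l2i;
 *
 * On a 64-bit build (LONG_BIT == 64), port 130 is bit 2 of section 2.
 */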

/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 */
void
xen_intr_handle_upcall(struct trapframe *trap_frame)
{
	u_int l1i, l2i, port, cpu;
	u_long masked_l1, masked_l2;
	struct xenisrc *isrc;
	shared_info_t *s;
	vcpu_info_t *v;
	struct xen_intr_pcpu_data *pc;
	u_long l1, l2;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the right vCPU.
	 */
	critical_enter();

	cpu = PCPU_GET(cpuid);
	pc = DPCPU_PTR(xen_intr_pcpu);
	s = HYPERVISOR_shared_info;
	v = DPCPU_GET(vcpu_info);

	if (xen_hvm_domain() && !xen_vector_callback_enabled) {
		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
	}

	v->evtchn_upcall_pending = 0;

#if 0
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
#endif

	l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

	l1i = pc->last_processed_l1i;
	l2i = pc->last_processed_l2i;
	(*pc->evtchn_intrcnt)++;

	while (l1 != 0) {

		l1i = (l1i + 1) % LONG_BIT;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) {
			/*
			 * if we masked out all events, wrap around
			 * to the beginning.
			 */
			l1i = LONG_BIT - 1;
			l2i = LONG_BIT - 1;
			continue;
		}
		l1i = ffsl(masked_l1) - 1;

		do {
			l2 = xen_intr_active_ports(pc, s, l1i);

			l2i = (l2i + 1) % LONG_BIT;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) {
				/* if we masked out all events, move on */
				l2i = LONG_BIT - 1;
				break;
			}
			l2i = ffsl(masked_l2) - 1;

			/* process port */
			port = (l1i * LONG_BIT) + l2i;
			synch_clear_bit(port, &s->evtchn_pending[0]);

			isrc = xen_intr_port_to_isrc[port];
			if (__predict_false(isrc == NULL))
				continue;

			/* Make sure we are firing on the right vCPU */
			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
			    ("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
			    PCPU_GET(cpuid), isrc->xi_cpu));

			intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

			/*
			 * If this is the final port processed,
			 * we'll pick up here+1 next time.
			 */
			pc->last_processed_l1i = l1i;
			pc->last_processed_l2i = l2i;

		} while (l2i != LONG_BIT - 1);

		l2 = xen_intr_active_ports(pc, s, l1i);
		if (l2 == 0) {
			/*
			 * We handled all ports, so we can clear the
			 * selector bit.
			 */
			l1 &= ~(1UL << l1i);
		}
	}
	critical_exit();
}

static int
xen_intr_init(void *dummy __unused)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xen_intr_pcpu_data *pcpu;
	struct physdev_pirq_eoi_gmfn eoi_gmfn;
	int i, rc;

	if (!xen_domain())
		return (0);

	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

	/*
	 * Register interrupt count manually as we aren't
	 * guaranteed to see a call to xen_intr_assign_cpu()
	 * before our first interrupt.  Also set the per-cpu
	 * mask of CPU#0 to enable all, since by default
	 * all event channels are bound to CPU#0.
	 */
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
		xen_intr_intrcnt_add(i);
	}

	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Try to register PIRQ EOI map */
	xen_intr_pirq_eoi_map = malloc(PAGE_SIZE, M_XENINTR, M_WAITOK | M_ZERO);
	eoi_gmfn.gmfn = atop(vtophys(xen_intr_pirq_eoi_map));
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
	if (rc != 0 && bootverbose)
		printf("Xen interrupts: unable to register PIRQ EOI map\n");
	else
		xen_intr_pirq_eoi_map_enabled = true;

	intr_register_pic(&xen_intr_pic);
	intr_register_pic(&xen_intr_pirq_pic);

	if (bootverbose)
		printf("Xen interrupt system initialized\n");

	return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);

/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_suspend(struct pic *unused)
{
}

static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	int error;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
	    &bind_ipi);
	if (error != 0)
		panic("unable to rebind xen IPI: %d", error);

	isrc->xi_port = bind_ipi.port;
	isrc->xi_cpu = 0;
	xen_intr_port_to_isrc[bind_ipi.port] = isrc;

	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
	    cpu_apic_ids[cpu]);
	if (error)
		panic("unable to bind xen IPI to CPU#%d: %d",
		    cpu, error);

	evtchn_unmask_port(bind_ipi.port);
#else
	panic("Resume IPI event channel on UP");
#endif
}

static void
xen_rebind_virq(struct xenisrc *isrc)
{
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	int error;
	struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
	    .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
	    &bind_virq);
	if (error != 0)
		panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error);

	isrc->xi_port = bind_virq.port;
	isrc->xi_cpu = 0;
	xen_intr_port_to_isrc[bind_virq.port] = isrc;

#ifdef SMP
	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
	    cpu_apic_ids[cpu]);
	if (error)
		panic("unable to bind xen VIRQ#%d to CPU#%d: %d",
		    isrc->xi_virq, cpu, error);
#endif

	evtchn_unmask_port(bind_virq.port);
}

/**
 * Return this PIC to service after being suspended.
 */
static void
xen_intr_resume(struct pic *unused, bool suspend_cancelled)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xenisrc *isrc;
	u_int isrc_idx;
	int i;

	if (suspend_cancelled)
		return;

	/* Reset the per-CPU masks */
	CPU_FOREACH(i) {
		struct xen_intr_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled,
		    i == 0 ? ~0 : 0, sizeof(pcpu->evtchn_enabled));
	}

	/* Mask all event channels. */
	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Remove port -> isrc mappings */
	memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));

	/* Free unused isrcs and rebind VIRQs and IPIs */
	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
		u_int vector;

		vector = FIRST_EVTCHN_INT + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc != NULL) {
			isrc->xi_port = 0;
			switch (isrc->xi_type) {
			case EVTCHN_TYPE_IPI:
				xen_rebind_ipi(isrc);
				break;
			case EVTCHN_TYPE_VIRQ:
				xen_rebind_virq(isrc);
				break;
			default:
				isrc->xi_cpu = 0;
				break;
			}
		}
	}
}

/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_vector(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	return (isrc->xi_vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_source_pending(struct intsrc *isrc)
{
	/*
	 * EventChannels are edge triggered and never masked.
	 * There can be no pending events.
	 */
	return (0);
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Configuration is only possible via the evtchn APIs. */
	return (ENODEV);
}

/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param apic_id  The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
{
#ifdef SMP
	struct evtchn_bind_vcpu bind_vcpu;
	struct xenisrc *isrc;
	u_int to_cpu, vcpu_id;
	int error, masked;

#ifdef XENHVM
	if (xen_vector_callback_enabled == 0)
		return (EOPNOTSUPP);
#endif

	to_cpu = apic_cpuid(apic_id);
	vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;
	xen_intr_intrcnt_add(to_cpu);

	mtx_lock(&xen_intr_isrc_lock);
	isrc = (struct xenisrc *)base_isrc;
	if (!is_valid_evtchn(isrc->xi_port)) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}

	/*
	 * Mask the event channel while binding it to prevent interrupt
	 * delivery with an inconsistent state in isrc->xi_cpu.
	 */
	masked = evtchn_test_and_set_mask(isrc->xi_port);
	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
		/*
		 * Virtual IRQs are associated with a cpu by
		 * the Hypervisor at evtchn_bind_virq time, so
		 * all we need to do is update the per-CPU masks.
		 */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		goto out;
	}

	bind_vcpu.port = isrc->xi_port;
	bind_vcpu.vcpu = vcpu_id;

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
	if (isrc->xi_cpu != to_cpu) {
		if (error == 0) {
			/* Commit to new binding by removing the old one. */
			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
			isrc->xi_cpu = to_cpu;
			evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		}
	}

out:
	if (masked == 0)
		evtchn_unmask_port(isrc->xi_port);
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	/*
	 * NB: checking if the event channel is already masked is
	 * needed because the event channel user-space device
	 * masks event channels in its filter as part of its
	 * normal operation, and those shouldn't be automatically
	 * unmasked by the generic interrupt code.  The event channel
	 * device will unmask them when needed.
	 */
	isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_masked == 0)
		evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_eoi_source(struct intsrc *base_isrc)
{
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_unmask_port(isrc->xi_port);
}

/*------------------ Physical Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_pirq_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_edgetrigger == 0)
		evtchn_mask_port(isrc->xi_port);
	if (eoi == PIC_EOI)
		xen_intr_pirq_eoi_source(base_isrc);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_pirq_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_edgetrigger == 0)
		evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	if (test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
		struct physdev_eoi eoi = { .irq = isrc->xi_pirq };

		error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		if (error != 0)
			panic("Unable to EOI PIRQ#%d: %d\n",
			    isrc->xi_pirq, error);
	}
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_pirq_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	struct evtchn_bind_pirq bind_pirq;
	struct physdev_irq_status_query irq_status;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	if (!xen_intr_pirq_eoi_map_enabled) {
		irq_status.irq = isrc->xi_pirq;
		error = HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query,
		    &irq_status);
		if (error)
			panic("unable to get status of IRQ#%d", isrc->xi_pirq);

		if (irq_status.flags & XENIRQSTAT_needs_eoi) {
			/*
			 * Since the dynamic PIRQ EOI map is not available
			 * mark the PIRQ as needing EOI unconditionally.
			 */
			set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
		}
	}

	bind_pirq.pirq = isrc->xi_pirq;
	bind_pirq.flags = isrc->xi_edgetrigger ? 0 : BIND_PIRQ__WILL_SHARE;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (error)
		panic("unable to bind IRQ#%d", isrc->xi_pirq);

	isrc->xi_port = bind_pirq.port;

	mtx_lock(&xen_intr_isrc_lock);
	KASSERT((xen_intr_port_to_isrc[bind_pirq.port] == NULL),
	    ("trying to override an already setup event channel port"));
	xen_intr_port_to_isrc[bind_pirq.port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	evtchn_unmask_port(isrc->xi_port);
}

/*
 * Disable an interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_pirq_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	struct evtchn_close close;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);

	close.port = isrc->xi_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (error)
		panic("unable to close event channel %d IRQ#%d",
		    isrc->xi_port, isrc->xi_pirq);

	mtx_lock(&xen_intr_isrc_lock);
	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	mtx_unlock(&xen_intr_isrc_lock);

	isrc->xi_port = 0;
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_pirq_config_intr(struct intsrc *base_isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;
	struct physdev_setup_gsi setup_gsi;
	int error;

	KASSERT(!(trig == INTR_TRIGGER_CONFORM || pol == INTR_POLARITY_CONFORM),
	    ("%s: Conforming trigger or polarity\n", __func__));

	setup_gsi.gsi = isrc->xi_pirq;
	setup_gsi.triggering = trig == INTR_TRIGGER_EDGE ? 0 : 1;
	setup_gsi.polarity = pol == INTR_POLARITY_HIGH ? 0 : 1;

	error = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (error == -XEN_EEXIST) {
		if ((isrc->xi_edgetrigger && (trig != INTR_TRIGGER_EDGE)) ||
		    (isrc->xi_activehi && (pol != INTR_POLARITY_HIGH)))
			panic("unable to reconfigure interrupt IRQ#%d",
			    isrc->xi_pirq);
		error = 0;
	}
	if (error)
		panic("unable to configure IRQ#%d\n", isrc->xi_pirq);

	isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
	isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

	return (0);
}

/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT, dev,
	    filter, handler, arg, flags, port_handlep);
	if (error != 0)
		return (error);

	/*
	 * The Event Channel API didn't open this port, so it is not
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 0;
	return (0);
}
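
/*
 * A usage sketch (sc, sc->port and my_filter are hypothetical): binding a
 * port that was opened elsewhere, e.g. advertised via the XenStore, and
 * tearing it down later:
 *
 *	error = xen_intr_bind_local_port(sc->dev, sc->port, my_filter,
 *	    NULL, sc, INTR_TYPE_BIO, &sc->handle);
 *	...
 *	xen_intr_unbind(&sc->handle);
 *
 * Because xi_close is 0 here, the unbind does not close the port.
 */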

int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_alloc_unbound alloc_unbound;
	int error;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
	    &alloc_unbound);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
	    dev, filter, handler, arg, flags,
	    port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = alloc_unbound.port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	isrc->xi_close = 1;
	return (0);
}

int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_bind_interdomain bind_interdomain;
	int error;

	bind_interdomain.remote_dom = remote_domain;
	bind_interdomain.remote_port = remote_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
	    &bind_interdomain);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
	    EVTCHN_TYPE_PORT, dev, filter, handler,
	    arg, flags, port_handlep);
	if (error) {
		evtchn_close_t close = { .port = bind_interdomain.local_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}
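
/*
 * How the two interdomain calls above pair up (a sketch; the transport
 * used to advertise the port, typically the XenStore, is an assumption):
 *
 *	Domain A:  xen_intr_alloc_and_bind_local_port(dev, domid_B, ...)
 *	           then advertises the resulting local port to domain B.
 *	Domain B:  xen_intr_bind_remote_port(dev, domid_A, port_A, ...)
 *	           which completes the channel via EVTCHNOP_bind_interdomain.
 */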

int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
	int error;

	/* Ensure the target CPU is ready to handle evtchn interrupts. */
	xen_intr_intrcnt_add(cpu);

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ, dev,
	    filter, handler, arg, flags, port_handlep);

#ifdef SMP
	if (error == 0)
		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
#endif

	if (error != 0) {
		evtchn_close_t close = { .port = bind_virq.port };

		xen_intr_unbind(*port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

#ifdef SMP
	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}
#endif

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	isrc->xi_virq = virq;

	return (0);
}
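
/*
 * A usage sketch (my_timer_filter and pcpu_arg are hypothetical): binding
 * a per-CPU virtual IRQ such as VIRQ_TIMER to a given CPU:
 *
 *	error = xen_intr_bind_virq(dev, VIRQ_TIMER, cpu, my_timer_filter,
 *	    NULL, pcpu_arg, INTR_TYPE_CLK, &handle);
 */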

int
xen_intr_alloc_and_bind_ipi(device_t dev, u_int cpu,
    driver_filter_t filter, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
#ifdef SMP
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
	int error;

	/* Ensure the target CPU is ready to handle evtchn interrupts. */
	xen_intr_intrcnt_add(cpu);

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
	    dev, filter, NULL, NULL, flags,
	    port_handlep);
	if (error == 0)
		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);

	if (error != 0) {
		evtchn_close_t close = { .port = bind_ipi.port };

		xen_intr_unbind(*port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

int
xen_register_pirq(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
	struct physdev_map_pirq map_pirq;
	struct xenisrc *isrc;
	int error;

	if (vector == 0)
		return (EINVAL);

	if (bootverbose)
		printf("xen: register IRQ#%d\n", vector);

	map_pirq.domid = DOMID_SELF;
	map_pirq.type = MAP_PIRQ_TYPE_GSI;
	map_pirq.index = vector;
	map_pirq.pirq = vector;

	error = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_pirq);
	if (error) {
		printf("xen: unable to map IRQ#%d\n", vector);
		return (error);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector);
	mtx_unlock(&xen_intr_isrc_lock);
	KASSERT((isrc != NULL), ("xen: unable to allocate isrc for interrupt"));
	isrc->xi_pirq = vector;
	isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
	isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

	return (0);
}

int
xen_register_msi(device_t dev, int vector, int count)
{
	struct physdev_map_pirq msi_irq;
	struct xenisrc *isrc;
	int ret;

	memset(&msi_irq, 0, sizeof(msi_irq));
	msi_irq.domid = DOMID_SELF;
	msi_irq.type = count == 1 ?
	    MAP_PIRQ_TYPE_MSI_SEG : MAP_PIRQ_TYPE_MULTI_MSI;
	msi_irq.index = -1;
	msi_irq.pirq = -1;
	msi_irq.bus = pci_get_bus(dev) | (pci_get_domain(dev) << 16);
	msi_irq.devfn = (pci_get_slot(dev) << 3) | pci_get_function(dev);
	msi_irq.entry_nr = count;

	ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &msi_irq);
	if (ret != 0)
		return (ret);
	if (count != msi_irq.entry_nr) {
		panic("unable to setup all requested MSI vectors "
		    "(expected %d got %d)", count, msi_irq.entry_nr);
	}

	mtx_lock(&xen_intr_isrc_lock);
	for (int i = 0; i < count; i++) {
		isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector + i);
		KASSERT(isrc != NULL,
		    ("xen: unable to allocate isrc for interrupt"));
		isrc->xi_pirq = msi_irq.pirq + i;
		/* MSI interrupts are always edge triggered */
		isrc->xi_edgetrigger = 1;
	}
	mtx_unlock(&xen_intr_isrc_lock);

	return (0);
}

int
xen_release_msi(int vector)
{
	struct physdev_unmap_pirq unmap;
	struct xenisrc *isrc;
	int ret;

	isrc = (struct xenisrc *)intr_lookup_source(vector);
	if (isrc == NULL)
		return (ENXIO);

	unmap.pirq = isrc->xi_pirq;
	ret = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap);
	if (ret != 0)
		return (ret);

	xen_intr_release_isrc(isrc);

	return (0);
}

int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
	char descr[MAXCOMLEN + 1];
	struct xenisrc *isrc;
	va_list ap;

	isrc = xen_intr_isrc(port_handle);
	if (isrc == NULL)
		return (EINVAL);

	va_start(ap, fmt);
	vsnprintf(descr, sizeof(descr), fmt, ap);
	va_end(ap);
	return (intr_describe(isrc->xi_vector, isrc->xi_cookie, descr));
}

void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;

	KASSERT(port_handlep != NULL,
	    ("NULL xen_intr_handle_t passed to xen_intr_unbind"));

	isrc = xen_intr_isrc(*port_handlep);
	*port_handlep = NULL;
	if (isrc == NULL)
		return;

	if (isrc->xi_cookie != NULL)
		intr_remove_handler(isrc->xi_cookie);
	xen_intr_release_isrc(isrc);
}

void
xen_intr_signal(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc != NULL) {
		KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
		    isrc->xi_type == EVTCHN_TYPE_IPI,
		    ("evtchn_signal on something other than a local port"));
		struct evtchn_send send = { .port = isrc->xi_port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
	}
}
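
/*
 * A usage sketch (sc is a hypothetical softc): split drivers typically
 * call xen_intr_signal() to kick the remote end after publishing requests
 * on a shared ring, using the notify hint from xen/interface/io/ring.h:
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->ring, notify);
 *	if (notify)
 *		xen_intr_signal(sc->xen_intr_handle);
 */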

evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL)
		return (0);

	return (isrc->xi_port);
}

int
xen_intr_add_handler(device_t dev, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
	struct xenisrc *isrc;
	int error;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL || isrc->xi_cookie != NULL)
		return (EINVAL);

	error = intr_add_handler(device_get_nameunit(dev), isrc->xi_vector,
	    filter, handler, arg, flags|INTR_EXCL, &isrc->xi_cookie);
	if (error != 0) {
		device_printf(dev,
		    "xen_intr_add_handler: intr_add_handler failed: %d\n",
		    error);
	}

	return (error);
}
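
/*
 * A usage sketch (my_filter and sc are hypothetical): since
 * xen_intr_bind_isrc() accepts NULL for both filter and handler, a caller
 * may bind first, leaving the channel masked, and attach the handler
 * afterwards with this function:
 *
 *	error = xen_intr_bind_local_port(dev, port, NULL, NULL, NULL,
 *	    INTR_TYPE_MISC, &handle);
 *	if (error == 0)
 *		error = xen_intr_add_handler(dev, my_filter, NULL, sc,
 *		    INTR_TYPE_MISC, handle);
 */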

#ifdef DDB
static const char *
xen_intr_print_type(enum evtchn_type type)
{
	static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
		[EVTCHN_TYPE_UNBOUND]	= "UNBOUND",
		[EVTCHN_TYPE_PIRQ]	= "PIRQ",
		[EVTCHN_TYPE_VIRQ]	= "VIRQ",
		[EVTCHN_TYPE_IPI]	= "IPI",
		[EVTCHN_TYPE_PORT]	= "PORT",
	};

	if (type >= EVTCHN_TYPE_COUNT)
		return ("UNKNOWN");

	return (evtchn_type_to_string[type]);
}

static void
xen_intr_dump_port(struct xenisrc *isrc)
{
	struct xen_intr_pcpu_data *pcpu;
	shared_info_t *s = HYPERVISOR_shared_info;
	int i;

	db_printf("Port %d Type: %s\n",
	    isrc->xi_port, xen_intr_print_type(isrc->xi_type));
	if (isrc->xi_type == EVTCHN_TYPE_PIRQ) {
		db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d "
		    "NeedsEOI: %d\n",
		    isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger,
		    !!test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
	}
	if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
		db_printf("\tVirq: %d\n", isrc->xi_virq);

	db_printf("\tMasked: %d Pending: %d\n",
	    !!test_bit(isrc->xi_port, &s->evtchn_mask[0]),
	    !!test_bit(isrc->xi_port, &s->evtchn_pending[0]));

	db_printf("\tPer-CPU Masks: ");
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		db_printf("cpu#%d: %d ", i,
		    !!test_bit(isrc->xi_port, pcpu->evtchn_enabled));
	}
	db_printf("\n");
}

DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
	int i;

	if (!xen_domain()) {
		db_printf("Only available on Xen guests\n");
		return;
	}

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		struct xenisrc *isrc;

		isrc = xen_intr_port_to_isrc[i];
		if (isrc == NULL)
			continue;

		xen_intr_dump_port(isrc);
	}
}
#endif /* DDB */