/******************************************************************************
 * xen_intr.c
 *
 * Xen event and interrupt services for x86 HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/x86/xen/xen_intr.c 282274 2015-04-30 15:48:48Z jhb $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/apicreg.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#include <machine/xen/synch_bitops.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>

#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <dev/xen/xenpci/xenpcivar.h>
#include <dev/pci/pcivar.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
	/**
	 * The last event channel bitmap section (level one bit) processed.
	 * This is used to ensure we scan all ports before
	 * servicing an already serviced port again.
	 */
	u_int last_processed_l1i;

	/**
	 * The last event channel processed within the event channel
	 * bitmap being scanned.
	 */
	u_int last_processed_l2i;

	/** Pointer to this CPU's interrupt statistic counter. */
	u_long *evtchn_intrcnt;

	/**
	 * A bitmap of ports that can be serviced from this CPU.
	 * A set bit means interrupt handling is enabled.
	 */
	u_long evtchn_enabled[sizeof(u_long) * 8];
};

/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
	.last_processed_l1i = LONG_BIT - 1,
	.last_processed_l2i = LONG_BIT - 1
};
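
/*
 * Event channel ports live in a two-level bitmap: a level-one selector
 * word in which each bit covers one u_long section of the pending
 * bitmap, plus the sections themselves.  A port maps to a (l1i, l2i)
 * pair as port = l1i * LONG_BIT + l2i; e.g. on an LP64 system, port 70
 * is bit 6 of section 1.  xen_intr_handle_upcall() below walks this
 * structure.
 */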

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define XEN_EEXIST		17 /* Xen "already exists" error */
#define XEN_ALLOCATE_VECTOR	0  /* Allocate a vector for this event channel */
#define XEN_INVALID_EVTCHN	0  /* Invalid event channel */

#define is_valid_evtchn(x)	((x) != XEN_INVALID_EVTCHN)

struct xenisrc {
	struct intsrc	xi_intsrc;
	enum evtchn_type xi_type;
	int		xi_cpu;		/* VCPU for delivery. */
	int		xi_vector;	/* Global isrc vector number. */
	evtchn_port_t	xi_port;
	int		xi_pirq;
	int		xi_virq;
	void		*xi_cookie;
	u_int		xi_close:1;	/* close on unbind? */
	u_int		xi_activehi:1;
	u_int		xi_edgetrigger:1;
	u_int		xi_masked:1;
};

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof(a[0]))

static void	xen_intr_suspend(struct pic *);
static void	xen_intr_resume(struct pic *, bool suspend_cancelled);
static void	xen_intr_enable_source(struct intsrc *isrc);
static void	xen_intr_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_eoi_source(struct intsrc *isrc);
static void	xen_intr_enable_intr(struct intsrc *isrc);
static void	xen_intr_disable_intr(struct intsrc *isrc);
static int	xen_intr_vector(struct intsrc *isrc);
static int	xen_intr_source_pending(struct intsrc *isrc);
static int	xen_intr_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);
static int	xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id);

static void	xen_intr_pirq_enable_source(struct intsrc *isrc);
static void	xen_intr_pirq_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_pirq_eoi_source(struct intsrc *isrc);
static void	xen_intr_pirq_enable_intr(struct intsrc *isrc);
static void	xen_intr_pirq_disable_intr(struct intsrc *isrc);
static int	xen_intr_pirq_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);

/**
 * PIC interface for all event channel port types except physical IRQs.
 */
struct pic xen_intr_pic = {
	.pic_enable_source = xen_intr_enable_source,
	.pic_disable_source = xen_intr_disable_source,
	.pic_eoi_source = xen_intr_eoi_source,
	.pic_enable_intr = xen_intr_enable_intr,
	.pic_disable_intr = xen_intr_disable_intr,
	.pic_vector = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_suspend = xen_intr_suspend,
	.pic_resume = xen_intr_resume,
	.pic_config_intr = xen_intr_config_intr,
	.pic_assign_cpu = xen_intr_assign_cpu
};

/**
 * PIC interface for all event channels representing
 * physical interrupt sources.
 */
struct pic xen_intr_pirq_pic = {
	.pic_enable_source = xen_intr_pirq_enable_source,
	.pic_disable_source = xen_intr_pirq_disable_source,
	.pic_eoi_source = xen_intr_pirq_eoi_source,
	.pic_enable_intr = xen_intr_pirq_enable_intr,
	.pic_disable_intr = xen_intr_pirq_disable_intr,
	.pic_vector = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_config_intr = xen_intr_pirq_config_intr,
	.pic_assign_cpu = xen_intr_assign_cpu
};

static struct mtx	xen_intr_isrc_lock;
static int		xen_intr_auto_vector_count;
static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
static u_long		*xen_intr_pirq_eoi_map;
static boolean_t	xen_intr_pirq_eoi_map_enabled;

/*------------------------- Private Functions --------------------------------*/
/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static inline void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	clear_bit(port, pcpu->evtchn_enabled);
}

/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static inline void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	set_bit(port, pcpu->evtchn_enabled);
}
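
/*
 * The per-CPU mask pair above is how a port migrates between CPUs:
 * xen_intr_assign_cpu() clears the enable bit for the old CPU and sets
 * it for the new one, so at any instant at most one CPU's upcall
 * handler will service the port.
 */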

/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 */
static void
xen_intr_intrcnt_add(u_int cpu)
{
	char buf[MAXCOMLEN + 1];
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	if (pcpu->evtchn_intrcnt != NULL)
		return;

	snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
	intrcnt_add(buf, &pcpu->evtchn_intrcnt);
}

/**
 * Search for an already allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 *
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
{
	int isrc_idx;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));

	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
		struct xenisrc *isrc;
		u_int vector;

		vector = FIRST_EVTCHN_INT + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc != NULL
		 && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
			KASSERT(isrc->xi_intsrc.is_handlers == 0,
			    ("Free evtchn still has handlers"));
			isrc->xi_type = type;
			return (isrc);
		}
	}
	return (NULL);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \param type  The type of interrupt source to create.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type, int vector)
{
	static int warned;
	struct xenisrc *isrc;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

	if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("xen_intr_alloc: Event channels exhausted.\n");
		}
		return (NULL);
	}

	if (type != EVTCHN_TYPE_PIRQ) {
		vector = FIRST_EVTCHN_INT + xen_intr_auto_vector_count;
		xen_intr_auto_vector_count++;
	}

	KASSERT((intr_lookup_source(vector) == NULL),
	    ("Trying to use an already allocated vector"));

	mtx_unlock(&xen_intr_isrc_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_intsrc.is_pic =
	    (type == EVTCHN_TYPE_PIRQ) ? &xen_intr_pirq_pic : &xen_intr_pic;
	isrc->xi_vector = vector;
	isrc->xi_type = type;
	intr_register_source(&isrc->xi_intsrc);
	mtx_lock(&xen_intr_isrc_lock);

	return (isrc);
}

/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

	mtx_lock(&xen_intr_isrc_lock);
	if (isrc->xi_intsrc.is_handlers != 0) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EBUSY);
	}
	evtchn_mask_port(isrc->xi_port);
	evtchn_clear_port(isrc->xi_port);

	/* Rebind port to CPU 0. */
	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
	evtchn_cpu_unmask_port(0, isrc->xi_port);

	if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
		struct evtchn_close close = { .port = isrc->xi_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
	}

	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	isrc->xi_cpu = 0;
	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
	isrc->xi_port = 0;
	isrc->xi_cookie = NULL;
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
}

/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp       The returned Xen interrupt object associated with
 *                    the specified local port.
 * \param local_port  The event channel to bind.
 * \param type        The event channel type of local_port.
 * \param intr_owner  The device making this bind request.
 * \param filter      An interrupt filter handler.  Specify NULL
 *                    to always dispatch to the ithread handler.
 * \param handler     An interrupt ithread handler.  Optional (can
 *                    specify NULL) if all necessary event actions
 *                    are performed by filter.
 * \param arg         Argument to present to both filter and handler.
 * \param irqflags    Interrupt handler flags.  See sys/bus.h.
 * \param handlep     Pointer to an opaque handle used to manage this
 *                    registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, device_t intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	*isrcp = NULL;
	if (port_handlep == NULL) {
		device_printf(intr_owner,
		    "xen_intr_bind_isrc: Bad event handle\n");
		return (EINVAL);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_find_unused_isrc(type);
	if (isrc == NULL) {
		isrc = xen_intr_alloc_isrc(type, XEN_ALLOCATE_VECTOR);
		if (isrc == NULL) {
			mtx_unlock(&xen_intr_isrc_lock);
			return (ENOSPC);
		}
	}
	isrc->xi_port = local_port;
	xen_intr_port_to_isrc[local_port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	/* Assign the opaque handle (a pointer to the event channel port). */
	*port_handlep = &isrc->xi_port;

#ifdef SMP
	if (type == EVTCHN_TYPE_PORT) {
		/*
		 * By default all interrupts are assigned to vCPU#0
		 * unless specified otherwise, so shuffle them to balance
		 * the interrupt load.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, intr_next_cpu());
	}
#endif

	if (filter == NULL && handler == NULL) {
		/*
		 * No filter/handler provided, leave the event channel
		 * masked and without a valid handler, the caller is
		 * in charge of setting that up.
		 */
		*isrcp = isrc;
		return (0);
	}

	error = xen_intr_add_handler(intr_owner, filter, handler, arg, flags,
	    *port_handlep);
	if (error != 0) {
		xen_intr_release_isrc(isrc);
		return (error);
	}
	*isrcp = isrc;
	return (0);
}
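
/*
 * The opaque handle returned above is simply the address of the isrc's
 * xi_port member; xen_intr_isrc() below recovers the isrc by reading
 * the port number back through that pointer and indexing
 * xen_intr_port_to_isrc[].  Callers must therefore treat the handle as
 * opaque and never cache the port value it points at.
 */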

/**
 * Lookup a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           currently exists.
 */
static struct xenisrc *
xen_intr_isrc(xen_intr_handle_t handle)
{
	evtchn_port_t port;

	if (handle == NULL)
		return (NULL);

	port = *(evtchn_port_t *)handle;
	if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
		return (NULL);

	return (xen_intr_port_to_isrc[port]);
}

/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
    u_int idx)
{
	return (sh->evtchn_pending[idx]
	      & ~sh->evtchn_mask[idx]
	      & pcpu->evtchn_enabled[idx]);
}
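
/*
 * A port is serviceable on this CPU only when all three conditions
 * hold: the hypervisor marked it pending, it is not globally masked,
 * and this CPU is the one bound to it.  For example, with
 * pending = 0b0110, mask = 0b0100 and enabled = 0b1110 the result is
 * 0b0010: only the port at bit 1 is dispatched here.
 */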

/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 */
void
xen_intr_handle_upcall(struct trapframe *trap_frame)
{
	u_int l1i, l2i, port, cpu;
	u_long masked_l1, masked_l2;
	struct xenisrc *isrc;
	shared_info_t *s;
	vcpu_info_t *v;
	struct xen_intr_pcpu_data *pc;
	u_long l1, l2;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the right vCPU
	 */
	critical_enter();

	cpu = PCPU_GET(cpuid);
	pc = DPCPU_PTR(xen_intr_pcpu);
	s = HYPERVISOR_shared_info;
	v = DPCPU_GET(vcpu_info);

	if (xen_hvm_domain() && !xen_vector_callback_enabled) {
		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
	}

	v->evtchn_upcall_pending = 0;

#if 0
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
#endif

	l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

	l1i = pc->last_processed_l1i;
	l2i = pc->last_processed_l2i;
	(*pc->evtchn_intrcnt)++;

	while (l1 != 0) {

		l1i = (l1i + 1) % LONG_BIT;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) {
			/*
			 * if we masked out all events, wrap around
			 * to the beginning.
			 */
			l1i = LONG_BIT - 1;
			l2i = LONG_BIT - 1;
			continue;
		}
		l1i = ffsl(masked_l1) - 1;

		do {
			l2 = xen_intr_active_ports(pc, s, l1i);

			l2i = (l2i + 1) % LONG_BIT;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) {
				/* if we masked out all events, move on */
				l2i = LONG_BIT - 1;
				break;
			}
			l2i = ffsl(masked_l2) - 1;

			/* process port */
			port = (l1i * LONG_BIT) + l2i;
			synch_clear_bit(port, &s->evtchn_pending[0]);

			isrc = xen_intr_port_to_isrc[port];
			if (__predict_false(isrc == NULL))
				continue;

			/* Make sure we are firing on the right vCPU */
			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
			    ("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
			    PCPU_GET(cpuid), isrc->xi_cpu));

			intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

			/*
			 * If this is the final port processed,
			 * we'll pick up here+1 next time.
			 */
			pc->last_processed_l1i = l1i;
			pc->last_processed_l2i = l2i;

		} while (l2i != LONG_BIT - 1);

		l2 = xen_intr_active_ports(pc, s, l1i);
		if (l2 == 0) {
			/*
			 * We handled all ports, so we can clear the
			 * selector bit.
			 */
			l1 &= ~(1UL << l1i);
		}
	}
	critical_exit();
}
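
/*
 * The scan above resumes one bit past the last (l1i, l2i) pair recorded
 * for this CPU and masks off the lower bits of each word before taking
 * ffsl(), which yields a round-robin walk of the bitmap.  A port that
 * fires continuously therefore cannot starve higher-numbered ports in
 * the same word.
 */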

static int
xen_intr_init(void *dummy __unused)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xen_intr_pcpu_data *pcpu;
	struct physdev_pirq_eoi_gmfn eoi_gmfn;
	int i, rc;

	if (!xen_domain())
		return (0);

	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

	/*
	 * Register interrupt count manually as we aren't
	 * guaranteed to see a call to xen_intr_assign_cpu()
	 * before our first interrupt.  Also set the per-cpu
	 * mask of CPU#0 to enable all, since by default
	 * all event channels are bound to CPU#0.
	 */
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
		xen_intr_intrcnt_add(i);
	}

	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/*
	 * Try to register PIRQ EOI map.  Only enable the map when the
	 * hypercall actually succeeded; failure is non-fatal and simply
	 * falls back to unconditional EOIs.
	 */
	xen_intr_pirq_eoi_map = malloc(PAGE_SIZE, M_XENINTR, M_WAITOK | M_ZERO);
	eoi_gmfn.gmfn = atop(vtophys(xen_intr_pirq_eoi_map));
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
	if (rc != 0) {
		if (bootverbose)
			printf("Xen interrupts: unable to register PIRQ EOI map\n");
	} else
		xen_intr_pirq_eoi_map_enabled = true;

	intr_register_pic(&xen_intr_pic);
	intr_register_pic(&xen_intr_pirq_pic);

	if (bootverbose)
		printf("Xen interrupt system initialized\n");

	return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);

/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_suspend(struct pic *unused)
{
}

static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	int error;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
	    &bind_ipi);
	if (error != 0)
		panic("unable to rebind xen IPI: %d", error);

	isrc->xi_port = bind_ipi.port;
	isrc->xi_cpu = 0;
	xen_intr_port_to_isrc[bind_ipi.port] = isrc;

	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
	    cpu_apic_ids[cpu]);
	if (error)
		panic("unable to bind xen IPI to CPU#%d: %d",
		    cpu, error);

	evtchn_unmask_port(bind_ipi.port);
#else
	panic("Resume IPI event channel on UP");
#endif
}

static void
xen_rebind_virq(struct xenisrc *isrc)
{
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	int error;
	struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
	    .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
	    &bind_virq);
	if (error != 0)
		panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error);

	isrc->xi_port = bind_virq.port;
	isrc->xi_cpu = 0;
	xen_intr_port_to_isrc[bind_virq.port] = isrc;

#ifdef SMP
	error = xen_intr_assign_cpu(&isrc->xi_intsrc,
	    cpu_apic_ids[cpu]);
	if (error)
		panic("unable to bind xen VIRQ#%d to CPU#%d: %d",
		    isrc->xi_virq, cpu, error);
#endif

	evtchn_unmask_port(bind_virq.port);
}

/**
 * Return this PIC to service after being suspended.
 */
static void
xen_intr_resume(struct pic *unused, bool suspend_cancelled)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xenisrc *isrc;
	u_int isrc_idx;
	int i;

	if (suspend_cancelled)
		return;

	/* Reset the per-CPU masks */
	CPU_FOREACH(i) {
		struct xen_intr_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled,
		    i == 0 ? ~0 : 0, sizeof(pcpu->evtchn_enabled));
	}

	/* Mask all event channels. */
	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Remove port -> isrc mappings */
	memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));

	/* Free unused isrcs and rebind VIRQs and IPIs */
	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
		u_int vector;

		vector = FIRST_EVTCHN_INT + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc != NULL) {
			isrc->xi_port = 0;
			switch (isrc->xi_type) {
			case EVTCHN_TYPE_IPI:
				xen_rebind_ipi(isrc);
				break;
			case EVTCHN_TYPE_VIRQ:
				xen_rebind_virq(isrc);
				break;
			default:
				isrc->xi_cpu = 0;
				break;
			}
		}
	}
}
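
/*
 * Note that xi_cpu is reset to 0 before the xen_intr_assign_cpu()
 * calls in the rebind helpers above: freshly bound event channels
 * always start out bound to vCPU#0, and the per-CPU masks must reflect
 * that state before the port is migrated back to its original CPU.
 */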

/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_vector(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	return (isrc->xi_vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_source_pending(struct intsrc *isrc)
{
	/*
	 * EventChannels are edge triggered and never masked.
	 * There can be no pending events.
	 */
	return (0);
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Configuration is only possible via the evtchn apis. */
	return (ENODEV);
}

/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param apic_id  The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
{
#ifdef SMP
	struct evtchn_bind_vcpu bind_vcpu;
	struct xenisrc *isrc;
	u_int to_cpu, vcpu_id;
	int error, masked;

	if (xen_vector_callback_enabled == 0)
		return (EOPNOTSUPP);

	to_cpu = apic_cpuid(apic_id);
	vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;
	xen_intr_intrcnt_add(to_cpu);

	mtx_lock(&xen_intr_isrc_lock);
	isrc = (struct xenisrc *)base_isrc;
	if (!is_valid_evtchn(isrc->xi_port)) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}

	/*
	 * Mask the event channel while binding it to prevent interrupt
	 * delivery with an inconsistent state in isrc->xi_cpu.
	 */
	masked = evtchn_test_and_set_mask(isrc->xi_port);
	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
		/*
		 * Virtual IRQs are associated with a cpu by
		 * the Hypervisor at evtchn_bind_virq time, so
		 * all we need to do is update the per-CPU masks.
		 */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		goto out;
	}

	bind_vcpu.port = isrc->xi_port;
	bind_vcpu.vcpu = vcpu_id;

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
	if (isrc->xi_cpu != to_cpu) {
		if (error == 0) {
			/* Commit to new binding by removing the old one. */
			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
			isrc->xi_cpu = to_cpu;
			evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		}
	}

out:
	if (masked == 0)
		evtchn_unmask_port(isrc->xi_port);
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}
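
/*
 * The migration sequence above is: remember whether the port was
 * already masked, mask it, ask the hypervisor to rebind the port to
 * the new vCPU (EVTCHNOP_bind_vcpu), flip the per-CPU enable bits, and
 * finally restore the previous mask state.  Keeping the port masked
 * for the whole window prevents an upcall from observing a
 * half-updated xi_cpu.
 */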

/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	/*
	 * NB: checking if the event channel is already masked is
	 * needed because the event channel user-space device
	 * masks event channels in its filter as part of its
	 * normal operation, and those shouldn't be automatically
	 * unmasked by the generic interrupt code.  The event channel
	 * device will unmask them when needed.
	 */
	isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_masked == 0)
		evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_eoi_source(struct intsrc *base_isrc)
{
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_unmask_port(isrc->xi_port);
}
/*------------------ Physical Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_pirq_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_edgetrigger == 0)
		evtchn_mask_port(isrc->xi_port);
	if (eoi == PIC_EOI)
		xen_intr_pirq_eoi_source(base_isrc);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_pirq_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_edgetrigger == 0)
		evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	if (test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
		struct physdev_eoi eoi = { .irq = isrc->xi_pirq };

		error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		if (error != 0)
			panic("Unable to EOI PIRQ#%d: %d\n",
			    isrc->xi_pirq, error);
	}
}
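
/*
 * xen_intr_pirq_eoi_map is a page shared with the hypervisor (see
 * xen_intr_init()); when the PHYSDEVOP_pirq_eoi_gmfn_v2 registration
 * succeeded, Xen keeps the bit for each PIRQ that still requires an
 * explicit PHYSDEVOP_eoi up to date, so the hypercall above is only
 * issued when it is actually needed.
 */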

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_pirq_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	struct evtchn_bind_pirq bind_pirq;
	struct physdev_irq_status_query irq_status;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	if (!xen_intr_pirq_eoi_map_enabled) {
		irq_status.irq = isrc->xi_pirq;
		error = HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query,
		    &irq_status);
		if (error)
			panic("unable to get status of IRQ#%d", isrc->xi_pirq);

		if (irq_status.flags & XENIRQSTAT_needs_eoi) {
			/*
			 * Since the dynamic PIRQ EOI map is not available
			 * mark the PIRQ as needing EOI unconditionally.
			 */
			set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
		}
	}

	bind_pirq.pirq = isrc->xi_pirq;
	bind_pirq.flags = isrc->xi_edgetrigger ? 0 : BIND_PIRQ__WILL_SHARE;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (error)
		panic("unable to bind IRQ#%d", isrc->xi_pirq);

	isrc->xi_port = bind_pirq.port;

	mtx_lock(&xen_intr_isrc_lock);
	KASSERT((xen_intr_port_to_isrc[bind_pirq.port] == NULL),
	    ("trying to override an already setup event channel port"));
	xen_intr_port_to_isrc[bind_pirq.port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	evtchn_unmask_port(isrc->xi_port);
}

/*
 * Disable an interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_pirq_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	struct evtchn_close close;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);

	close.port = isrc->xi_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (error)
		panic("unable to close event channel %d IRQ#%d",
		    isrc->xi_port, isrc->xi_pirq);

	mtx_lock(&xen_intr_isrc_lock);
	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	mtx_unlock(&xen_intr_isrc_lock);

	isrc->xi_port = 0;
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_pirq_config_intr(struct intsrc *base_isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;
	struct physdev_setup_gsi setup_gsi;
	int error;

	KASSERT(!(trig == INTR_TRIGGER_CONFORM || pol == INTR_POLARITY_CONFORM),
	    ("%s: Conforming trigger or polarity\n", __func__));

	setup_gsi.gsi = isrc->xi_pirq;
	setup_gsi.triggering = trig == INTR_TRIGGER_EDGE ? 0 : 1;
	setup_gsi.polarity = pol == INTR_POLARITY_HIGH ? 0 : 1;

	error = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (error == -XEN_EEXIST) {
		if ((isrc->xi_edgetrigger && (trig != INTR_TRIGGER_EDGE)) ||
		    (isrc->xi_activehi && (pol != INTR_POLARITY_HIGH)))
			panic("unable to reconfigure interrupt IRQ#%d",
			    isrc->xi_pirq);
		error = 0;
	}
	if (error)
		panic("unable to configure IRQ#%d\n", isrc->xi_pirq);

	isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
	isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

	return (0);
}

/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT, dev,
	    filter, handler, arg, flags, port_handlep);
	if (error != 0)
		return (error);

	/*
	 * The Event Channel API didn't open this port, so it is not
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 0;
	return (0);
}
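
/*
 * A minimal usage sketch (hypothetical driver; all names below are
 * invented for illustration): a frontend that learned the number of an
 * already open event channel, e.g. via xenstore, could attach a filter
 * to it like this:
 *
 *	static int
 *	xf_intr_filter(void *arg)
 *	{
 *		struct xf_softc *sc = arg;
 *
 *		// Process the event; see xen/xen_intr.h for the API.
 *		return (FILTER_HANDLED);
 *	}
 *
 *	error = xen_intr_bind_local_port(sc->dev, sc->evtchn_port,
 *	    xf_intr_filter, NULL, sc, INTR_TYPE_BIO, &sc->irq_handle);
 *
 * The handle returned in sc->irq_handle is later passed to
 * xen_intr_signal() and xen_intr_unbind().
 */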

int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_alloc_unbound alloc_unbound;
	int error;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
	    &alloc_unbound);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
	    dev, filter, handler, arg, flags,
	    port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = alloc_unbound.port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	isrc->xi_close = 1;
	return (0);
}

int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_bind_interdomain bind_interdomain;
	int error;

	bind_interdomain.remote_dom = remote_domain;
	bind_interdomain.remote_port = remote_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
	    &bind_interdomain);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
	    EVTCHN_TYPE_PORT, dev, filter, handler,
	    arg, flags, port_handlep);
	if (error) {
		evtchn_close_t close = { .port = bind_interdomain.local_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}

int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
	int error;

	/* Ensure the target CPU is ready to handle evtchn interrupts. */
	xen_intr_intrcnt_add(cpu);

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ, dev,
	    filter, handler, arg, flags, port_handlep);

#ifdef SMP
	if (error == 0)
		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
#endif

	if (error != 0) {
		evtchn_close_t close = { .port = bind_virq.port };

		xen_intr_unbind(port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

#ifdef SMP
	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}
#endif

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	isrc->xi_virq = virq;

	return (0);
}
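
/*
 * Example (illustrative only, names invented here): the timer code
 * binds a per-CPU timer VIRQ along these lines:
 *
 *	error = xen_intr_bind_virq(dev, VIRQ_TIMER, cpu,
 *	    xentimer_filter, NULL, sc, INTR_TYPE_CLK, &sc->timer_handle);
 *
 * VIRQs are associated with a vCPU by the hypervisor at bind time,
 * which is why this function takes a cpu argument instead of relying
 * on a later xen_intr_assign_cpu() call.
 */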

int
xen_intr_alloc_and_bind_ipi(device_t dev, u_int cpu,
    driver_filter_t filter, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
#ifdef SMP
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
	int error;

	/* Ensure the target CPU is ready to handle evtchn interrupts. */
	xen_intr_intrcnt_add(cpu);

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
	    dev, filter, NULL, NULL, flags,
	    port_handlep);
	if (error == 0)
		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);

	if (error != 0) {
		evtchn_close_t close = { .port = bind_ipi.port };

		xen_intr_unbind(port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

int
xen_register_pirq(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
	struct physdev_map_pirq map_pirq;
	struct xenisrc *isrc;
	int error;

	if (vector == 0)
		return (EINVAL);

	if (bootverbose)
		printf("xen: register IRQ#%d\n", vector);

	map_pirq.domid = DOMID_SELF;
	map_pirq.type = MAP_PIRQ_TYPE_GSI;
	map_pirq.index = vector;
	map_pirq.pirq = vector;

	error = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_pirq);
	if (error) {
		printf("xen: unable to map IRQ#%d\n", vector);
		return (error);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector);
	mtx_unlock(&xen_intr_isrc_lock);
	KASSERT((isrc != NULL), ("xen: unable to allocate isrc for interrupt"));
	isrc->xi_pirq = vector;
	isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
	isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

	return (0);
}

int
xen_register_msi(device_t dev, int vector, int count)
{
	struct physdev_map_pirq msi_irq;
	struct xenisrc *isrc;
	int ret;

	memset(&msi_irq, 0, sizeof(msi_irq));
	msi_irq.domid = DOMID_SELF;
	msi_irq.type = count == 1 ?
	    MAP_PIRQ_TYPE_MSI_SEG : MAP_PIRQ_TYPE_MULTI_MSI;
	msi_irq.index = -1;
	msi_irq.pirq = -1;
	msi_irq.bus = pci_get_bus(dev) | (pci_get_domain(dev) << 16);
	msi_irq.devfn = (pci_get_slot(dev) << 3) | pci_get_function(dev);
	msi_irq.entry_nr = count;

	ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &msi_irq);
	if (ret != 0)
		return (ret);
	if (count != msi_irq.entry_nr) {
		panic("unable to setup all requested MSI vectors "
		    "(expected %d got %d)", count, msi_irq.entry_nr);
	}

	mtx_lock(&xen_intr_isrc_lock);
	for (int i = 0; i < count; i++) {
		isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector + i);
		KASSERT(isrc != NULL,
		    ("xen: unable to allocate isrc for interrupt"));
		isrc->xi_pirq = msi_irq.pirq + i;
		/* MSI interrupts are always edge triggered */
		isrc->xi_edgetrigger = 1;
	}
	mtx_unlock(&xen_intr_isrc_lock);

	return (0);
}
int
xen_release_msi(int vector)
{
	struct physdev_unmap_pirq unmap;
	struct xenisrc *isrc;
	int ret;

	isrc = (struct xenisrc *)intr_lookup_source(vector);
	if (isrc == NULL)
		return (ENXIO);

	unmap.pirq = isrc->xi_pirq;
	ret = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap);
	if (ret != 0)
		return (ret);

	xen_intr_release_isrc(isrc);

	return (0);
}

int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
	char descr[MAXCOMLEN + 1];
	struct xenisrc *isrc;
	va_list ap;

	isrc = xen_intr_isrc(port_handle);
	if (isrc == NULL)
		return (EINVAL);

	va_start(ap, fmt);
	vsnprintf(descr, sizeof(descr), fmt, ap);
	va_end(ap);
	return (intr_describe(isrc->xi_vector, isrc->xi_cookie, descr));
}

void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;

	KASSERT(port_handlep != NULL,
	    ("NULL xen_intr_handle_t passed to xen_intr_unbind"));

	isrc = xen_intr_isrc(*port_handlep);
	*port_handlep = NULL;
	if (isrc == NULL)
		return;

	if (isrc->xi_cookie != NULL)
		intr_remove_handler(isrc->xi_cookie);
	xen_intr_release_isrc(isrc);
}

void
xen_intr_signal(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc != NULL) {
		KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
		    isrc->xi_type == EVTCHN_TYPE_IPI,
		    ("evtchn_signal on something other than a local port"));
		struct evtchn_send send = { .port = isrc->xi_port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
	}
}
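
/*
 * Example (illustrative, hypothetical names): a split driver typically
 * calls xen_intr_signal(sc->irq_handle) after publishing new requests
 * on a shared ring; the resulting EVTCHNOP_send triggers the event
 * upcall in the domain holding the peer end of the channel.
 */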

evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL)
		return (0);

	return (isrc->xi_port);
}

int
xen_intr_add_handler(device_t dev, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
	struct xenisrc *isrc;
	int error;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL || isrc->xi_cookie != NULL)
		return (EINVAL);

	error = intr_add_handler(device_get_nameunit(dev), isrc->xi_vector,
	    filter, handler, arg, flags | INTR_EXCL, &isrc->xi_cookie);
	if (error != 0) {
		device_printf(dev,
		    "xen_intr_add_handler: intr_add_handler failed: %d\n",
		    error);
	}

	return (error);
}

#ifdef DDB
static const char *
xen_intr_print_type(enum evtchn_type type)
{
	static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
		[EVTCHN_TYPE_UNBOUND]	= "UNBOUND",
		[EVTCHN_TYPE_PIRQ]	= "PIRQ",
		[EVTCHN_TYPE_VIRQ]	= "VIRQ",
		[EVTCHN_TYPE_IPI]	= "IPI",
		[EVTCHN_TYPE_PORT]	= "PORT",
	};

	if (type >= EVTCHN_TYPE_COUNT)
		return ("UNKNOWN");

	return (evtchn_type_to_string[type]);
}

static void
xen_intr_dump_port(struct xenisrc *isrc)
{
	struct xen_intr_pcpu_data *pcpu;
	shared_info_t *s = HYPERVISOR_shared_info;
	int i;

	db_printf("Port %d Type: %s\n",
	    isrc->xi_port, xen_intr_print_type(isrc->xi_type));
	if (isrc->xi_type == EVTCHN_TYPE_PIRQ) {
		db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d "
		    "NeedsEOI: %d\n",
		    isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger,
		    !!test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
	}
	if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
		db_printf("\tVirq: %d\n", isrc->xi_virq);

	db_printf("\tMasked: %d Pending: %d\n",
	    !!test_bit(isrc->xi_port, &s->evtchn_mask[0]),
	    !!test_bit(isrc->xi_port, &s->evtchn_pending[0]));

	db_printf("\tPer-CPU Masks: ");
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		db_printf("cpu#%d: %d ", i,
		    !!test_bit(isrc->xi_port, pcpu->evtchn_enabled));
	}
	db_printf("\n");
}

DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
	int i;

	if (!xen_domain()) {
		db_printf("Only available on Xen guests\n");
		return;
	}

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		struct xenisrc *isrc;

		isrc = xen_intr_port_to_isrc[i];
		if (isrc == NULL)
			continue;

		xen_intr_dump_port(isrc);
	}
}
#endif /* DDB */