/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Alexander Motin <mav@FreeBSD.org>
 * Copyright 2019 Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/pciio.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pcib_private.h>

#include <dev/vmd/vmd.h>

#include "pcib_if.h"

struct vmd_type {
	u_int16_t	vmd_vid;
	u_int16_t	vmd_did;
	char		*vmd_name;
	int		flags;
#define BUS_RESTRICT	1
#define VECTOR_OFFSET	2
#define CAN_BYPASS_MSI	4
};

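/*
 * VMD-specific PCI configuration registers.  VMD_CAP reports whether the
 * device restricts the range of child bus numbers; VMD_CONFIG encodes the
 * starting bus number and, on capable hardware, the MSI bypass enable.
 */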
#define VMD_CAP		0x40
#define VMD_BUS_RESTRICT	0x1

#define VMD_CONFIG	0x44
#define VMD_BYPASS_MSI		0x2
#define VMD_BUS_START(x)	(((x) >> 8) & 0x3)

#define VMD_LOCK	0x70

SYSCTL_NODE(_hw, OID_AUTO, vmd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Intel Volume Management Device tuning parameters");

/*
 * By default all VMD devices remap their children's MSI/MSI-X interrupts
 * into their own.  This adds isolation, but also complicates things due to
 * vector sharing, etc.  Fortunately some VMD devices can bypass the
 * remapping.
 */
static int vmd_bypass_msi = 1;
SYSCTL_INT(_hw_vmd, OID_AUTO, bypass_msi, CTLFLAG_RWTUN, &vmd_bypass_msi, 0,
    "Bypass MSI remapping on capable hardware");

/*
 * All MSIs within a group share the same address, so VMD can't distinguish
 * them.  Using more than one vector per device makes no sense unless a
 * specific device driver requires it.
 */
static int vmd_max_msi = 1;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msi, CTLFLAG_RWTUN, &vmd_max_msi, 0,
    "Maximum number of MSI vectors per device");

/*
 * MSI-X vectors can use different addresses, but we have only a limited
 * number of them to route to, so use a conservative default to try to
 * avoid sharing.
 */
static int vmd_max_msix = 3;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msix, CTLFLAG_RWTUN, &vmd_max_msix, 0,
    "Maximum number of MSI-X vectors per device");

static struct vmd_type vmd_devs[] = {
        { 0x8086, 0x201d, "Intel Volume Management Device", 0 },
        { 0x8086, 0x28c0, "Intel Volume Management Device", BUS_RESTRICT | CAN_BYPASS_MSI },
        { 0x8086, 0x467f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0x4c3d, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0x7d0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0x9a0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0xa77f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0x8086, 0xad0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
        { 0, 0, NULL, 0 }
};

static int
vmd_probe(device_t dev)
{
	struct vmd_type *t;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = vmd_devs; t->vmd_name != NULL; t++) {
		if (vid == t->vmd_vid && did == t->vmd_did) {
			device_set_desc(dev, t->vmd_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

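/*
 * Release everything acquired during attach.  Written to be safe on a
 * partially initialized softc, so it also serves as the attach failure
 * path.
 */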
static void
vmd_free(struct vmd_softc *sc)
{
	struct vmd_irq *vi;
	struct vmd_irq_user *u;
	int i;

	if (sc->psc.bus.rman.rm_end != 0)
		rman_fini(&sc->psc.bus.rman);
	if (sc->psc.mem.rman.rm_end != 0)
		rman_fini(&sc->psc.mem.rman);
	while ((u = LIST_FIRST(&sc->vmd_users)) != NULL) {
		LIST_REMOVE(u, viu_link);
		free(u, M_DEVBUF);
	}
	if (sc->vmd_irq != NULL) {
		for (i = 0; i < sc->vmd_msix_count; i++) {
			vi = &sc->vmd_irq[i];
			if (vi->vi_res == NULL)
				continue;
			bus_teardown_intr(sc->psc.dev, vi->vi_res,
			    vi->vi_handle);
			bus_release_resource(sc->psc.dev, SYS_RES_IRQ,
			    vi->vi_rid, vi->vi_res);
		}
	}
	free(sc->vmd_irq, M_DEVBUF);
	sc->vmd_irq = NULL;
	pci_release_msi(sc->psc.dev);
	for (i = 0; i < VMD_MAX_BAR; i++) {
		if (sc->vmd_regs_res[i] != NULL)
			bus_release_resource(sc->psc.dev, SYS_RES_MEMORY,
			    sc->vmd_regs_rid[i], sc->vmd_regs_res[i]);
	}
}

/* Config space of the hidden PCI roots is memory-mapped through BAR(0). */

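/*
 * The layout within BAR(0) is ECAM-like: 1 MB (1 << 20) per bus, 32 kB
 * (1 << 15) per slot and 4 kB (1 << 12) per function.  For example, the
 * first function of the first slot on bus vmd_bus_start + 1 starts at
 * offset 0x100000.
 */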
static uint32_t
vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
{
	struct vmd_softc *sc;
	bus_addr_t offset;

	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
		return (0xffffffff);

	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;

	switch (width) {
	case 4:
		return (bus_read_4(sc->vmd_regs_res[0], offset));
	case 2:
		return (bus_read_2(sc->vmd_regs_res[0], offset));
	case 1:
		return (bus_read_1(sc->vmd_regs_res[0], offset));
	default:
		__assert_unreachable();
		return (0xffffffff);
	}
}

static void
vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
    uint32_t val, int width)
{
	struct vmd_softc *sc;
	bus_addr_t offset;

	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
		return;

	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;

	switch (width) {
	case 4:
		return (bus_write_4(sc->vmd_regs_res[0], offset, val));
	case 2:
		return (bus_write_2(sc->vmd_regs_res[0], offset, val));
	case 1:
		return (bus_write_1(sc->vmd_regs_res[0], offset, val));
	default:
		__assert_unreachable();
	}
}

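/*
 * Enable or disable remapping of child MSI/MSI-X interrupts into the
 * VMD's own vectors via the VMD_CONFIG register.
 */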
static void
vmd_set_msi_bypass(device_t dev, bool enable)
{
	uint16_t val;

	val = pci_read_config(dev, VMD_CONFIG, 2);
	if (enable)
		val |= VMD_BYPASS_MSI;
	else
		val &= ~VMD_BYPASS_MSI;
	pci_write_config(dev, VMD_CONFIG, val, 2);
}

static int
vmd_intr(void *arg)
{
	/*
	 * We have nothing to do here, but we have to register some interrupt
	 * handler to make the PCI code set up and enable the MSI-X vector.
	 */
	return (FILTER_STRAY);
}

static int
vmd_attach(device_t dev)
{
	struct vmd_softc *sc;
	struct pcib_secbus *bus;
	struct pcib_window *w;
	struct vmd_type *t;
	struct vmd_irq *vi;
	uint16_t vid, did;
	uint32_t bar;
	int i, j, error;
	char buf[64];

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->psc.dev = dev;
	sc->psc.domain = PCI_DOMAINMAX - device_get_unit(dev);

	pci_enable_busmaster(dev);

	for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) {
		sc->vmd_regs_rid[i] = PCIR_BAR(j);
		bar = pci_read_config(dev, PCIR_BAR(j), 4);
		if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
		    PCIM_BAR_MEM_64)
			j++;
		if ((sc->vmd_regs_res[i] = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) {
			device_printf(dev, "Cannot allocate resources\n");
			goto fail;
		}
	}

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = vmd_devs; t->vmd_name != NULL; t++) {
		if (vid == t->vmd_vid && did == t->vmd_did)
			break;
	}

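	/*
	 * If the hardware restricts the visible bus range, decode the 2-bit
	 * starting bus number from VMD_CONFIG: 0 maps to bus 0, 1 to bus 128
	 * and 2 to bus 224.
	 */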
	sc->vmd_bus_start = 0;
	if ((t->flags & BUS_RESTRICT) &&
	    (pci_read_config(dev, VMD_CAP, 2) & VMD_BUS_RESTRICT)) {
		switch (VMD_BUS_START(pci_read_config(dev, VMD_CONFIG, 2))) {
		case 0:
			sc->vmd_bus_start = 0;
			break;
		case 1:
			sc->vmd_bus_start = 128;
			break;
		case 2:
			sc->vmd_bus_start = 224;
			break;
		default:
			device_printf(dev, "Unknown bus offset\n");
			goto fail;
		}
	}
	sc->vmd_bus_end = MIN(PCI_BUSMAX, sc->vmd_bus_start +
	    (rman_get_size(sc->vmd_regs_res[0]) >> 20) - 1);

	bus = &sc->psc.bus;
	bus->sec = sc->vmd_bus_start;
	bus->sub = sc->vmd_bus_end;
	bus->dev = dev;
	bus->rman.rm_start = 0;
	bus->rman.rm_end = PCI_BUSMAX;
	bus->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&bus->rman);
	if (error) {
		device_printf(dev, "Failed to initialize bus rman\n");
		bus->rman.rm_end = 0;
		goto fail;
	}
	error = rman_manage_region(&bus->rman, sc->vmd_bus_start,
	    sc->vmd_bus_end);
	if (error) {
		device_printf(dev, "Failed to add resource to bus rman\n");
		goto fail;
	}

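	/*
	 * Memory for children is handed out from the windows in BAR(1) and
	 * BAR(2).  The first 0x2000 bytes of BAR(2) are skipped, presumably
	 * being reserved by the hardware, so only the remainder is managed.
	 */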
	w = &sc->psc.mem;
	w->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev));
	w->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&w->rman);
	if (error) {
		device_printf(dev, "Failed to initialize memory rman\n");
		w->rman.rm_end = 0;
		goto fail;
	}
	error = rman_manage_region(&w->rman,
	    rman_get_start(sc->vmd_regs_res[1]),
	    rman_get_end(sc->vmd_regs_res[1]));
	if (error) {
		device_printf(dev, "Failed to add resource to memory rman\n");
		goto fail;
	}
	error = rman_manage_region(&w->rman,
	    rman_get_start(sc->vmd_regs_res[2]) + 0x2000,
	    rman_get_end(sc->vmd_regs_res[2]));
	if (error) {
		device_printf(dev, "Failed to add resource to memory rman\n");
		goto fail;
	}

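	/*
	 * Set up interrupt remapping.  Devices with VECTOR_OFFSET reserve
	 * MSI-X vector 0 for internal use, so children start at vector 1.
	 * If the tunable allows it and the hardware can bypass remapping,
	 * allocate no vectors at all; otherwise register a dummy filter on
	 * every vector so the PCI code programs and enables them.
	 */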
	LIST_INIT(&sc->vmd_users);
	sc->vmd_fist_vector = (t->flags & VECTOR_OFFSET) ? 1 : 0;
	sc->vmd_msix_count = pci_msix_count(dev);
	if (vmd_bypass_msi && (t->flags & CAN_BYPASS_MSI)) {
		sc->vmd_msix_count = 0;
		vmd_set_msi_bypass(dev, true);
	} else if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
		sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
		    sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < sc->vmd_msix_count; i++) {
			vi = &sc->vmd_irq[i];
			vi->vi_rid = i + 1;
			vi->vi_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
			    &vi->vi_rid, RF_ACTIVE | RF_SHAREABLE);
			if (vi->vi_res == NULL) {
				device_printf(dev, "Failed to allocate irq\n");
				goto fail;
			}
			vi->vi_irq = rman_get_start(vi->vi_res);
			if (bus_setup_intr(dev, vi->vi_res, INTR_TYPE_MISC |
			    INTR_MPSAFE, vmd_intr, NULL, vi, &vi->vi_handle)) {
				device_printf(dev, "Can't set up interrupt\n");
				bus_release_resource(dev, SYS_RES_IRQ,
				    vi->vi_rid, vi->vi_res);
				vi->vi_res = NULL;
				goto fail;
			}
		}
		vmd_set_msi_bypass(dev, false);
	}

	sc->vmd_dma_tag = bus_get_dma_tag(dev);

	sc->psc.child = device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));

fail:
	vmd_free(sc);
	return (ENXIO);
}

static int
vmd_detach(device_t dev)
{
	struct vmd_softc *sc = device_get_softc(dev);
	int error;

	error = bus_generic_detach(dev);
	if (error)
		return (error);
	error = device_delete_children(dev);
	if (error)
		return (error);
	if (sc->vmd_msix_count == 0)
		vmd_set_msi_bypass(dev, false);
	vmd_free(sc);
	return (0);
}

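/*
 * Children use the VMD's DMA tag, since the host sees their DMA
 * transactions as originating from the VMD endpoint itself.
 */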
static bus_dma_tag_t
vmd_get_dma_tag(device_t dev, device_t child)
{
	struct vmd_softc *sc = device_get_softc(dev);

	return (sc->vmd_dma_tag);
}

static struct rman *
vmd_get_rman(device_t dev, int type, u_int flags)
{
	struct vmd_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_MEMORY:
		return (&sc->psc.mem.rman);
	case PCI_RES_BUS:
		return (&sc->psc.bus.rman);
	default:
		/* VMD hardware does not support I/O ports. */
		return (NULL);
	}
}

static struct resource *
vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;

	if (type == SYS_RES_IRQ) {
		/* VMD hardware does not support legacy interrupts. */
		if (*rid == 0)
			return (NULL);
		return (bus_generic_alloc_resource(dev, child, type, rid,
		    start, end, count, flags | RF_SHAREABLE));
	}
	res = bus_generic_rman_alloc_resource(dev, child, type, rid, start,
	    end, count, flags);
	if (bootverbose && res != NULL) {
		switch (type) {
		case SYS_RES_MEMORY:
			device_printf(dev,
			    "allocated memory range (%#jx-%#jx) for rid %d of %s\n",
			    rman_get_start(res), rman_get_end(res), *rid,
			    pcib_child_name(child));
			break;
		case PCI_RES_BUS:
			device_printf(dev,
			    "allocated bus range (%ju-%ju) for rid %d of %s\n",
			    rman_get_start(res), rman_get_end(res), *rid,
			    pcib_child_name(child));
			break;
		}
	}
	return (res);
}

static int
vmd_adjust_resource(device_t dev, device_t child,
    struct resource *r, rman_res_t start, rman_res_t end)
{

	if (rman_get_type(r) == SYS_RES_IRQ) {
		return (bus_generic_adjust_resource(dev, child, r, start, end));
	}
	return (bus_generic_rman_adjust_resource(dev, child, r, start, end));
}

static int
vmd_release_resource(device_t dev, device_t child, struct resource *r)
{

	if (rman_get_type(r) == SYS_RES_IRQ) {
		return (bus_generic_release_resource(dev, child, r));
	}
	return (bus_generic_rman_release_resource(dev, child, r));
}

static int
vmd_activate_resource(device_t dev, device_t child, struct resource *r)
{
	if (rman_get_type(r) == SYS_RES_IRQ) {
		return (bus_generic_activate_resource(dev, child, r));
	}
	return (bus_generic_rman_activate_resource(dev, child, r));
}

static int
vmd_deactivate_resource(device_t dev, device_t child, struct resource *r)
{
	if (rman_get_type(r) == SYS_RES_IRQ) {
		return (bus_generic_deactivate_resource(dev, child, r));
	}
	return (bus_generic_rman_deactivate_resource(dev, child, r));
}

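/*
 * Child memory resources are carved out of BAR(1) or BAR(2).  Find the
 * BAR resource backing a given range so mapping requests can be
 * translated into offsets within the parent's own mapping.
 */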
static struct resource *
vmd_find_parent_resource(struct vmd_softc *sc, struct resource *r)
{
	for (int i = 1; i < 3; i++) {
		if (rman_get_start(sc->vmd_regs_res[i]) <= rman_get_start(r) &&
		    rman_get_end(sc->vmd_regs_res[i]) >= rman_get_end(r))
			return (sc->vmd_regs_res[i]);
	}
	return (NULL);
}

static int
vmd_map_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map_request *argsp, struct resource_map *map)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct resource_map_request args;
	struct resource *pres;
	rman_res_t length, start;
	int error;

	/* Resources must be active to be mapped. */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		return (ENXIO);

	resource_init_map_request(&args);
	error = resource_validate_map_request(r, argsp, &args, &start, &length);
	if (error)
		return (error);

	pres = vmd_find_parent_resource(sc, r);
	if (pres == NULL)
		return (ENOENT);

	args.offset = start - rman_get_start(pres);
	args.length = length;
	return (bus_generic_map_resource(dev, child, pres, &args, map));
}

static int
vmd_unmap_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map *map)
{
	struct vmd_softc *sc = device_get_softc(dev);

	r = vmd_find_parent_resource(sc, r);
	if (r == NULL)
		return (ENOENT);
	return (bus_generic_unmap_resource(dev, child, r, map));
}

static int
vmd_route_interrupt(device_t dev, device_t child, int pin)
{

	/* VMD hardware does not support legacy interrupts. */
	return (PCI_INVALID_IRQ);
}

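/*
 * MSI/MSI-X allocation for children.  With remapping active, child
 * vectors are backed by the VMD's own MSI-X vectors and may be shared;
 * each request is placed on the currently least-used vector.  With the
 * remapping bypassed (vmd_msix_count == 0), requests are forwarded to
 * the pcib above the bus the VMD itself sits on.
 */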
static int
vmd_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    int *irqs)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;
	int i, ibest = 0, best = INT_MAX;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_ALLOC_MSI(device_get_parent(device_get_parent(dev)),
		    child, count, maxcount, irqs));
	}

	if (count > vmd_max_msi)
		return (ENOSPC);
	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child)
			return (EBUSY);
	}

	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (best > sc->vmd_irq[i].vi_nusers) {
			best = sc->vmd_irq[i].vi_nusers;
			ibest = i;
		}
	}

	u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
	u->viu_child = child;
	u->viu_vector = ibest;
	LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
	sc->vmd_irq[ibest].vi_nusers += count;

	for (i = 0; i < count; i++)
		irqs[i] = sc->vmd_irq[ibest].vi_irq;
	return (0);
}

static int
vmd_release_msi(device_t dev, device_t child, int count, int *irqs)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_RELEASE_MSI(device_get_parent(device_get_parent(dev)),
		    child, count, irqs));
	}

	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child) {
			sc->vmd_irq[u->viu_vector].vi_nusers -= count;
			LIST_REMOVE(u, viu_link);
			free(u, M_DEVBUF);
			return (0);
		}
	}
	return (EINVAL);
}

static int
vmd_alloc_msix(device_t dev, device_t child, int *irq)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;
	int i, ibest = 0, best = INT_MAX;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_ALLOC_MSIX(device_get_parent(device_get_parent(dev)),
		    child, irq));
	}

	i = 0;
	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child)
			i++;
	}
	if (i >= vmd_max_msix)
		return (ENOSPC);

	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (best > sc->vmd_irq[i].vi_nusers) {
			best = sc->vmd_irq[i].vi_nusers;
			ibest = i;
		}
	}

	u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
	u->viu_child = child;
	u->viu_vector = ibest;
	LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
	sc->vmd_irq[ibest].vi_nusers++;

	*irq = sc->vmd_irq[ibest].vi_irq;
	return (0);
}

static int
vmd_release_msix(device_t dev, device_t child, int irq)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
		    child, irq));
	}

	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child &&
		    sc->vmd_irq[u->viu_vector].vi_irq == irq) {
			sc->vmd_irq[u->viu_vector].vi_nusers--;
			LIST_REMOVE(u, viu_link);
			free(u, M_DEVBUF);
			return (0);
		}
	}
	return (EINVAL);
}

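/*
 * Program a child's MSI/MSI-X address and data.  The VMD vector index is
 * encoded in address bits 12 and up; the remapping hardware apparently
 * keys off the address alone, so the data value is left zero.
 */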
static int
vmd_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data)
{
	struct vmd_softc *sc = device_get_softc(dev);
	int i;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_MAP_MSI(device_get_parent(device_get_parent(dev)),
		    child, irq, addr, data));
	}

	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (sc->vmd_irq[i].vi_irq == irq)
			break;
	}
	if (i >= sc->vmd_msix_count)
		return (EINVAL);
	*addr = MSI_INTEL_ADDR_BASE | (i << 12);
	*data = 0;
	return (0);
}

static device_method_t vmd_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			vmd_probe),
	DEVMETHOD(device_attach,		vmd_attach),
	DEVMETHOD(device_detach,		vmd_detach),
	DEVMETHOD(device_suspend,		bus_generic_suspend),
	DEVMETHOD(device_resume,		bus_generic_resume),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,		vmd_get_dma_tag),
	DEVMETHOD(bus_get_rman,			vmd_get_rman),
	DEVMETHOD(bus_read_ivar,		pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,		pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource,		vmd_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		vmd_adjust_resource),
	DEVMETHOD(bus_release_resource,		vmd_release_resource),
	DEVMETHOD(bus_activate_resource,	vmd_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	vmd_deactivate_resource),
	DEVMETHOD(bus_map_resource,		vmd_map_resource),
	DEVMETHOD(bus_unmap_resource,		vmd_unmap_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		pcib_maxslots),
	DEVMETHOD(pcib_read_config,		vmd_read_config),
	DEVMETHOD(pcib_write_config,		vmd_write_config),
	DEVMETHOD(pcib_route_interrupt,		vmd_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,		vmd_alloc_msi),
	DEVMETHOD(pcib_release_msi,		vmd_release_msi),
	DEVMETHOD(pcib_alloc_msix,		vmd_alloc_msix),
	DEVMETHOD(pcib_release_msix,		vmd_release_msix),
	DEVMETHOD(pcib_map_msi,			vmd_map_msi),
	DEVMETHOD(pcib_request_feature,		pcib_request_feature_allow),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
DRIVER_MODULE(vmd, pci, vmd_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd,
    vmd_devs, nitems(vmd_devs) - 1);