/*	$NetBSD: linux_pci.c,v 1.30 2024/06/24 21:23:53 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "acpica.h"
#include "opt_pci.h"
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.30 2024/06/24 21:23:53 riastradh Exp $");

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#endif

#include <linux/pci.h>

#include <drm/drm_agp_netbsd.h>

device_t
pci_dev_dev(struct pci_dev *pdev)
{

	return pdev->pd_dev;
}

void
pci_set_drvdata(struct pci_dev *pdev, void *drvdata)
{
	pdev->pd_drvdata = drvdata;
}

void *
pci_get_drvdata(struct pci_dev *pdev)
{
	return pdev->pd_drvdata;
}

const char *
pci_name(struct pci_dev *pdev)
{

	/* XXX not sure this has the right format */
	return device_xname(pci_dev_dev(pdev));
}

/*
 * Set up enough of a parent that we can access config space.
 * This is gross and grovels pci(4) and ppb(4) internals.
 */
static struct pci_dev *
alloc_fake_parent_device(device_t parent, const struct pci_attach_args *pa)
{

	if (parent == NULL || !device_is_a(parent, "pci"))
		return NULL;

	device_t pparent = device_parent(parent);
	if (pparent == NULL || !device_is_a(pparent, "ppb"))
		return NULL;

	struct pci_softc *pcisc = device_private(parent);
	struct ppb_softc *ppbsc = device_private(pparent);

	struct pci_dev *parentdev = kmem_zalloc(sizeof(*parentdev), KM_SLEEP);

	/* Copy this device's pci_attach_args{} as a base-line. */
	struct pci_attach_args *npa = &parentdev->pd_pa;
	*npa = *pa;

	/* Now update with stuff found in parent. */
	npa->pa_iot = pcisc->sc_iot;
	npa->pa_memt = pcisc->sc_memt;
	npa->pa_dmat = pcisc->sc_dmat;
	npa->pa_dmat64 = pcisc->sc_dmat64;
	npa->pa_pc = pcisc->sc_pc;
	npa->pa_flags = 0;	/* XXX? */

	/* Copy the parent tag, and read some info about it. */
	npa->pa_tag = ppbsc->sc_tag;
	pcireg_t id = pci_conf_read(npa->pa_pc, npa->pa_tag, PCI_ID_REG);
	pcireg_t subid = pci_conf_read(npa->pa_pc, npa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	pcireg_t class = pci_conf_read(npa->pa_pc, npa->pa_tag, PCI_CLASS_REG);

	/*
	 * Fill in as much of pci_attach_args and pci_dev as reasonably
	 * possible.  Most of this is not used currently.
	 */
	int bus, device, function;
	pci_decompose_tag(npa->pa_pc, npa->pa_tag, &bus, &device, &function);
	npa->pa_device = device;
	npa->pa_function = function;
	npa->pa_bus = bus;
	npa->pa_id = id;
	npa->pa_class = class;
	npa->pa_intrswiz = pcisc->sc_intrswiz;
	npa->pa_intrtag = pcisc->sc_intrtag;
	npa->pa_intrpin = PCI_INTERRUPT_PIN_NONE;

	parentdev->pd_dev = parent;

	parentdev->bus = NULL;
	parentdev->devfn = device << 3 | function;
	parentdev->vendor = PCI_VENDOR(id);
	parentdev->device = PCI_PRODUCT(id);
	parentdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subid);
	parentdev->subsystem_device = PCI_SUBSYS_ID(subid);
	parentdev->revision = PCI_REVISION(class);
	parentdev->class = __SHIFTOUT(class, 0xffffff00UL); /* ? */

	return parentdev;
}

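/*
 * Initialize a Linux-style pci_dev from NetBSD's pci_attach_args:
 * identification registers, a minimal pci_bus (with a fake parent
 * bridge device where one can be found), and the BAR resource table.
 */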
void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	memset(pdev, 0, sizeof(*pdev)); /* paranoia */

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	const int seg = pci_get_segment(pa->pa_pc);
	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->pd_saved_state = NULL;
	pdev->pd_intr_handles = NULL;
	pdev->pd_drvdata = NULL;
	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	/*
	 * NetBSD doesn't have an easy "am I PCIe" or "give me PCIe speed
	 * from capability" function, but we already emulate the Linux
	 * versions that do.
	 */
	if (pci_is_pcie(pdev)) {
		pdev->bus->max_bus_speed = pcie_get_speed_cap(pdev);
	} else {
		/* XXX: Do AGP/PCI-X, etc.? */
		pdev->bus->max_bus_speed = PCI_SPEED_UNKNOWN;
	}
	pdev->bus->self = alloc_fake_parent_device(parent, pa);
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
		pdev->pd_resources[i].mapped = false;
	}
}

int
pci_find_capability(struct pci_dev *pdev, int cap)
{

	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{

	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

int
pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg);
	return 0;
}

int
pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
	return 0;
}

int
pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
	return 0;
}

int
pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(bus->pb_pc, tag, reg, value);
	return 0;
}

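/*
 * Read-modify-write helper for sub-dword config writes: NetBSD's
 * pci_conf_write operates on aligned 32-bit registers only, so byte
 * and word writes must update the containing dword.
 */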
static void
pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	value32 = pci_conf_read(pc, tag, reg32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_conf_write(pc, tag, reg32, value32);
}

int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
	return 0;
}

int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{

	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
	return 0;
}

int
pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
	return 0;
}

int
pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
	return 0;
}

int
pci_enable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
}

void
pci_disable_msi(struct pci_dev *pdev __unused)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->pd_intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
		pdev->pd_intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

int
pcie_capability_read_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{
	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	pcitag_t tag = pdev->pd_pa.pa_tag;
	int off;

	*valuep = 0;

	/* Must have capabilities. */
	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
		return 1;

	*valuep = pci_conf_read(pc, tag, off + reg);

	return 0;
}

int
pcie_capability_read_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{
	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	pcitag_t tag = pdev->pd_pa.pa_tag;
	int off;

	*valuep = 0;

	/* Must have capabilities. */
	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
		return 1;

	*valuep = pci_conf_read(pc, tag, off + (reg &~ 2)) >> (8 * (reg & 2));

	return 0;
}

int
pcie_capability_write_dword(struct pci_dev *pdev, int reg, uint32_t value)
{
	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	pcitag_t tag = pdev->pd_pa.pa_tag;
	int off;

	/* Must have capabilities. */
	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
		return 1;

	pci_conf_write(pc, tag, off + reg, value);

	return 0;
}

int
pcie_capability_write_word(struct pci_dev *pdev, int reg, uint16_t value)
{
	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	pcitag_t tag = pdev->pd_pa.pa_tag;
	int off;

	/* Must have capabilities. */
	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
		return 1;

	pci_rmw_config(pc, tag, off + reg, 2, value);

	return 0;
}

/* From PCIe 5.0 7.5.3.4 "Device Control Register" */
static const unsigned readrqmax[] = {
	128,
	256,
	512,
	1024,
	2048,
	4096,
};

int
pcie_get_readrq(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	pcitag_t tag = pdev->pd_pa.pa_tag;
	unsigned val;
	int off;

	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
		return -EINVAL; /* XXX NetBSD->Linux */

	val = __SHIFTOUT(pci_conf_read(pc, tag, off + PCIE_DCSR),
	    PCIE_DCSR_MAX_READ_REQ);

	if (val >= __arraycount(readrqmax))
		val = 0;
	return readrqmax[val];
}

int
pcie_set_readrq(struct pci_dev *pdev, int val)
{
	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t reg, newval = 0;
	unsigned i;
	int off;

	for (i = 0; i < __arraycount(readrqmax); i++) {
		if (readrqmax[i] == val) {
			newval = i;
			break;
		}
	}

	if (i == __arraycount(readrqmax))
		return -EINVAL;

	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
		return -EINVAL; /* XXX NetBSD->Linux */

	reg = pci_conf_read(pc, tag, off + PCIE_DCSR);
	reg &= ~PCIE_DCSR_MAX_READ_REQ;
	reg |= __SHIFTIN(newval, PCIE_DCSR_MAX_READ_REQ);
	pci_conf_write(pc, tag, off + PCIE_DCSR, reg);

	return 0;
}

bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

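/*
 * Allocate bus space for the resource from the device's memory or I/O
 * tag with bus_space_alloc, recording the tag, handle, and address
 * range in the resource for later use.
 */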
int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->end = resource->start + (size - 1);
	return 0;
}

struct pci_domain_bus_and_slot {
	int domain, bus, slot;
};

static int
pci_match_domain_bus_and_slot(void *cookie, const struct pci_attach_args *pa)
{
	const struct pci_domain_bus_and_slot *C = cookie;

	if (pci_get_segment(pa->pa_pc) != C->domain)
		return 0;
	if (pa->pa_bus != C->bus)
		return 0;
	if (PCI_DEVFN(pa->pa_device, pa->pa_function) != C->slot)
		return 0;

	return 1;
}

struct pci_dev *
pci_get_domain_bus_and_slot(int domain, int bus, int slot)
{
	struct pci_attach_args pa;
	struct pci_domain_bus_and_slot context = {domain, bus, slot},
	    *C = &context;

	if (!pci_find_device1(&pa, &pci_match_domain_bus_and_slot, C))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}

struct pci_get_class_state {
	uint32_t		class_subclass_interface;
	const struct pci_dev	*from;
};

static int
pci_get_class_match(void *cookie, const struct pci_attach_args *pa)
{
	struct pci_get_class_state *C = cookie;

	if (C->from) {
		if ((pci_get_segment(C->from->pd_pa.pa_pc) ==
			pci_get_segment(pa->pa_pc)) &&
		    C->from->pd_pa.pa_bus == pa->pa_bus &&
		    C->from->pd_pa.pa_device == pa->pa_device &&
		    C->from->pd_pa.pa_function == pa->pa_function)
			C->from = NULL;
		return 0;
	}
	if (C->class_subclass_interface !=
	    (PCI_CLASS(pa->pa_class) << 16 |
		PCI_SUBCLASS(pa->pa_class) << 8 |
		PCI_INTERFACE(pa->pa_class)))
		return 0;

	return 1;
}

struct pci_dev *
pci_get_class(uint32_t class_subclass_interface, struct pci_dev *from)
{
	struct pci_get_class_state context = {class_subclass_interface, from},
	    *C = &context;
	struct pci_attach_args pa;
	struct pci_dev *pdev = NULL;

	if (!pci_find_device1(&pa, &pci_get_class_match, C))
		goto out;
	pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

out:	if (from)
		pci_dev_put(from);
	return pdev;
}

int
pci_dev_present(const struct pci_device_id *ids)
{

	/* XXX implement me -- pci_find_device doesn't pass a cookie */
	return 0;
}

void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}

void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	if (i >= PCI_NUM_RESOURCES)
		panic("resource %d >= max %d", i, PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	if (i >= PCI_NUM_RESOURCES)
		panic("resource %d >= max %d", i, PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	if (i >= PCI_NUM_RESOURCES)
		panic("resource %d >= max %d", i, PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

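/*
 * Map memory BAR i with BUS_SPACE_MAP_LINEAR and return its kernel
 * virtual address, or NULL if the BAR is not a memory BAR, is too
 * small, or cannot be mapped.
 */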
void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error)
		return NULL;
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}

void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

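/*
 * Save and restore the device's configuration space with
 * pci_conf_capture/pci_conf_restore; the saved copy lives in
 * pd_saved_state between the two calls.
 */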
void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

bool
pci_is_thunderbolt_attached(struct pci_dev *pdev)
{

	/* XXX Cop-out.  */
	return false;
}

bool
pci_is_root_bus(struct pci_bus *bus)
{

	return bus->number == 0;
}

int
pci_domain_nr(struct pci_bus *bus)
{

	return pci_get_segment(bus->pb_pc);
}

/*
 * We explicitly rename pci_enable/disable_device so that you have to
 * review each use of them: NetBSD's PCI API does _not_ respect our
 * local enablecnt here, and other parts of NetBSD (PMF, for example)
 * enable and disable devices automatically, so each caller has to
 * decide whether to call these or not.
 */
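/*
 * Illustrative sketch only, not taken from any particular driver, of
 * how a caller is expected to pair these with bus mastering:
 *
 *	if (linux_pci_enable_device(pdev) != 0)
 *		goto fail;
 *	pci_set_master(pdev);
 *	...
 *	pci_clear_master(pdev);
 *	linux_pci_disable_device(pdev);
 */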

int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them.  */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}

void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}

void
linux_pci_dev_destroy(struct pci_dev *pdev)
{
	unsigned i;

	if (pdev->bus != NULL) {
		if (pdev->bus->self != NULL) {
			kmem_free(pdev->bus->self, sizeof(*pdev->bus->self));
		}
		kmem_free(pdev->bus, sizeof(*pdev->bus));
		pdev->bus = NULL;
	}
	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
		pdev->pd_rom_vaddr = NULL;
	}
	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
		if (!pdev->pd_resources[i].mapped)
			continue;
		bus_space_unmap(pdev->pd_resources[i].bst,
		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
	}

	/* There is no way these should be still in use.  */
	KASSERT(pdev->pd_saved_state == NULL);
	KASSERT(pdev->pd_intr_handles == NULL);
}

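/*
 * Report the maximum supported link speed, preferring the Supported
 * Link Speeds vector in LCAP2 (PCIe 3.0 and later) and falling back
 * to the Max Link Speed field in LCAP.
 */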
enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *dev)
{
	pci_chipset_tag_t pc = dev->pd_pa.pa_pc;
	pcitag_t tag = dev->pd_pa.pa_tag;
	pcireg_t lcap, lcap2, xcap;
	int off;

	/* Must have capabilities. */
	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
		return PCI_SPEED_UNKNOWN;

	/* Only PCIe 3.x has LCAP2. */
	xcap = pci_conf_read(pc, tag, off + PCIE_XCAP);
	if (__SHIFTOUT(xcap, PCIE_XCAP_VER_MASK) >= 2) {
		lcap2 = pci_conf_read(pc, tag, off + PCIE_LCAP2);
		if (lcap2) {
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS64) != 0) {
				return PCIE_SPEED_64_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS32) != 0) {
				return PCIE_SPEED_32_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS16) != 0) {
				return PCIE_SPEED_16_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS8) != 0) {
				return PCIE_SPEED_8_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS5) != 0) {
				return PCIE_SPEED_5_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS2) != 0) {
				return PCIE_SPEED_2_5GT;
			}
		}
	}

	lcap = pci_conf_read(pc, tag, off + PCIE_LCAP);
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_64) {
		return PCIE_SPEED_64_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_32) {
		return PCIE_SPEED_32_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_16) {
		return PCIE_SPEED_16_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_8) {
		return PCIE_SPEED_8_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_5) {
		return PCIE_SPEED_5_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_2) {
		return PCIE_SPEED_2_5GT;
	}

	return PCI_SPEED_UNKNOWN;
}

/*
 * This should walk the tree, but currently it only checks this device.
 * It also does not write to limiting_dev (the only caller in drm2
 * currently does not use it).
 */
unsigned
pcie_bandwidth_available(struct pci_dev *dev,
    struct pci_dev **limiting_dev,
    enum pci_bus_speed *speed,
    enum pcie_link_width *width)
{
	pci_chipset_tag_t pc = dev->pd_pa.pa_pc;
	pcitag_t tag = dev->pd_pa.pa_tag;
	pcireg_t lcsr;
	unsigned per_line_speed, num_lanes;
	int off;

	/* Must have capabilities. */
	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
		return 0;

	if (speed)
		*speed = PCI_SPEED_UNKNOWN;
	if (width)
		*width = 0;

	lcsr = pci_conf_read(pc, tag, off + PCIE_LCSR);

	switch (lcsr & PCIE_LCSR_NLW) {
	case PCIE_LCSR_NLW_X1:
	case PCIE_LCSR_NLW_X2:
	case PCIE_LCSR_NLW_X4:
	case PCIE_LCSR_NLW_X8:
	case PCIE_LCSR_NLW_X12:
	case PCIE_LCSR_NLW_X16:
	case PCIE_LCSR_NLW_X32:
		num_lanes = __SHIFTOUT(lcsr, PCIE_LCSR_NLW);
		if (width)
			*width = num_lanes;
		break;
	default:
		num_lanes = 0;
		break;
	}

	switch (__SHIFTOUT(lcsr, PCIE_LCSR_LINKSPEED)) {
	case PCIE_LCSR_LINKSPEED_2:
		if (speed)
			*speed = PCIE_SPEED_2_5GT;
		per_line_speed = 2500 * 8 / 10;
		break;
	case PCIE_LCSR_LINKSPEED_5:
		if (speed)
			*speed = PCIE_SPEED_5_0GT;
		per_line_speed = 5000 * 8 / 10;
		break;
	case PCIE_LCSR_LINKSPEED_8:
		if (speed)
			*speed = PCIE_SPEED_8_0GT;
		per_line_speed = 8000 * 128 / 130;
		break;
	case PCIE_LCSR_LINKSPEED_16:
		if (speed)
			*speed = PCIE_SPEED_16_0GT;
		per_line_speed = 16000 * 128 / 130;
		break;
	case PCIE_LCSR_LINKSPEED_32:
		if (speed)
			*speed = PCIE_SPEED_32_0GT;
		per_line_speed = 32000 * 128 / 130;
		break;
	case PCIE_LCSR_LINKSPEED_64:
		if (speed)
			*speed = PCIE_SPEED_64_0GT;
		per_line_speed = 64000 * 128 / 130;
		break;
	default:
		per_line_speed = 0;
	}

	return num_lanes * per_line_speed;
}