1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (c) 2014 Google, Inc
4 * Written by Simon Glass <sjg@chromium.org>
5 */
6
7#define LOG_CATEGORY UCLASS_PCI
8
9#include <common.h>
10#include <dm.h>
11#include <errno.h>
12#include <init.h>
13#include <log.h>
14#include <malloc.h>
15#include <pci.h>
16#include <spl.h>
17#include <asm/global_data.h>
18#include <asm/io.h>
19#include <dm/device-internal.h>
20#include <dm/lists.h>
21#include <dm/uclass-internal.h>
22#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
23#include <asm/fsp/fsp_support.h>
24#endif
25#include <dt-bindings/pci/pci.h>
26#include <linux/delay.h>
27#include <linux/printk.h>
28#include "pci_internal.h"
29
30DECLARE_GLOBAL_DATA_PTR;
31
32int pci_get_bus(int busnum, struct udevice **busp)
33{
34	int ret;
35
36	ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
37
38	/* Since buses may not be numbered yet try a little harder with bus 0 */
39	if (ret == -ENODEV) {
40		ret = uclass_first_device_err(UCLASS_PCI, busp);
41		if (ret)
42			return ret;
43		ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
44	}
45
46	return ret;
47}
48
49struct udevice *pci_get_controller(struct udevice *dev)
50{
51	while (device_is_on_pci_bus(dev))
52		dev = dev->parent;
53
54	return dev;
55}
56
pci_dev_t dm_pci_get_bdf(const struct udevice *dev)
{
	struct pci_child_plat *pplat = dev_get_parent_plat(dev);
	struct udevice *bus = dev->parent;

	/*
	 * This error indicates that @dev is a device on an unprobed PCI bus.
	 * The bus likely has bus->seq_ == -1, so the PCI_ADD_BUS() macro below
	 * will produce a bad BDF.
	 *
	 * A common cause of this problem is that this function is called in the
	 * of_to_plat() method of @dev. Accessing the PCI bus in that
	 * method is not allowed, since it has not yet been probed. To fix this,
	 * move that access to the probe() method of @dev instead.
	 */
	if (!device_active(bus))
		log_err("PCI: Device '%s' on unprobed bus '%s'\n", dev->name,
			bus->name);
	return PCI_ADD_BUS(dev_seq(bus), pplat->devfn);
}
77
78/**
79 * pci_get_bus_max() - returns the bus number of the last active bus
80 *
81 * Return: last bus number, or -1 if no active buses
82 */
83static int pci_get_bus_max(void)
84{
85	struct udevice *bus;
86	struct uclass *uc;
87	int ret = -1;
88
89	ret = uclass_get(UCLASS_PCI, &uc);
90	uclass_foreach_dev(bus, uc) {
91		if (dev_seq(bus) > ret)
92			ret = dev_seq(bus);
93	}
94
95	debug("%s: ret=%d\n", __func__, ret);
96
97	return ret;
98}
99
/* Return the highest bus number of any active PCI bus, or -1 if none */
int pci_last_busno(void)
{
	return pci_get_bus_max();
}
104
105int pci_get_ff(enum pci_size_t size)
106{
107	switch (size) {
108	case PCI_SIZE_8:
109		return 0xff;
110	case PCI_SIZE_16:
111		return 0xffff;
112	default:
113		return 0xffffffff;
114	}
115}
116
117static void pci_dev_find_ofnode(struct udevice *bus, phys_addr_t bdf,
118				ofnode *rnode)
119{
120	struct fdt_pci_addr addr;
121	ofnode node;
122	int ret;
123
124	dev_for_each_subnode(node, bus) {
125		ret = ofnode_read_pci_addr(node, FDT_PCI_SPACE_CONFIG, "reg",
126					   &addr, NULL);
127		if (ret)
128			continue;
129
130		if (PCI_MASK_BUS(addr.phys_hi) != PCI_MASK_BUS(bdf))
131			continue;
132
133		*rnode = node;
134		break;
135	}
136};
137
138int pci_bus_find_devfn(const struct udevice *bus, pci_dev_t find_devfn,
139		       struct udevice **devp)
140{
141	struct udevice *dev;
142
143	for (device_find_first_child(bus, &dev);
144	     dev;
145	     device_find_next_child(&dev)) {
146		struct pci_child_plat *pplat;
147
148		pplat = dev_get_parent_plat(dev);
149		if (pplat && pplat->devfn == find_devfn) {
150			*devp = dev;
151			return 0;
152		}
153	}
154
155	return -ENODEV;
156}
157
158int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp)
159{
160	struct udevice *bus;
161	int ret;
162
163	ret = pci_get_bus(PCI_BUS(bdf), &bus);
164	if (ret)
165		return ret;
166	return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp);
167}
168
169static int pci_device_matches_ids(struct udevice *dev,
170				  const struct pci_device_id *ids)
171{
172	struct pci_child_plat *pplat;
173	int i;
174
175	pplat = dev_get_parent_plat(dev);
176	if (!pplat)
177		return -EINVAL;
178	for (i = 0; ids[i].vendor != 0; i++) {
179		if (pplat->vendor == ids[i].vendor &&
180		    pplat->device == ids[i].device)
181			return i;
182	}
183
184	return -EINVAL;
185}
186
187int pci_bus_find_devices(struct udevice *bus, const struct pci_device_id *ids,
188			 int *indexp, struct udevice **devp)
189{
190	struct udevice *dev;
191
192	/* Scan all devices on this bus */
193	for (device_find_first_child(bus, &dev);
194	     dev;
195	     device_find_next_child(&dev)) {
196		if (pci_device_matches_ids(dev, ids) >= 0) {
197			if ((*indexp)-- <= 0) {
198				*devp = dev;
199				return 0;
200			}
201		}
202	}
203
204	return -ENODEV;
205}
206
207int pci_find_device_id(const struct pci_device_id *ids, int index,
208		       struct udevice **devp)
209{
210	struct udevice *bus;
211
212	/* Scan all known buses */
213	for (uclass_first_device(UCLASS_PCI, &bus);
214	     bus;
215	     uclass_next_device(&bus)) {
216		if (!pci_bus_find_devices(bus, ids, &index, devp))
217			return 0;
218	}
219	*devp = NULL;
220
221	return -ENODEV;
222}
223
224static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor,
225				  unsigned int device, int *indexp,
226				  struct udevice **devp)
227{
228	struct pci_child_plat *pplat;
229	struct udevice *dev;
230
231	for (device_find_first_child(bus, &dev);
232	     dev;
233	     device_find_next_child(&dev)) {
234		pplat = dev_get_parent_plat(dev);
235		if (pplat->vendor == vendor && pplat->device == device) {
236			if (!(*indexp)--) {
237				*devp = dev;
238				return 0;
239			}
240		}
241	}
242
243	return -ENODEV;
244}
245
246int dm_pci_find_device(unsigned int vendor, unsigned int device, int index,
247		       struct udevice **devp)
248{
249	struct udevice *bus;
250
251	/* Scan all known buses */
252	for (uclass_first_device(UCLASS_PCI, &bus);
253	     bus;
254	     uclass_next_device(&bus)) {
255		if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp))
256			return device_probe(*devp);
257	}
258	*devp = NULL;
259
260	return -ENODEV;
261}
262
263int dm_pci_find_class(uint find_class, int index, struct udevice **devp)
264{
265	struct udevice *dev;
266
267	/* Scan all known buses */
268	for (pci_find_first_device(&dev);
269	     dev;
270	     pci_find_next_device(&dev)) {
271		struct pci_child_plat *pplat = dev_get_parent_plat(dev);
272
273		if (pplat->class == find_class && !index--) {
274			*devp = dev;
275			return device_probe(*devp);
276		}
277	}
278	*devp = NULL;
279
280	return -ENODEV;
281}
282
283int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
284			 unsigned long value, enum pci_size_t size)
285{
286	struct dm_pci_ops *ops;
287
288	ops = pci_get_ops(bus);
289	if (!ops->write_config)
290		return -ENOSYS;
291	if (offset < 0 || offset >= 4096)
292		return -EINVAL;
293	return ops->write_config(bus, bdf, offset, value, size);
294}
295
296int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset,
297			    u32 clr, u32 set)
298{
299	ulong val;
300	int ret;
301
302	ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32);
303	if (ret)
304		return ret;
305	val &= ~clr;
306	val |= set;
307
308	return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32);
309}
310
311static int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
312			    enum pci_size_t size)
313{
314	struct udevice *bus;
315	int ret;
316
317	ret = pci_get_bus(PCI_BUS(bdf), &bus);
318	if (ret)
319		return ret;
320
321	return pci_bus_write_config(bus, bdf, offset, value, size);
322}
323
324int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
325			enum pci_size_t size)
326{
327	struct udevice *bus;
328
329	for (bus = dev; device_is_on_pci_bus(bus);)
330		bus = bus->parent;
331	return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value,
332				    size);
333}
334
/* Fixed-size write helpers addressing a device by BDF */
int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_32);
}

int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_16);
}

int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_8);
}

/* Fixed-size write helpers addressing a device by udevice */
int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
}

int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
}
364
365int pci_bus_read_config(const struct udevice *bus, pci_dev_t bdf, int offset,
366			unsigned long *valuep, enum pci_size_t size)
367{
368	struct dm_pci_ops *ops;
369
370	ops = pci_get_ops(bus);
371	if (!ops->read_config) {
372		*valuep = pci_conv_32_to_size(~0, offset, size);
373		return -ENOSYS;
374	}
375	if (offset < 0 || offset >= 4096) {
376		*valuep = pci_conv_32_to_size(0, offset, size);
377		return -EINVAL;
378	}
379	return ops->read_config(bus, bdf, offset, valuep, size);
380}
381
382static int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
383			   enum pci_size_t size)
384{
385	struct udevice *bus;
386	int ret;
387
388	ret = pci_get_bus(PCI_BUS(bdf), &bus);
389	if (ret)
390		return ret;
391
392	return pci_bus_read_config(bus, bdf, offset, valuep, size);
393}
394
395int dm_pci_read_config(const struct udevice *dev, int offset,
396		       unsigned long *valuep, enum pci_size_t size)
397{
398	const struct udevice *bus;
399
400	for (bus = dev; device_is_on_pci_bus(bus);)
401		bus = bus->parent;
402	return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep,
403				   size);
404}
405
406int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep)
407{
408	unsigned long value;
409	int ret;
410
411	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32);
412	if (ret)
413		return ret;
414	*valuep = value;
415
416	return 0;
417}
418
419int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep)
420{
421	unsigned long value;
422	int ret;
423
424	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16);
425	if (ret)
426		return ret;
427	*valuep = value;
428
429	return 0;
430}
431
432int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep)
433{
434	unsigned long value;
435	int ret;
436
437	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8);
438	if (ret)
439		return ret;
440	*valuep = value;
441
442	return 0;
443}
444
445int dm_pci_read_config8(const struct udevice *dev, int offset, u8 *valuep)
446{
447	unsigned long value;
448	int ret;
449
450	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8);
451	if (ret)
452		return ret;
453	*valuep = value;
454
455	return 0;
456}
457
458int dm_pci_read_config16(const struct udevice *dev, int offset, u16 *valuep)
459{
460	unsigned long value;
461	int ret;
462
463	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16);
464	if (ret)
465		return ret;
466	*valuep = value;
467
468	return 0;
469}
470
471int dm_pci_read_config32(const struct udevice *dev, int offset, u32 *valuep)
472{
473	unsigned long value;
474	int ret;
475
476	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32);
477	if (ret)
478		return ret;
479	*valuep = value;
480
481	return 0;
482}
483
484int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set)
485{
486	u8 val;
487	int ret;
488
489	ret = dm_pci_read_config8(dev, offset, &val);
490	if (ret)
491		return ret;
492	val &= ~clr;
493	val |= set;
494
495	return dm_pci_write_config8(dev, offset, val);
496}
497
498int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set)
499{
500	u16 val;
501	int ret;
502
503	ret = dm_pci_read_config16(dev, offset, &val);
504	if (ret)
505		return ret;
506	val &= ~clr;
507	val |= set;
508
509	return dm_pci_write_config16(dev, offset, val);
510}
511
512int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set)
513{
514	u32 val;
515	int ret;
516
517	ret = dm_pci_read_config32(dev, offset, &val);
518	if (ret)
519		return ret;
520	val &= ~clr;
521	val |= set;
522
523	return dm_pci_write_config32(dev, offset, val);
524}
525
/*
 * set_vga_bridge_bits() - Enable VGA forwarding on all upstream bridges
 *
 * Walks from @dev's parent up towards the root, OR-ing PCI_BRIDGE_CTL_VGA
 * into each bridge's control register, stopping at the bus whose sequence
 * number is 0.
 *
 * NOTE(review): the config read/write return values are ignored; a failed
 * read would leave @bc uninitialised before the write — presumably
 * acceptable on this path, but worth confirming.
 */
static void set_vga_bridge_bits(struct udevice *dev)
{
	struct udevice *parent = dev->parent;
	u16 bc;

	while (dev_seq(parent) != 0) {
		dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc);
		bc |= PCI_BRIDGE_CTL_VGA;
		dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc);
		parent = parent->parent;
	}
}
538
/*
 * pci_auto_config_devices() - Autoconfigure all devices on a bus
 *
 * Runs PCI auto-configuration for every child of @bus, tracking the
 * highest subordinate bus number seen and recording it in the hose.
 *
 * Return: highest subordinate bus number (via log_msg_ret), or -ve on error
 */
int pci_auto_config_devices(struct udevice *bus)
{
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	struct pci_child_plat *pplat;
	unsigned int sub_bus;
	struct udevice *dev;

	sub_bus = dev_seq(bus);
	debug("%s: start\n", __func__);
	pciauto_config_init(hose);
	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		unsigned int max_bus;
		int ret;

		debug("%s: device %s\n", __func__, dev->name);
		/* Honour a device-tree opt-out from auto-configuration */
		if (dev_has_ofnode(dev) &&
		    dev_read_bool(dev, "pci,no-autoconfig"))
			continue;
		ret = dm_pciauto_config_device(dev);
		if (ret < 0)
			return log_msg_ret("auto", ret);
		/* A non-negative return is the highest bus number behind @dev */
		max_bus = ret;
		sub_bus = max(sub_bus, max_bus);

		/*
		 * NOTE(review): only devices that are NOT direct children of
		 * @bus reach the VGA check below — presumably devices behind
		 * a bridge; confirm the intent.
		 */
		if (dev_get_parent(dev) == bus)
			continue;

		pplat = dev_get_parent_plat(dev);
		if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8))
			set_vga_bridge_bits(dev);
	}
	if (hose->last_busno < sub_bus)
		hose->last_busno = sub_bus;
	debug("%s: done\n", __func__);

	return log_msg_ret("sub", sub_bus);
}
578
579int pci_generic_mmap_write_config(
580	const struct udevice *bus,
581	int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset,
582		      void **addrp),
583	pci_dev_t bdf,
584	uint offset,
585	ulong value,
586	enum pci_size_t size)
587{
588	void *address;
589
590	if (addr_f(bus, bdf, offset, &address) < 0)
591		return 0;
592
593	switch (size) {
594	case PCI_SIZE_8:
595		writeb(value, address);
596		return 0;
597	case PCI_SIZE_16:
598		writew(value, address);
599		return 0;
600	case PCI_SIZE_32:
601		writel(value, address);
602		return 0;
603	default:
604		return -EINVAL;
605	}
606}
607
608int pci_generic_mmap_read_config(
609	const struct udevice *bus,
610	int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset,
611		      void **addrp),
612	pci_dev_t bdf,
613	uint offset,
614	ulong *valuep,
615	enum pci_size_t size)
616{
617	void *address;
618
619	if (addr_f(bus, bdf, offset, &address) < 0) {
620		*valuep = pci_get_ff(size);
621		return 0;
622	}
623
624	switch (size) {
625	case PCI_SIZE_8:
626		*valuep = readb(address);
627		return 0;
628	case PCI_SIZE_16:
629		*valuep = readw(address);
630		return 0;
631	case PCI_SIZE_32:
632		*valuep = readl(address);
633		return 0;
634	default:
635		return -EINVAL;
636	}
637}
638
/*
 * dm_pci_hose_probe_bus() - Probe a bridge and assign its bus range
 *
 * Verifies that @bus has a bridge header type, chooses a subordinate bus
 * number (from the Enhanced Allocation capability if present, otherwise
 * the next free bus number), programs the bridge around the probe and
 * returns the resulting subordinate bus number.
 *
 * Return: subordinate bus number on success, -ve (via log_msg_ret) on error
 */
int dm_pci_hose_probe_bus(struct udevice *bus)
{
	u8 header_type;
	int sub_bus;
	int ret;
	int ea_pos;
	u8 reg;

	debug("%s\n", __func__);

	/* Only bridge devices can have a subordinate bus */
	dm_pci_read_config8(bus, PCI_HEADER_TYPE, &header_type);
	header_type &= 0x7f;
	if (header_type != PCI_HEADER_TYPE_BRIDGE) {
		debug("%s: Skipping PCI device %d with Non-Bridge Header Type 0x%x\n",
		      __func__, PCI_DEV(dm_pci_get_bdf(bus)), header_type);
		return log_msg_ret("probe", -EINVAL);
	}

	if (IS_ENABLED(CONFIG_PCI_ENHANCED_ALLOCATION))
		ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA);
	else
		ea_pos = 0;

	if (ea_pos) {
		/* The EA capability fixes the secondary bus number */
		dm_pci_read_config8(bus, ea_pos + sizeof(u32) + sizeof(u8),
				    &reg);
		sub_bus = reg;
	} else {
		sub_bus = pci_get_bus_max() + 1;
	}
	debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
	dm_pciauto_prescan_setup_bridge(bus, sub_bus);

	ret = device_probe(bus);
	if (ret) {
		debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name,
		      ret);
		return log_msg_ret("probe", ret);
	}

	/* Without EA, probing may have added buses; re-read the maximum */
	if (!ea_pos)
		sub_bus = pci_get_bus_max();

	dm_pciauto_postscan_setup_bridge(bus, sub_bus);

	return sub_bus;
}
686
687/**
688 * pci_match_one_device - Tell if a PCI device structure has a matching
689 *                        PCI device id structure
690 * @id: single PCI device id structure to match
691 * @find: the PCI device id structure to match against
692 *
693 * Returns true if the finding pci_device_id structure matched or false if
694 * there is no match.
695 */
696static bool pci_match_one_id(const struct pci_device_id *id,
697			     const struct pci_device_id *find)
698{
699	if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) &&
700	    (id->device == PCI_ANY_ID || id->device == find->device) &&
701	    (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) &&
702	    (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) &&
703	    !((id->class ^ find->class) & id->class_mask))
704		return true;
705
706	return false;
707}
708
709/**
710 * pci_need_device_pre_reloc() - Check if a device should be bound
711 *
712 * This checks a list of vendor/device-ID values indicating devices that should
713 * be bound before relocation.
714 *
715 * @bus: Bus to check
716 * @vendor: Vendor ID to check
717 * @device: Device ID to check
718 * Return: true if the vendor/device is in the list, false if not
719 */
720static bool pci_need_device_pre_reloc(struct udevice *bus, uint vendor,
721				      uint device)
722{
723	u32 vendev;
724	int index;
725
726	if (spl_phase() == PHASE_SPL && CONFIG_IS_ENABLED(PCI_PNP))
727		return true;
728
729	for (index = 0;
730	     !dev_read_u32_index(bus, "u-boot,pci-pre-reloc", index,
731				 &vendev);
732	     index++) {
733		if (vendev == PCI_VENDEV(vendor, device))
734			return true;
735	}
736
737	return false;
738}
739
740/**
741 * pci_find_and_bind_driver() - Find and bind the right PCI driver
742 *
743 * This only looks at certain fields in the descriptor.
744 *
745 * @parent:	Parent bus
746 * @find_id:	Specification of the driver to find
747 * @bdf:	Bus/device/function addreess - see PCI_BDF()
748 * @devp:	Returns a pointer to the device created
749 * Return: 0 if OK, -EPERM if the device is not needed before relocation and
750 *	   therefore was not created, other -ve value on error
751 */
752static int pci_find_and_bind_driver(struct udevice *parent,
753				    struct pci_device_id *find_id,
754				    pci_dev_t bdf, struct udevice **devp)
755{
756	struct pci_driver_entry *start, *entry;
757	ofnode node = ofnode_null();
758	const char *drv;
759	int n_ents;
760	int ret;
761	char name[30], *str;
762	bool bridge;
763
764	*devp = NULL;
765
766	debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
767	      find_id->vendor, find_id->device);
768
769	/* Determine optional OF node */
770	if (ofnode_valid(dev_ofnode(parent)))
771		pci_dev_find_ofnode(parent, bdf, &node);
772
773	if (ofnode_valid(node) && !ofnode_is_enabled(node)) {
774		debug("%s: Ignoring disabled device\n", __func__);
775		return log_msg_ret("dis", -EPERM);
776	}
777
778	start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
779	n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
780	for (entry = start; entry != start + n_ents; entry++) {
781		const struct pci_device_id *id;
782		struct udevice *dev;
783		const struct driver *drv;
784
785		for (id = entry->match;
786		     id->vendor || id->subvendor || id->class_mask;
787		     id++) {
788			if (!pci_match_one_id(id, find_id))
789				continue;
790
791			drv = entry->driver;
792
793			/*
794			 * In the pre-relocation phase, we only bind devices
795			 * whose driver has the DM_FLAG_PRE_RELOC set, to save
796			 * precious memory space as on some platforms as that
797			 * space is pretty limited (ie: using Cache As RAM).
798			 */
799			if (!(gd->flags & GD_FLG_RELOC) &&
800			    !(drv->flags & DM_FLAG_PRE_RELOC) &&
801			    (!CONFIG_IS_ENABLED(PCI_PNP) ||
802			     spl_phase() != PHASE_SPL))
803				return log_msg_ret("pre", -EPERM);
804
805			/*
806			 * We could pass the descriptor to the driver as
807			 * plat (instead of NULL) and allow its bind()
808			 * method to return -ENOENT if it doesn't support this
809			 * device. That way we could continue the search to
810			 * find another driver. For now this doesn't seem
811			 * necesssary, so just bind the first match.
812			 */
813			ret = device_bind(parent, drv, drv->name, NULL, node,
814					  &dev);
815			if (ret)
816				goto error;
817			debug("%s: Match found: %s\n", __func__, drv->name);
818			dev->driver_data = id->driver_data;
819			*devp = dev;
820			return 0;
821		}
822	}
823
824	bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
825	/*
826	 * In the pre-relocation phase, we only bind bridge devices to save
827	 * precious memory space as on some platforms as that space is pretty
828	 * limited (ie: using Cache As RAM).
829	 */
830	if (!(gd->flags & GD_FLG_RELOC) && !bridge &&
831	    !pci_need_device_pre_reloc(parent, find_id->vendor,
832				       find_id->device))
833		return log_msg_ret("notbr", -EPERM);
834
835	/* Bind a generic driver so that the device can be used */
836	sprintf(name, "pci_%x:%x.%x", dev_seq(parent), PCI_DEV(bdf),
837		PCI_FUNC(bdf));
838	str = strdup(name);
839	if (!str)
840		return -ENOMEM;
841	drv = bridge ? "pci_bridge_drv" : "pci_generic_drv";
842
843	ret = device_bind_driver_to_node(parent, drv, str, node, devp);
844	if (ret) {
845		debug("%s: Failed to bind generic driver: %d\n", __func__, ret);
846		free(str);
847		return ret;
848	}
849	debug("%s: No match found: bound generic driver instead\n", __func__);
850
851	return 0;
852
853error:
854	debug("%s: No match found: error %d\n", __func__, ret);
855	return ret;
856}
857
858__weak extern void board_pci_fixup_dev(struct udevice *bus, struct udevice *dev)
859{
860}
861
/*
 * pci_bind_bus_devices() - Scan a bus and bind a device for each function
 *
 * Probes every devfn on @bus by reading its vendor ID, then either finds
 * the matching device-tree node or binds a driver via
 * pci_find_and_bind_driver(). Fills in the child platform data and applies
 * board fixups.
 *
 * Return: 0 on success, -ve on error
 */
int pci_bind_bus_devices(struct udevice *bus)
{
	ulong vendor, device;
	ulong header_type;
	pci_dev_t bdf, end;
	bool found_multi;
	int ari_off;
	int ret;

	found_multi = false;
	end = PCI_BDF(dev_seq(bus), PCI_MAX_PCI_DEVICES - 1,
		      PCI_MAX_PCI_FUNCTIONS - 1);
	for (bdf = PCI_BDF(dev_seq(bus), 0, 0); bdf <= end;
	     bdf += PCI_BDF(0, 0, 1)) {
		struct pci_child_plat *pplat;
		struct udevice *dev;
		ulong class;

		/* New device slot: reset the multi-function flag */
		if (!PCI_FUNC(bdf))
			found_multi = false;
		/* Skip functions 1-7 of single-function devices */
		if (PCI_FUNC(bdf) && !found_multi)
			continue;

		/* Check only the first access, we don't expect problems */
		ret = pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor,
					  PCI_SIZE_16);
		if (ret || vendor == 0xffff || vendor == 0x0000)
			continue;

		pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE,
				    &header_type, PCI_SIZE_8);

		/* Bit 7 of the header type marks a multi-function device */
		if (!PCI_FUNC(bdf))
			found_multi = header_type & 0x80;

		debug("%s: bus %d/%s: found device %x, function %d", __func__,
		      dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
		pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device,
				    PCI_SIZE_16);
		pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class,
				    PCI_SIZE_32);
		class >>= 8;	/* drop the revision byte */

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);
		debug(": find ret=%d\n", ret);

		/* If nothing in the device tree, bind a device */
		if (ret == -ENODEV) {
			struct pci_device_id find_id;
			ulong val;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;
			/* Subsystem IDs exist only in a type-0 header */
			if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) {
				pci_bus_read_config(bus, bdf,
						    PCI_SUBSYSTEM_VENDOR_ID,
						    &val, PCI_SIZE_32);
				find_id.subvendor = val & 0xffff;
				find_id.subdevice = val >> 16;
			}
			ret = pci_find_and_bind_driver(bus, &find_id, bdf,
						       &dev);
		} else {
			debug("device: %s\n", dev->name);
		}
		/* -EPERM means the device is deliberately not bound (yet) */
		if (ret == -EPERM)
			continue;
		else if (ret)
			return ret;

		/* Update the platform data */
		pplat = dev_get_parent_plat(dev);
		pplat->devfn = PCI_MASK_BUS(bdf);
		pplat->vendor = vendor;
		pplat->device = device;
		pplat->class = class;

		if (IS_ENABLED(CONFIG_PCI_ARID)) {
			ari_off = dm_pci_find_ext_capability(dev,
							     PCI_EXT_CAP_ID_ARI);
			if (ari_off) {
				u16 ari_cap;

				/*
				 * Read Next Function number in ARI Cap
				 * Register
				 */
				dm_pci_read_config16(dev, ari_off + 4,
						     &ari_cap);
				/*
				 * Update next scan on this function number,
				 * subtract 1 in BDF to satisfy loop increment.
				 */
				if (ari_cap & 0xff00) {
					bdf = PCI_BDF(PCI_BUS(bdf),
						      PCI_DEV(ari_cap),
						      PCI_FUNC(ari_cap));
					bdf = bdf - 0x100;
				}
			}
		}

		board_pci_fixup_dev(bus, dev);
	}

	return 0;
}
972
973static int decode_regions(struct pci_controller *hose, ofnode parent_node,
974			   ofnode node)
975{
976	int pci_addr_cells, addr_cells, size_cells;
977	int cells_per_record;
978	struct bd_info *bd;
979	const u32 *prop;
980	int max_regions;
981	int len;
982	int i;
983
984	/* handle booting from coreboot, etc. */
985	if (!ll_boot_init())
986		return 0;
987
988	prop = ofnode_get_property(node, "ranges", &len);
989	if (!prop) {
990		debug("%s: Cannot decode regions\n", __func__);
991		return -EINVAL;
992	}
993
994	pci_addr_cells = ofnode_read_simple_addr_cells(node);
995	addr_cells = ofnode_read_simple_addr_cells(parent_node);
996	size_cells = ofnode_read_simple_size_cells(node);
997
998	/* PCI addresses are always 3-cells */
999	len /= sizeof(u32);
1000	cells_per_record = pci_addr_cells + addr_cells + size_cells;
1001	hose->region_count = 0;
1002	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
1003	      cells_per_record);
1004
1005	/* Dynamically allocate the regions array */
1006	max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;
1007	hose->regions = (struct pci_region *)
1008		calloc(1, max_regions * sizeof(struct pci_region));
1009	if (!hose->regions)
1010		return -ENOMEM;
1011
1012	for (i = 0; i < max_regions; i++, len -= cells_per_record) {
1013		u64 pci_addr, addr, size;
1014		int space_code;
1015		u32 flags;
1016		int type;
1017		int pos;
1018
1019		if (len < cells_per_record)
1020			break;
1021		flags = fdt32_to_cpu(prop[0]);
1022		space_code = (flags >> 24) & 3;
1023		pci_addr = fdtdec_get_number(prop + 1, 2);
1024		prop += pci_addr_cells;
1025		addr = fdtdec_get_number(prop, addr_cells);
1026		prop += addr_cells;
1027		size = fdtdec_get_number(prop, size_cells);
1028		prop += size_cells;
1029		debug("%s: region %d, pci_addr=%llx, addr=%llx, size=%llx, space_code=%d\n",
1030		      __func__, hose->region_count, pci_addr, addr, size, space_code);
1031		if (space_code & 2) {
1032			type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
1033					PCI_REGION_MEM;
1034		} else if (space_code & 1) {
1035			type = PCI_REGION_IO;
1036		} else {
1037			continue;
1038		}
1039
1040		if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) &&
1041		    type == PCI_REGION_MEM && upper_32_bits(pci_addr)) {
1042			debug(" - pci_addr beyond the 32-bit boundary, ignoring\n");
1043			continue;
1044		}
1045
1046		if (!IS_ENABLED(CONFIG_PHYS_64BIT) && upper_32_bits(addr)) {
1047			debug(" - addr beyond the 32-bit boundary, ignoring\n");
1048			continue;
1049		}
1050
1051		if (~((pci_addr_t)0) - pci_addr < size) {
1052			debug(" - PCI range exceeds max address, ignoring\n");
1053			continue;
1054		}
1055
1056		if (~((phys_addr_t)0) - addr < size) {
1057			debug(" - phys range exceeds max address, ignoring\n");
1058			continue;
1059		}
1060
1061		pos = -1;
1062		if (!IS_ENABLED(CONFIG_PCI_REGION_MULTI_ENTRY)) {
1063			for (i = 0; i < hose->region_count; i++) {
1064				if (hose->regions[i].flags == type)
1065					pos = i;
1066			}
1067		}
1068
1069		if (pos == -1)
1070			pos = hose->region_count++;
1071		debug(" - type=%d, pos=%d\n", type, pos);
1072		pci_set_region(hose->regions + pos, pci_addr, addr, size, type);
1073	}
1074
1075	/* Add a region for our local memory */
1076	bd = gd->bd;
1077	if (!bd)
1078		return 0;
1079
1080	for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) {
1081		if (bd->bi_dram[i].size) {
1082			phys_addr_t start = bd->bi_dram[i].start;
1083
1084			if (IS_ENABLED(CONFIG_PCI_MAP_SYSTEM_MEMORY))
1085				start = virt_to_phys((void *)(uintptr_t)bd->bi_dram[i].start);
1086
1087			pci_set_region(hose->regions + hose->region_count++,
1088				       start, start, bd->bi_dram[i].size,
1089				       PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
1090		}
1091	}
1092
1093	return 0;
1094}
1095
/*
 * pci_uclass_pre_probe() - Prepare a PCI bus before it is probed
 *
 * Assigns the bus a sequence number if it does not have one, records the
 * top-level controller in the hose, decodes "ranges" for root buses and
 * initialises the hose's bus-number bookkeeping.
 */
static int pci_uclass_pre_probe(struct udevice *bus)
{
	struct pci_controller *hose;
	struct uclass *uc;
	int ret;

	debug("%s, bus=%d/%s, parent=%s\n", __func__, dev_seq(bus), bus->name,
	      bus->parent->name);
	hose = dev_get_uclass_priv(bus);

	/*
	 * Set the sequence number, if device_bind() doesn't. We want control
	 * of this so that numbers are allocated as devices are probed. That
	 * ensures that sub-bus numbering is correct (sub-buses must get
	 * numbers higher than their parents)
	 */
	if (dev_seq(bus) == -1) {
		ret = uclass_get(UCLASS_PCI, &uc);
		if (ret)
			return ret;
		bus->seq_ = uclass_find_next_free_seq(uc);
	}

	/* For bridges, use the top-level PCI controller */
	if (!device_is_on_pci_bus(bus)) {
		hose->ctlr = bus;
		ret = decode_regions(hose, dev_ofnode(bus->parent),
				     dev_ofnode(bus));
		if (ret)
			return ret;
	} else {
		struct pci_controller *parent_hose;

		/* A bridge inherits its parent's controller */
		parent_hose = dev_get_uclass_priv(bus->parent);
		hose->ctlr = parent_hose->bus;
	}

	hose->bus = bus;
	hose->first_busno = dev_seq(bus);
	hose->last_busno = dev_seq(bus);
	if (dev_has_ofnode(bus)) {
		hose->skip_auto_config_until_reloc =
			dev_read_bool(bus,
				      "u-boot,skip-auto-config-until-reloc");
	}

	return 0;
}
1144
/*
 * pci_uclass_post_probe() - Enumerate and configure devices after bus probe
 *
 * Binds a device for every function found on @bus, then (when PNP is
 * enabled and not deferred) runs auto-configuration. On x86 FSP systems
 * it also notifies the FSP once the root bus has been enumerated.
 */
static int pci_uclass_post_probe(struct udevice *bus)
{
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	int ret;

	debug("%s: probing bus %d\n", __func__, dev_seq(bus));
	ret = pci_bind_bus_devices(bus);
	if (ret)
		return log_msg_ret("bind", ret);

	/* Auto-config may be deferred until after relocation */
	if (CONFIG_IS_ENABLED(PCI_PNP) && ll_boot_init() &&
	    (!hose->skip_auto_config_until_reloc ||
	     (gd->flags & GD_FLG_RELOC))) {
		ret = pci_auto_config_devices(bus);
		if (ret < 0)
			return log_msg_ret("cfg", ret);
	}

#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
	/*
	 * Per Intel FSP specification, we should call FSP notify API to
	 * inform FSP that PCI enumeration has been done so that FSP will
	 * do any necessary initialization as required by the chipset's
	 * BIOS Writer's Guide (BWG).
	 *
	 * Unfortunately we have to put this call here as with driver model,
	 * the enumeration is all done on a lazy basis as needed, so until
	 * something is touched on PCI it won't happen.
	 *
	 * Note we only call this 1) after U-Boot is relocated, and 2)
	 * root bus has finished probing.
	 */
	if ((gd->flags & GD_FLG_RELOC) && dev_seq(bus) == 0 && ll_boot_init()) {
		ret = fsp_init_phase_pci();
		if (ret)
			return log_msg_ret("fsp", ret);
	}
#endif

	return 0;
}
1186
1187static int pci_uclass_child_post_bind(struct udevice *dev)
1188{
1189	struct pci_child_plat *pplat;
1190
1191	if (!dev_has_ofnode(dev))
1192		return 0;
1193
1194	pplat = dev_get_parent_plat(dev);
1195
1196	/* Extract vendor id and device id if available */
1197	ofnode_read_pci_vendev(dev_ofnode(dev), &pplat->vendor, &pplat->device);
1198
1199	/* Extract the devfn from fdt_pci_addr */
1200	pplat->devfn = pci_get_devfn(dev);
1201
1202	return 0;
1203}
1204
1205static int pci_bridge_read_config(const struct udevice *bus, pci_dev_t bdf,
1206				  uint offset, ulong *valuep,
1207				  enum pci_size_t size)
1208{
1209	struct pci_controller *hose = dev_get_uclass_priv(bus);
1210
1211	return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size);
1212}
1213
1214static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf,
1215				   uint offset, ulong value,
1216				   enum pci_size_t size)
1217{
1218	struct pci_controller *hose = dev_get_uclass_priv(bus);
1219
1220	return pci_bus_write_config(hose->ctlr, bdf, offset, value, size);
1221}
1222
1223static int skip_to_next_device(struct udevice *bus, struct udevice **devp)
1224{
1225	struct udevice *dev;
1226
1227	/*
1228	 * Scan through all the PCI controllers. On x86 there will only be one
1229	 * but that is not necessarily true on other hardware.
1230	 */
1231	while (bus) {
1232		device_find_first_child(bus, &dev);
1233		if (dev) {
1234			*devp = dev;
1235			return 0;
1236		}
1237		uclass_next_device(&bus);
1238	}
1239
1240	return 0;
1241}
1242
1243int pci_find_next_device(struct udevice **devp)
1244{
1245	struct udevice *child = *devp;
1246	struct udevice *bus = child->parent;
1247
1248	/* First try all the siblings */
1249	*devp = NULL;
1250	while (child) {
1251		device_find_next_child(&child);
1252		if (child) {
1253			*devp = child;
1254			return 0;
1255		}
1256	}
1257
1258	/* We ran out of siblings. Try the next bus */
1259	uclass_next_device(&bus);
1260
1261	return bus ? skip_to_next_device(bus, devp) : 0;
1262}
1263
1264int pci_find_first_device(struct udevice **devp)
1265{
1266	struct udevice *bus;
1267
1268	*devp = NULL;
1269	uclass_first_device(UCLASS_PCI, &bus);
1270
1271	return skip_to_next_device(bus, devp);
1272}
1273
1274ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size)
1275{
1276	switch (size) {
1277	case PCI_SIZE_8:
1278		return (value >> ((offset & 3) * 8)) & 0xff;
1279	case PCI_SIZE_16:
1280		return (value >> ((offset & 2) * 8)) & 0xffff;
1281	default:
1282		return value;
1283	}
1284}
1285
1286ulong pci_conv_size_to_32(ulong old, ulong value, uint offset,
1287			  enum pci_size_t size)
1288{
1289	uint off_mask;
1290	uint val_mask, shift;
1291	ulong ldata, mask;
1292
1293	switch (size) {
1294	case PCI_SIZE_8:
1295		off_mask = 3;
1296		val_mask = 0xff;
1297		break;
1298	case PCI_SIZE_16:
1299		off_mask = 2;
1300		val_mask = 0xffff;
1301		break;
1302	default:
1303		return value;
1304	}
1305	shift = (offset & off_mask) * 8;
1306	ldata = (value & val_mask) << shift;
1307	mask = val_mask << shift;
1308	value = (old & ~mask) | ldata;
1309
1310	return value;
1311}
1312
1313int pci_get_dma_regions(struct udevice *dev, struct pci_region *memp, int index)
1314{
1315	int pci_addr_cells, addr_cells, size_cells;
1316	int cells_per_record;
1317	const u32 *prop;
1318	int len;
1319	int i = 0;
1320
1321	prop = ofnode_get_property(dev_ofnode(dev), "dma-ranges", &len);
1322	if (!prop) {
1323		log_err("PCI: Device '%s': Cannot decode dma-ranges\n",
1324			dev->name);
1325		return -EINVAL;
1326	}
1327
1328	pci_addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev));
1329	addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev->parent));
1330	size_cells = ofnode_read_simple_size_cells(dev_ofnode(dev));
1331
1332	/* PCI addresses are always 3-cells */
1333	len /= sizeof(u32);
1334	cells_per_record = pci_addr_cells + addr_cells + size_cells;
1335	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
1336	      cells_per_record);
1337
1338	while (len) {
1339		memp->bus_start = fdtdec_get_number(prop + 1, 2);
1340		prop += pci_addr_cells;
1341		memp->phys_start = fdtdec_get_number(prop, addr_cells);
1342		prop += addr_cells;
1343		memp->size = fdtdec_get_number(prop, size_cells);
1344		prop += size_cells;
1345
1346		if (i == index)
1347			return 0;
1348		i++;
1349		len -= cells_per_record;
1350	}
1351
1352	return -EINVAL;
1353}
1354
1355int pci_get_regions(struct udevice *dev, struct pci_region **iop,
1356		    struct pci_region **memp, struct pci_region **prefp)
1357{
1358	struct udevice *bus = pci_get_controller(dev);
1359	struct pci_controller *hose = dev_get_uclass_priv(bus);
1360	int i;
1361
1362	*iop = NULL;
1363	*memp = NULL;
1364	*prefp = NULL;
1365	for (i = 0; i < hose->region_count; i++) {
1366		switch (hose->regions[i].flags) {
1367		case PCI_REGION_IO:
1368			if (!*iop || (*iop)->size < hose->regions[i].size)
1369				*iop = hose->regions + i;
1370			break;
1371		case PCI_REGION_MEM:
1372			if (!*memp || (*memp)->size < hose->regions[i].size)
1373				*memp = hose->regions + i;
1374			break;
1375		case (PCI_REGION_MEM | PCI_REGION_PREFETCH):
1376			if (!*prefp || (*prefp)->size < hose->regions[i].size)
1377				*prefp = hose->regions + i;
1378			break;
1379		}
1380	}
1381
1382	return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL);
1383}
1384
1385u32 dm_pci_read_bar32(const struct udevice *dev, int barnum)
1386{
1387	u32 addr;
1388	int bar;
1389
1390	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
1391	dm_pci_read_config32(dev, bar, &addr);
1392
1393	/*
1394	 * If we get an invalid address, return this so that comparisons with
1395	 * FDT_ADDR_T_NONE work correctly
1396	 */
1397	if (addr == 0xffffffff)
1398		return addr;
1399	else if (addr & PCI_BASE_ADDRESS_SPACE_IO)
1400		return addr & PCI_BASE_ADDRESS_IO_MASK;
1401	else
1402		return addr & PCI_BASE_ADDRESS_MEM_MASK;
1403}
1404
1405void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr)
1406{
1407	int bar;
1408
1409	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
1410	dm_pci_write_config32(dev, bar, addr);
1411}
1412
1413phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
1414			       size_t len, unsigned long mask,
1415			       unsigned long flags)
1416{
1417	struct udevice *ctlr;
1418	struct pci_controller *hose;
1419	struct pci_region *res;
1420	pci_addr_t offset;
1421	int i;
1422
1423	/* The root controller has the region information */
1424	ctlr = pci_get_controller(dev);
1425	hose = dev_get_uclass_priv(ctlr);
1426
1427	if (hose->region_count == 0)
1428		return bus_addr;
1429
1430	for (i = 0; i < hose->region_count; i++) {
1431		res = &hose->regions[i];
1432
1433		if ((res->flags & mask) != flags)
1434			continue;
1435
1436		if (bus_addr < res->bus_start)
1437			continue;
1438
1439		offset = bus_addr - res->bus_start;
1440		if (offset >= res->size)
1441			continue;
1442
1443		if (len > res->size - offset)
1444			continue;
1445
1446		return res->phys_start + offset;
1447	}
1448
1449	puts("dm_pci_bus_to_phys: invalid physical address\n");
1450	return 0;
1451}
1452
1453pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
1454			      size_t len, unsigned long mask,
1455			      unsigned long flags)
1456{
1457	struct udevice *ctlr;
1458	struct pci_controller *hose;
1459	struct pci_region *res;
1460	phys_addr_t offset;
1461	int i;
1462
1463	/* The root controller has the region information */
1464	ctlr = pci_get_controller(dev);
1465	hose = dev_get_uclass_priv(ctlr);
1466
1467	if (hose->region_count == 0)
1468		return phys_addr;
1469
1470	for (i = 0; i < hose->region_count; i++) {
1471		res = &hose->regions[i];
1472
1473		if ((res->flags & mask) != flags)
1474			continue;
1475
1476		if (phys_addr < res->phys_start)
1477			continue;
1478
1479		offset = phys_addr - res->phys_start;
1480		if (offset >= res->size)
1481			continue;
1482
1483		if (len > res->size - offset)
1484			continue;
1485
1486		return res->bus_start + offset;
1487	}
1488
1489	puts("dm_pci_phys_to_bus: invalid physical address\n");
1490	return 0;
1491}
1492
/**
 * dm_pci_map_ea_virt() - extra BAR offset for an SR-IOV Virtual Function
 *
 * For a VF, the EA entry read from the Physical Function is shared by all
 * VFs; each VF's slice is (MaxOffset + 1) bytes further on. Returns 0 for
 * anything that is not a VF.
 *
 * @dev:	(PF) device whose EA entry is being parsed
 * @ea_off:	config-space offset of the current EA entry
 * @pdata:	child platform data carrying is_virtfn / virtid
 * Return: byte offset to add to the EA base address
 */
static phys_addr_t dm_pci_map_ea_virt(struct udevice *dev, int ea_off,
				      struct pci_child_plat *pdata)
{
	phys_addr_t addr = 0;

	/*
	 * In the case of a Virtual Function device using BAR
	 * base and size, add offset for VFn BAR(1, 2, 3...n)
	 */
	if (pdata->is_virtfn) {
		size_t sz;
		u32 ea_entry;

		/* MaxOffset, 1st DW */
		dm_pci_read_config32(dev, ea_off + 8, &ea_entry);
		sz = ea_entry & PCI_EA_FIELD_MASK;
		/* Fill up lower 2 bits */
		sz |= (~PCI_EA_FIELD_MASK);

		if (ea_entry & PCI_EA_IS_64) {
			/* MaxOffset 2nd DW */
			dm_pci_read_config32(dev, ea_off + 16, &ea_entry);
			sz |= ((u64)ea_entry) << 32;
		}

		/* virtid is 1-based; VF1 starts at the PF base itself */
		addr = (pdata->virtid - 1) * (sz + 1);
	}

	return addr;
}
1523
1524static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, size_t offset,
1525			       size_t len, int ea_off,
1526			       struct pci_child_plat *pdata)
1527{
1528	int ea_cnt, i, entry_size;
1529	int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2;
1530	u32 ea_entry;
1531	phys_addr_t addr;
1532
1533	if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
1534		/*
1535		 * In the case of a Virtual Function device, device is
1536		 * Physical function, so pdata will point to required VF
1537		 * specific data.
1538		 */
1539		if (pdata->is_virtfn)
1540			bar_id += PCI_EA_BEI_VF_BAR0;
1541	}
1542
1543	/* EA capability structure header */
1544	dm_pci_read_config32(dev, ea_off, &ea_entry);
1545	ea_cnt = (ea_entry >> 16) & PCI_EA_NUM_ENT_MASK;
1546	ea_off += PCI_EA_FIRST_ENT;
1547
1548	for (i = 0; i < ea_cnt; i++, ea_off += entry_size) {
1549		/* Entry header */
1550		dm_pci_read_config32(dev, ea_off, &ea_entry);
1551		entry_size = ((ea_entry & PCI_EA_ES) + 1) << 2;
1552
1553		if (((ea_entry & PCI_EA_BEI) >> 4) != bar_id)
1554			continue;
1555
1556		/* Base address, 1st DW */
1557		dm_pci_read_config32(dev, ea_off + 4, &ea_entry);
1558		addr = ea_entry & PCI_EA_FIELD_MASK;
1559		if (ea_entry & PCI_EA_IS_64) {
1560			/* Base address, 2nd DW, skip over 4B MaxOffset */
1561			dm_pci_read_config32(dev, ea_off + 12, &ea_entry);
1562			addr |= ((u64)ea_entry) << 32;
1563		}
1564
1565		if (IS_ENABLED(CONFIG_PCI_SRIOV))
1566			addr += dm_pci_map_ea_virt(dev, ea_off, pdata);
1567
1568		if (~((phys_addr_t)0) - addr < offset)
1569			return NULL;
1570
1571		/* size ignored for now */
1572		return map_physmem(addr + offset, len, MAP_NOCACHE);
1573	}
1574
1575	return 0;
1576}
1577
/**
 * dm_pci_map_bar() - map a device BAR into virtual memory
 *
 * Uses the Enhanced Allocation capability when present (and enabled via
 * CONFIG_PCI_ENHANCED_ALLOCATION); otherwise reads the BAR register(s)
 * directly and translates the bus address through the controller regions
 * via dm_pci_bus_to_virt().
 *
 * @dev:	device whose BAR is to be mapped
 * @bar:	config-space offset of the BAR (e.g. PCI_BASE_ADDRESS_0)
 * @offset:	byte offset into the BAR to start the mapping at
 * @len:	length to map (checked against the declared PCI regions)
 * @mask:	region-flag mask passed through to the translation
 * @flags:	required region flags under @mask
 * Return: pointer to the mapped area, or NULL if @offset would overflow
 */
void *dm_pci_map_bar(struct udevice *dev, int bar, size_t offset, size_t len,
		     unsigned long mask, unsigned long flags)
{
	struct pci_child_plat *pdata = dev_get_parent_plat(dev);
	struct udevice *udev = dev;
	pci_addr_t pci_bus_addr;
	u32 bar_response;
	int ea_off;

	if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
		/*
		 * In case of Virtual Function devices, use PF udevice
		 * as EA capability is defined in Physical Function
		 */
		if (pdata->is_virtfn)
			udev = pdata->pfdev;
	}

	/*
	 * if the function supports Enhanced Allocation use that instead of
	 * BARs
	 * Incase of virtual functions, pdata will help read VF BEI
	 * and EA entry size.
	 */
	if (IS_ENABLED(CONFIG_PCI_ENHANCED_ALLOCATION))
		ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA);
	else
		ea_off = 0;

	if (ea_off)
		return dm_pci_map_ea_bar(udev, bar, offset, len, ea_off, pdata);

	/* read BAR address, masking off the low flag bits */
	dm_pci_read_config32(udev, bar, &bar_response);
	pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);

	/* This has a lot of baked in assumptions, but essentially tries
	 * to mirror the behavior of BAR assignment for 64 Bit enabled
	 * hosts and 64 bit placeable BARs in the auto assign code.
	 */
#if defined(CONFIG_SYS_PCI_64BIT)
	if (bar_response & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dm_pci_read_config32(udev, bar + 4, &bar_response);
		pci_bus_addr |= (pci_addr_t)bar_response << 32;
	}
#endif /* CONFIG_SYS_PCI_64BIT */

	/* Refuse an offset that would wrap the bus address */
	if (~((pci_addr_t)0) - pci_bus_addr < offset)
		return NULL;

	/*
	 * Forward the length argument to dm_pci_bus_to_virt. The length will
	 * be used to check that the entire address range has been declared as
	 * a PCI range, but a better check would be to probe for the size of
	 * the bar and prevent overflow more locally.
	 */
	return dm_pci_bus_to_virt(udev, pci_bus_addr + offset, len, mask, flags,
				  MAP_NOCACHE);
}
1637
1638static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap)
1639{
1640	int ttl = PCI_FIND_CAP_TTL;
1641	u8 id;
1642	u16 ent;
1643
1644	dm_pci_read_config8(dev, pos, &pos);
1645
1646	while (ttl--) {
1647		if (pos < PCI_STD_HEADER_SIZEOF)
1648			break;
1649		pos &= ~3;
1650		dm_pci_read_config16(dev, pos, &ent);
1651
1652		id = ent & 0xff;
1653		if (id == 0xff)
1654			break;
1655		if (id == cap)
1656			return pos;
1657		pos = (ent >> 8);
1658	}
1659
1660	return 0;
1661}
1662
/**
 * dm_pci_find_next_capability() - find the next capability after @start
 *
 * @dev:	device to search
 * @start:	offset of a capability to continue searching after
 * @cap:	capability ID to look for (PCI_CAP_ID_...)
 * Return: config-space offset of the capability, or 0 if not found
 */
int dm_pci_find_next_capability(struct udevice *dev, u8 start, int cap)
{
	return _dm_pci_find_next_capability(dev, start + PCI_CAP_LIST_NEXT,
					    cap);
}
1668
1669int dm_pci_find_capability(struct udevice *dev, int cap)
1670{
1671	u16 status;
1672	u8 header_type;
1673	u8 pos;
1674
1675	dm_pci_read_config16(dev, PCI_STATUS, &status);
1676	if (!(status & PCI_STATUS_CAP_LIST))
1677		return 0;
1678
1679	dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type);
1680	if ((header_type & 0x7f) == PCI_HEADER_TYPE_CARDBUS)
1681		pos = PCI_CB_CAPABILITY_LIST;
1682	else
1683		pos = PCI_CAPABILITY_LIST;
1684
1685	return _dm_pci_find_next_capability(dev, pos, cap);
1686}
1687
/**
 * dm_pci_find_next_ext_capability() - walk the PCIe extended capabilities
 *
 * Searches the extended capability list (config space above 256 bytes),
 * starting after @start or at the beginning when @start is 0.
 *
 * @dev:	device to search
 * @start:	offset to start from, or 0 for the start of the list
 * @cap:	extended capability ID to look for (PCI_EXT_CAP_ID_...)
 * Return: config-space offset of the capability, or 0 if not found
 */
int dm_pci_find_next_ext_capability(struct udevice *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (start)
		pos = start;

	dm_pci_read_config32(dev, pos, &header);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl--) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		/* A next pointer below the standard header ends the list */
		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		dm_pci_read_config32(dev, pos, &header);
	}

	return 0;
}
1721
/**
 * dm_pci_find_ext_capability() - find a PCIe extended capability
 *
 * @dev:	device to search
 * @cap:	extended capability ID to look for (PCI_EXT_CAP_ID_...)
 * Return: config-space offset of the capability, or 0 if not present
 */
int dm_pci_find_ext_capability(struct udevice *dev, int cap)
{
	return dm_pci_find_next_ext_capability(dev, 0, cap);
}
1726
1727int dm_pci_flr(struct udevice *dev)
1728{
1729	int pcie_off;
1730	u32 cap;
1731
1732	/* look for PCI Express Capability */
1733	pcie_off = dm_pci_find_capability(dev, PCI_CAP_ID_EXP);
1734	if (!pcie_off)
1735		return -ENOENT;
1736
1737	/* check FLR capability */
1738	dm_pci_read_config32(dev, pcie_off + PCI_EXP_DEVCAP, &cap);
1739	if (!(cap & PCI_EXP_DEVCAP_FLR))
1740		return -ENOENT;
1741
1742	dm_pci_clrset_config16(dev, pcie_off + PCI_EXP_DEVCTL, 0,
1743			       PCI_EXP_DEVCTL_BCR_FLR);
1744
1745	/* wait 100ms, per PCI spec */
1746	mdelay(100);
1747
1748	return 0;
1749}
1750
1751#if defined(CONFIG_PCI_SRIOV)
/**
 * pci_sriov_init() - enable SR-IOV Virtual Functions and bind devices
 *
 * Enables up to @vf_en Virtual Functions on @pdev (clamped to the device's
 * TotalVFs) and binds a udevice for each VF on the PF's bus, either from a
 * matching device-tree node or by vendor/device ID.
 *
 * @pdev:	SR-IOV capable Physical Function device
 * @vf_en:	number of Virtual Functions requested
 * Return: 0 on success, -ENOENT if @pdev has no SR-IOV capability, other
 *	   -ve value on error
 */
int pci_sriov_init(struct udevice *pdev, int vf_en)
{
	u16 vendor, device;
	struct udevice *bus;
	struct udevice *dev;
	pci_dev_t bdf;
	u16 ctrl;
	u16 num_vfs;
	u16 total_vf;
	u16 vf_offset;
	u16 vf_stride;
	int vf, ret;
	int pos;

	pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		debug("Error: SRIOV capability not found\n");
		return -ENOENT;
	}

	dm_pci_read_config16(pdev, pos + PCI_SRIOV_CTRL, &ctrl);

	/* Clamp the requested VF count to what the device supports */
	dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
	if (vf_en > total_vf)
		vf_en = total_vf;
	dm_pci_write_config16(pdev, pos + PCI_SRIOV_NUM_VF, vf_en);

	/* Enable the VFs and their memory space */
	ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	dm_pci_write_config16(pdev, pos + PCI_SRIOV_CTRL, ctrl);

	/* Re-read in case the device accepted fewer VFs than asked for */
	dm_pci_read_config16(pdev, pos + PCI_SRIOV_NUM_VF, &num_vfs);
	if (num_vfs > vf_en)
		num_vfs = vf_en;

	/* First VF's routing-ID offset, and the stride between VFs */
	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_OFFSET, &vf_offset);
	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_STRIDE, &vf_stride);

	/* VFs use the PF's vendor ID but carry their own device ID */
	dm_pci_read_config16(pdev, PCI_VENDOR_ID, &vendor);
	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_DID, &device);

	bdf = dm_pci_get_bdf(pdev);

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	bdf += PCI_BDF(0, 0, vf_offset);

	for (vf = 0; vf < num_vfs; vf++) {
		struct pci_child_plat *pplat;
		ulong class;

		pci_bus_read_config(bus, bdf, PCI_CLASS_DEVICE,
				    &class, PCI_SIZE_16);

		debug("%s: bus %d/%s: found VF %x:%x\n", __func__,
		      dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);

		/* Not in the device tree: bind a driver by vendor/device ID */
		if (ret == -ENODEV) {
			struct pci_device_id find_id;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;

			ret = pci_find_and_bind_driver(bus, &find_id,
						       bdf, &dev);

			if (ret)
				return ret;
		}

		/* Update the platform data */
		pplat = dev_get_parent_plat(dev);
		pplat->devfn = PCI_MASK_BUS(bdf);
		pplat->vendor = vendor;
		pplat->device = device;
		pplat->class = class;
		pplat->is_virtfn = true;
		pplat->pfdev = pdev;
		pplat->virtid = vf * vf_stride + vf_offset;

		debug("%s: bus %d/%s: found VF %x:%x %x:%x class %lx id %x\n",
		      __func__, dev_seq(dev), dev->name, PCI_DEV(bdf),
		      PCI_FUNC(bdf), vendor, device, class, pplat->virtid);
		bdf += PCI_BDF(0, 0, vf_stride);
	}

	return 0;
}
1846
1847int pci_sriov_get_totalvfs(struct udevice *pdev)
1848{
1849	u16 total_vf;
1850	int pos;
1851
1852	pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1853	if (!pos) {
1854		debug("Error: SRIOV capability not found\n");
1855		return -ENOENT;
1856	}
1857
1858	dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
1859
1860	return total_vf;
1861}
1862#endif /* SRIOV */
1863
/* The PCI uclass: each PCI/PCIe bus controller is a device in this class */
UCLASS_DRIVER(pci) = {
	.id		= UCLASS_PCI,
	.name		= "pci",
	.flags		= DM_UC_FLAG_SEQ_ALIAS | DM_UC_FLAG_NO_AUTO_SEQ,
	.post_bind	= dm_scan_fdt_dev,
	.pre_probe	= pci_uclass_pre_probe,
	.post_probe	= pci_uclass_post_probe,
	.child_post_bind = pci_uclass_child_post_bind,
	.per_device_auto	= sizeof(struct pci_controller),
	.per_child_plat_auto	= sizeof(struct pci_child_plat),
};
1875
/* Config accesses through a bridge are forwarded to its parent controller */
static const struct dm_pci_ops pci_bridge_ops = {
	.read_config	= pci_bridge_read_config,
	.write_config	= pci_bridge_write_config,
};

static const struct udevice_id pci_bridge_ids[] = {
	{ .compatible = "pci-bridge" },
	{ }
};

/* Driver for PCI-to-PCI bridges (secondary buses) */
U_BOOT_DRIVER(pci_bridge_drv) = {
	.name		= "pci_bridge_drv",
	.id		= UCLASS_PCI,
	.of_match	= pci_bridge_ids,
	.ops		= &pci_bridge_ops,
};
1892
/* Catch-all uclass for PCI devices with no more specific driver */
UCLASS_DRIVER(pci_generic) = {
	.id		= UCLASS_PCI_GENERIC,
	.name		= "pci_generic",
};

static const struct udevice_id pci_generic_ids[] = {
	{ .compatible = "pci-generic" },
	{ }
};

/* Driver bound to "pci-generic" device-tree nodes */
U_BOOT_DRIVER(pci_generic_drv) = {
	.name		= "pci_generic_drv",
	.id		= UCLASS_PCI_GENERIC,
	.of_match	= pci_generic_ids,
};
1908
1909int pci_init(void)
1910{
1911	struct udevice *bus;
1912
1913	/*
1914	 * Enumerate all known controller devices. Enumeration has the side-
1915	 * effect of probing them, so PCIe devices will be enumerated too.
1916	 */
1917	for (uclass_first_device_check(UCLASS_PCI, &bus);
1918	     bus;
1919	     uclass_next_device_check(&bus)) {
1920		;
1921	}
1922
1923	return 0;
1924}
1925