• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/pci/
1/*
2 *	drivers/pci/setup-bus.c
3 *
4 * Extruded from code written by
5 *      Dave Rusling (david.rusling@reo.mts.dec.com)
6 *      David Mosberger (davidm@cs.arizona.edu)
7 *	David Miller (davem@redhat.com)
8 *
9 * Support routines for initializing a PCI subsystem.
10 */
11
12/*
13 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
14 *	     PCI-PCI bridges cleanup, sorted resource allocation.
15 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
16 *	     Converted to allocation in 3 passes, which gives
17 *	     tighter packing. Prefetchable range support.
18 */
19
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/pci.h>
24#include <linux/errno.h>
25#include <linux/ioport.h>
26#include <linux/cache.h>
27#include <linux/slab.h>
28#include "pci.h"
29
/*
 * Extended resource list node: remembers the device that owns a
 * resource plus a snapshot of its start/end/flags, so that a failed
 * assignment can later be restored to its original values and retried.
 */
struct resource_list_x {
	struct resource_list_x *next;	/* singly-linked list link */
	struct resource *res;		/* resource whose assignment failed */
	struct pci_dev *dev;		/* device owning @res */
	resource_size_t start;		/* saved res->start */
	resource_size_t end;		/* saved res->end */
	unsigned long flags;		/* saved res->flags */
};
38
39static void add_to_failed_list(struct resource_list_x *head,
40				 struct pci_dev *dev, struct resource *res)
41{
42	struct resource_list_x *list = head;
43	struct resource_list_x *ln = list->next;
44	struct resource_list_x *tmp;
45
46	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
47	if (!tmp) {
48		pr_warning("add_to_failed_list: kmalloc() failed!\n");
49		return;
50	}
51
52	tmp->next = ln;
53	tmp->res = res;
54	tmp->dev = dev;
55	tmp->start = res->start;
56	tmp->end = res->end;
57	tmp->flags = res->flags;
58	list->next = tmp;
59}
60
61static void free_failed_list(struct resource_list_x *head)
62{
63	struct resource_list_x *list, *tmp;
64
65	for (list = head->next; list;) {
66		tmp = list;
67		list = list->next;
68		kfree(tmp);
69	}
70
71	head->next = NULL;
72}
73
74static void __dev_sort_resources(struct pci_dev *dev,
75				 struct resource_list *head)
76{
77	u16 class = dev->class >> 8;
78
79	/* Don't touch classless devices or host bridges or ioapics.  */
80	if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
81		return;
82
83	/* Don't touch ioapic devices already enabled by firmware */
84	if (class == PCI_CLASS_SYSTEM_PIC) {
85		u16 command;
86		pci_read_config_word(dev, PCI_COMMAND, &command);
87		if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
88			return;
89	}
90
91	pdev_sort_resources(dev, head);
92}
93
94static void __assign_resources_sorted(struct resource_list *head,
95				 struct resource_list_x *fail_head)
96{
97	struct resource *res;
98	struct resource_list *list, *tmp;
99	int idx;
100
101	for (list = head->next; list;) {
102		res = list->res;
103		idx = res - &list->dev->resource[0];
104
105		if (pci_assign_resource(list->dev, idx)) {
106			if (fail_head && !pci_is_root_bus(list->dev->bus)) {
107				/*
108				 * if the failed res is for ROM BAR, and it will
109				 * be enabled later, don't add it to the list
110				 */
111				if (!((idx == PCI_ROM_RESOURCE) &&
112				      (!(res->flags & IORESOURCE_ROM_ENABLE))))
113					add_to_failed_list(fail_head, list->dev, res);
114			}
115			res->start = 0;
116			res->end = 0;
117			res->flags = 0;
118		}
119		tmp = list;
120		list = list->next;
121		kfree(tmp);
122	}
123}
124
125static void pdev_assign_resources_sorted(struct pci_dev *dev,
126				 struct resource_list_x *fail_head)
127{
128	struct resource_list head;
129
130	head.next = NULL;
131	__dev_sort_resources(dev, &head);
132	__assign_resources_sorted(&head, fail_head);
133
134}
135
136static void pbus_assign_resources_sorted(const struct pci_bus *bus,
137					 struct resource_list_x *fail_head)
138{
139	struct pci_dev *dev;
140	struct resource_list head;
141
142	head.next = NULL;
143	list_for_each_entry(dev, &bus->devices, bus_list)
144		__dev_sort_resources(dev, &head);
145
146	__assign_resources_sorted(&head, fail_head);
147}
148
149void pci_setup_cardbus(struct pci_bus *bus)
150{
151	struct pci_dev *bridge = bus->self;
152	struct resource *res;
153	struct pci_bus_region region;
154
155	dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
156		 bus->secondary, bus->subordinate);
157
158	res = bus->resource[0];
159	pcibios_resource_to_bus(bridge, &region, res);
160	if (res->flags & IORESOURCE_IO) {
161		/*
162		 * The IO resource is allocated a range twice as large as it
163		 * would normally need.  This allows us to set both IO regs.
164		 */
165		dev_info(&bridge->dev, "  bridge window %pR\n", res);
166		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
167					region.start);
168		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
169					region.end);
170	}
171
172	res = bus->resource[1];
173	pcibios_resource_to_bus(bridge, &region, res);
174	if (res->flags & IORESOURCE_IO) {
175		dev_info(&bridge->dev, "  bridge window %pR\n", res);
176		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
177					region.start);
178		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
179					region.end);
180	}
181
182	res = bus->resource[2];
183	pcibios_resource_to_bus(bridge, &region, res);
184	if (res->flags & IORESOURCE_MEM) {
185		dev_info(&bridge->dev, "  bridge window %pR\n", res);
186		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
187					region.start);
188		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
189					region.end);
190	}
191
192	res = bus->resource[3];
193	pcibios_resource_to_bus(bridge, &region, res);
194	if (res->flags & IORESOURCE_MEM) {
195		dev_info(&bridge->dev, "  bridge window %pR\n", res);
196		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
197					region.start);
198		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
199					region.end);
200	}
201}
202EXPORT_SYMBOL(pci_setup_cardbus);
203
/* Initialize bridges with base/limit values we have collected.
   PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
   requires that if there is no I/O ports or memory behind the
   bridge, corresponding range must be turned off by writing base
   value greater than limit to the bridge's base/limit registers.

   Note: care must be taken when updating I/O base/limit registers
   of bridges which support 32-bit I/O. This update requires two
   config space writes, so it's quite possible that an I/O window of
   the bridge will have some undesirable address (e.g. 0) after the
   first write. Ditto 64-bit prefetchable MMIO.  */
static void pci_setup_bridge_io(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, io_upper16;

	/* Set up the top and bottom of the PCI I/O segment for this bus. */
	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		pci_read_config_dword(bridge, PCI_IO_BASE, &l);
		l &= 0xffff0000;	/* preserve the non-I/O upper half */
		/* base bits 15:12 in reg bits 7:4, limit bits 15:12 in 15:12 */
		l |= (region.start >> 8) & 0x00f0;
		l |= region.end & 0xf000;
		/* Set up upper 16 bits of I/O base/limit. */
		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		/* Clear upper 16 bits of I/O base/limit. */
		io_upper16 = 0;
		l = 0x00f0;	/* base 0xf0 > limit 0x00: window disabled */
		dev_info(&bridge->dev, "  bridge window [io  disabled]\n");
	}
	/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
	/* Update lower 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE, l);
	/* Update upper 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
246
247static void pci_setup_bridge_mmio(struct pci_bus *bus)
248{
249	struct pci_dev *bridge = bus->self;
250	struct resource *res;
251	struct pci_bus_region region;
252	u32 l;
253
254	/* Set up the top and bottom of the PCI Memory segment for this bus. */
255	res = bus->resource[1];
256	pcibios_resource_to_bus(bridge, &region, res);
257	if (res->flags & IORESOURCE_MEM) {
258		l = (region.start >> 16) & 0xfff0;
259		l |= region.end & 0xfff00000;
260		dev_info(&bridge->dev, "  bridge window %pR\n", res);
261	} else {
262		l = 0x0000fff0;
263		dev_info(&bridge->dev, "  bridge window [mem disabled]\n");
264	}
265	pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
266}
267
/*
 * Program the bridge's prefetchable memory window from bus resource 2.
 * The upper-32-bit registers are cleared first and written last so the
 * window never transiently covers an unintended 64-bit range while the
 * multi-write update is in progress.
 */
static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, bu, lu;

	/* Clear out the upper 32 bits of PREF limit.
	   If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
	   disables PREF range, which is ok. */
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

	/* Set up PREF base/limit. */
	bu = lu = 0;
	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_PREFETCH) {
		/* base in reg bits 15:4, limit in reg bits 31:20 */
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		if (res->flags & IORESOURCE_MEM_64) {
			bu = upper_32_bits(region.start);
			lu = upper_32_bits(region.end);
		}
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		/* base 0xfff0 > limit 0x0000 disables the window */
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  bridge window [mem pref disabled]\n");
	}
	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

	/* Set the upper 32 bits of PREF base & limit. */
	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
302
303static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
304{
305	struct pci_dev *bridge = bus->self;
306
307	dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
308		 bus->secondary, bus->subordinate);
309
310	if (type & IORESOURCE_IO)
311		pci_setup_bridge_io(bus);
312
313	if (type & IORESOURCE_MEM)
314		pci_setup_bridge_mmio(bus);
315
316	if (type & IORESOURCE_PREFETCH)
317		pci_setup_bridge_mmio_pref(bus);
318
319	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
320}
321
322static void pci_setup_bridge(struct pci_bus *bus)
323{
324	unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
325				  IORESOURCE_PREFETCH;
326
327	__pci_setup_bridge(bus, type);
328}
329
/* Check whether the bridge supports optional I/O and
   prefetchable memory ranges. If not, the respective
   base/limit registers must be read-only and read as 0. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
	u16 io;
	u32 pmem;
	struct pci_dev *bridge = bus->self;
	struct resource *b_res;

	b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	/* The non-prefetchable memory window is always present. */
	b_res[1].flags |= IORESOURCE_MEM;

	/* Probe the I/O window: write a pattern and see if it sticks. */
	pci_read_config_word(bridge, PCI_IO_BASE, &io);
	if (!io) {
		pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
		pci_read_config_word(bridge, PCI_IO_BASE, &io);
 		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
 	}
 	if (io)
		b_res[0].flags |= IORESOURCE_IO;
	/* DEC 21050: leave the prefetchable window disabled on this device. */
	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
		return;
	/* Probe the prefetchable window the same way. */
	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
	if (!pmem) {
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
					       0xfff0fff0);
		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
	}
	if (pmem) {
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		/* low bits of the base register encode 64-bit capability */
		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
		    PCI_PREF_RANGE_TYPE_64) {
			b_res[2].flags |= IORESOURCE_MEM_64;
			b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
		}
	}

	/* double check if bridge does support 64 bit pref */
	if (b_res[2].flags & IORESOURCE_MEM_64) {
		u32 mem_base_hi, tmp;
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					 &mem_base_hi);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					       0xffffffff);
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
		if (!tmp)
			b_res[2].flags &= ~IORESOURCE_MEM_64;
		/* restore the original upper base value */
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				       mem_base_hi);
	}
}
383
384/* Helper function for sizing routines: find first available
385   bus resource of a given type. Note: we intentionally skip
386   the bus resources which have already been assigned (that is,
387   have non-NULL parent resource). */
388static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
389{
390	int i;
391	struct resource *r;
392	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
393				  IORESOURCE_PREFETCH;
394
395	pci_bus_for_each_resource(bus, r, i) {
396		if (r == &ioport_resource || r == &iomem_resource)
397			continue;
398		if (r && (r->flags & type_mask) == type && !r->parent)
399			return r;
400	}
401	return NULL;
402}
403
/* Sizing the IO windows of the PCI-PCI bridge is trivial,
   since these windows have 4K granularity and the IO ranges
   of non-bridge PCI devices are limited to 256 bytes.
   We must be careful with the ISA aliasing though. */
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
{
	struct pci_dev *dev;
	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
	unsigned long size = 0, size1 = 0, old_size;

	/* No unassigned IO window on this bus: nothing to size. */
	if (!b_res)
 		return;

	/* Sum the IO space needed by every device on this bus;
	   small (<1K) regions are tracked separately because they may
	   need ISA-alias-aware re-alignment below. */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			unsigned long r_size;

			if (r->parent || !(r->flags & IORESOURCE_IO))
				continue;
			r_size = resource_size(r);

			if (r_size < 0x400)
				/* Might be re-aligned for ISA */
				size += r_size;
			else
				size1 += r_size;
		}
	}
	if (size < min_size)
		size = min_size;
	/* A size-1 window is just an unsized placeholder, not real. */
	old_size = resource_size(b_res);
	if (old_size == 1)
		old_size = 0;
/* To be fixed in 2.5: we should have sort of HAVE_ISA
   flag in the struct pci_bus. */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
	/* Spread small regions out so they avoid ISA aliases. */
	size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
	size = ALIGN(size + size1, 4096);
	if (size < old_size)
		size = old_size;
	if (!size) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return;
	}
	/* Alignment of the IO window is always 4K */
	b_res->start = 4096;
	b_res->end = b_res->start + size - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
}
461
462/* Calculate the size of the bus and minimal alignment which
463   guarantees that all child resources fit in this size. */
464static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
465			 unsigned long type, resource_size_t min_size)
466{
467	struct pci_dev *dev;
468	resource_size_t min_align, align, size, old_size;
469	resource_size_t aligns[12];	/* Alignments from 1Mb to 2Gb */
470	int order, max_order;
471	struct resource *b_res = find_free_bus_resource(bus, type);
472	unsigned int mem64_mask = 0;
473
474	if (!b_res)
475		return 0;
476
477	memset(aligns, 0, sizeof(aligns));
478	max_order = 0;
479	size = 0;
480
481	mem64_mask = b_res->flags & IORESOURCE_MEM_64;
482	b_res->flags &= ~IORESOURCE_MEM_64;
483
484	list_for_each_entry(dev, &bus->devices, bus_list) {
485		int i;
486
487		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
488			struct resource *r = &dev->resource[i];
489			resource_size_t r_size;
490
491			if (r->parent || (r->flags & mask) != type)
492				continue;
493			r_size = resource_size(r);
494			/* For bridges size != alignment */
495			align = pci_resource_alignment(dev, r);
496			order = __ffs(align) - 20;
497			if (order > 11) {
498				dev_warn(&dev->dev, "disabling BAR %d: %pR "
499					 "(bad alignment %#llx)\n", i, r,
500					 (unsigned long long) align);
501				r->flags = 0;
502				continue;
503			}
504			size += r_size;
505			if (order < 0)
506				order = 0;
507			/* Exclude ranges with size > align from
508			   calculation of the alignment. */
509			if (r_size == align)
510				aligns[order] += align;
511			if (order > max_order)
512				max_order = order;
513			mem64_mask &= r->flags & IORESOURCE_MEM_64;
514		}
515	}
516	if (size < min_size)
517		size = min_size;
518	old_size = resource_size(b_res);
519	if (old_size == 1)
520		old_size = 0;
521	if (size < old_size)
522		size = old_size;
523
524	align = 0;
525	min_align = 0;
526	for (order = 0; order <= max_order; order++) {
527		resource_size_t align1 = 1;
528
529		align1 <<= (order + 20);
530
531		if (!align)
532			min_align = align1;
533		else if (ALIGN(align + min_align, min_align) < align1)
534			min_align = align1 >> 1;
535		align += aligns[order];
536	}
537	size = ALIGN(size, min_align);
538	if (!size) {
539		if (b_res->start || b_res->end)
540			dev_info(&bus->self->dev, "disabling bridge window "
541				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
542				 bus->secondary, bus->subordinate);
543		b_res->flags = 0;
544		return 1;
545	}
546	b_res->start = min_align;
547	b_res->end = size + min_align - 1;
548	b_res->flags |= IORESOURCE_STARTALIGN;
549	b_res->flags |= mem64_mask;
550	return 1;
551}
552
/*
 * Reserve fixed-size windows for a CardBus bridge: two I/O windows,
 * and either two memory windows (one prefetchable) or one
 * double-sized non-prefetchable window, depending on whether the
 * bridge supports prefetchable memory.
 */
static void pci_bus_size_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	u16 ctrl;

	/*
	 * Reserve some resources for CardBus.  We reserve
	 * a fixed amount of bus space for CardBus bridges.
	 */
	b_res[0].start = 0;
	b_res[0].end = pci_cardbus_io_size - 1;
	b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	b_res[1].start = 0;
	b_res[1].end = pci_cardbus_io_size - 1;
	b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	/*
	 * Check whether prefetchable memory is supported
	 * by this bridge.
	 */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
		/* try to set the bit and read it back to see if it sticks */
		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	/*
	 * If we have prefetchable memory support, allocate
	 * two regions.  Otherwise, allocate one region of
	 * twice the size.
	 */
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
		b_res[2].start = 0;
		b_res[2].end = pci_cardbus_mem_size - 1;
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;

		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	} else {
		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size * 2 - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	}
}
601
/*
 * Size the bridge windows of every bus below @bus, depth first, then
 * size this bus's own bridge windows (cardbus bridges are left for
 * later; root buses have no bridge to size).
 */
void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
	struct pci_dev *dev;
	unsigned long mask, prefmask;
	resource_size_t min_mem_size = 0, min_io_size = 0;

	/* Recurse into subordinate buses first. */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_bus_size_cardbus(b);
			break;

		case PCI_CLASS_BRIDGE_PCI:
		default:
			pci_bus_size_bridges(b);
			break;
		}
	}

	/* The root bus? */
	if (!bus->self)
		return;

	switch (bus->self->class >> 8) {
	case PCI_CLASS_BRIDGE_CARDBUS:
		/* don't size cardbuses yet. */
		break;

	case PCI_CLASS_BRIDGE_PCI:
		pci_bridge_check_ranges(bus);
		if (bus->self->is_hotplug_bridge) {
			/* leave headroom for devices hotplugged later */
			min_io_size  = pci_hotplug_io_size;
			min_mem_size = pci_hotplug_mem_size;
		}
		/* fall through */
	default:
		pbus_size_io(bus, min_io_size);
		/* If the bridge supports prefetchable range, size it
		   separately. If it doesn't, or its prefetchable window
		   has already been allocated by arch code, try
		   non-prefetchable range for both types of PCI memory
		   resources. */
		mask = IORESOURCE_MEM;
		prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size))
			mask = prefmask; /* Success, size non-prefetch only. */
		else
			/* one window must hold both types: double the floor */
			min_mem_size += min_mem_size;
		pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size);
		break;
	}
}
EXPORT_SYMBOL(pci_bus_size_bridges);
658
659static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
660					 struct resource_list_x *fail_head)
661{
662	struct pci_bus *b;
663	struct pci_dev *dev;
664
665	pbus_assign_resources_sorted(bus, fail_head);
666
667	list_for_each_entry(dev, &bus->devices, bus_list) {
668		b = dev->subordinate;
669		if (!b)
670			continue;
671
672		__pci_bus_assign_resources(b, fail_head);
673
674		switch (dev->class >> 8) {
675		case PCI_CLASS_BRIDGE_PCI:
676			if (!pci_is_enabled(dev))
677				pci_setup_bridge(b);
678			break;
679
680		case PCI_CLASS_BRIDGE_CARDBUS:
681			pci_setup_cardbus(b);
682			break;
683
684		default:
685			dev_info(&dev->dev, "not setting up bridge for bus "
686				 "%04x:%02x\n", pci_domain_nr(b), b->number);
687			break;
688		}
689	}
690}
691
/* Assign everything below @bus; assignment failures are not recorded. */
void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
	__pci_bus_assign_resources(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
697
698static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
699					 struct resource_list_x *fail_head)
700{
701	struct pci_bus *b;
702
703	pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);
704
705	b = bridge->subordinate;
706	if (!b)
707		return;
708
709	__pci_bus_assign_resources(b, fail_head);
710
711	switch (bridge->class >> 8) {
712	case PCI_CLASS_BRIDGE_PCI:
713		pci_setup_bridge(b);
714		break;
715
716	case PCI_CLASS_BRIDGE_CARDBUS:
717		pci_setup_cardbus(b);
718		break;
719
720	default:
721		dev_info(&bridge->dev, "not setting up bridge for bus "
722			 "%04x:%02x\n", pci_domain_nr(b), b->number);
723		break;
724	}
725}
/*
 * Release the already-assigned bridge windows of @bus's bridge whose
 * flags match @type exactly, so they can be re-sized and re-assigned.
 * If anything was released, the bridge is reprogrammed so the
 * now-empty windows are disabled in hardware.
 */
static void pci_bridge_release_resources(struct pci_bus *bus,
					  unsigned long type)
{
	int idx;
	bool changed = false;
	struct pci_dev *dev;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	dev = bus->self;
	for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
	     idx++) {
		r = &dev->resource[idx];
		if ((r->flags & type_mask) != type)
			continue;
		if (!r->parent)
			continue;
		/*
		 * if there are children under that, we should release them
		 *  all
		 */
		release_child_resources(r);
		if (!release_resource(r)) {
			dev_printk(KERN_DEBUG, &dev->dev,
				 "resource %d %pR released\n", idx, r);
			/* keep the old size */
			/* note: end must be recomputed before start is zeroed */
			r->end = resource_size(r) - 1;
			r->start = 0;
			r->flags = 0;
			changed = true;
		}
	}

	if (changed) {
		/* avoiding touch the one without PREF */
		if (type & IORESOURCE_PREFETCH)
			type = IORESOURCE_PREFETCH;
		__pci_setup_bridge(bus, type);
	}
}
767
/* How much of a bridge subtree to release window resources from. */
enum release_type {
	leaf_only,	/* only leaf bridges */
	whole_subtree,	/* every bridge in the subtree */
};
772/*
773 * try to release pci bridge resources that is from leaf bridge,
774 * so we can allocate big new one later
775 */
776static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
777						   unsigned long type,
778						   enum release_type rel_type)
779{
780	struct pci_dev *dev;
781	bool is_leaf_bridge = true;
782
783	list_for_each_entry(dev, &bus->devices, bus_list) {
784		struct pci_bus *b = dev->subordinate;
785		if (!b)
786			continue;
787
788		is_leaf_bridge = false;
789
790		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
791			continue;
792
793		if (rel_type == whole_subtree)
794			pci_bus_release_bridge_resources(b, type,
795						 whole_subtree);
796	}
797
798	if (pci_is_root_bus(bus))
799		return;
800
801	if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
802		return;
803
804	if ((rel_type == whole_subtree) || is_leaf_bridge)
805		pci_bridge_release_resources(bus, type);
806}
807
808static void pci_bus_dump_res(struct pci_bus *bus)
809{
810	struct resource *res;
811	int i;
812
813	pci_bus_for_each_resource(bus, res, i) {
814		if (!res || !res->end || !res->flags)
815                        continue;
816
817		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
818        }
819}
820
821static void pci_bus_dump_resources(struct pci_bus *bus)
822{
823	struct pci_bus *b;
824	struct pci_dev *dev;
825
826
827	pci_bus_dump_res(bus);
828
829	list_for_each_entry(dev, &bus->devices, bus_list) {
830		b = dev->subordinate;
831		if (!b)
832			continue;
833
834		pci_bus_dump_resources(b);
835	}
836}
837
838void __init
839pci_assign_unassigned_resources(void)
840{
841	struct pci_bus *bus;
842
843	/* Depth first, calculate sizes and alignments of all
844	   subordinate buses. */
845	list_for_each_entry(bus, &pci_root_buses, node) {
846		pci_bus_size_bridges(bus);
847	}
848	/* Depth last, allocate resources and update the hardware. */
849	list_for_each_entry(bus, &pci_root_buses, node) {
850		pci_bus_assign_resources(bus);
851		pci_enable_bridges(bus);
852	}
853
854	/* dump the resource on buses */
855	list_for_each_entry(bus, &pci_root_buses, node) {
856		pci_bus_dump_resources(bus);
857	}
858}
859
860void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
861{
862	struct pci_bus *parent = bridge->subordinate;
863	int tried_times = 0;
864	struct resource_list_x head, *list;
865	int retval;
866	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
867				  IORESOURCE_PREFETCH;
868
869	head.next = NULL;
870
871again:
872	pci_bus_size_bridges(parent);
873	__pci_bridge_assign_resources(bridge, &head);
874
875	tried_times++;
876
877	if (!head.next)
878		goto enable_all;
879
880	if (tried_times >= 2) {
881		/* still fail, don't need to try more */
882		free_failed_list(&head);
883		goto enable_all;
884	}
885
886	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
887			 tried_times + 1);
888
889	/*
890	 * Try to release leaf bridge's resources that doesn't fit resource of
891	 * child device under that bridge
892	 */
893	for (list = head.next; list;) {
894		struct pci_bus *bus = list->dev->bus;
895		unsigned long flags = list->flags;
896
897		pci_bus_release_bridge_resources(bus, flags & type_mask,
898						 whole_subtree);
899		list = list->next;
900	}
901	/* restore size and flags */
902	for (list = head.next; list;) {
903		struct resource *res = list->res;
904
905		res->start = list->start;
906		res->end = list->end;
907		res->flags = list->flags;
908		if (list->dev->subordinate)
909			res->flags = 0;
910
911		list = list->next;
912	}
913	free_failed_list(&head);
914
915	goto again;
916
917enable_all:
918	retval = pci_reenable_device(bridge);
919	pci_set_master(bridge);
920	pci_enable_bridges(parent);
921}
922EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
923