/* Source: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/x86/pci/ */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>

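/*
 * Per-root-bridge state used while translating an ACPI host bridge's _CRS
 * resources into struct resource host bridge windows.
 */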
struct pci_root_info {
	struct acpi_device *bridge;
	char *name;
	unsigned int res_num;
	struct resource *res;
	struct pci_bus *bus;
	int busnum;
};

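/*
 * Whether to use the host bridge windows reported by ACPI _CRS. The default
 * may be overridden by BIOS date, the DMI quirk table below, or an explicit
 * "pci=use_crs"/"pci=nocrs" on the command line (see pci_acpi_crs_quirks()).
 */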
static bool pci_use_crs = true;

static int __init set_use_crs(const struct dmi_system_id *id)
{
	pci_use_crs = true;
	return 0;
}

static const struct dmi_system_id pci_use_crs_table[] __initconst = {
	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
	{
		.callback = set_use_crs,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASRock ALiveSATA2-GLAN",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
		},
	},
	{}
};

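/*
 * Decide whether to trust _CRS for host bridge windows: distrust pre-2008
 * BIOSes by default, re-enable via the DMI quirk table above, and let an
 * explicit "pci=use_crs" or "pci=nocrs" on the command line win.
 */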
void __init pci_acpi_crs_quirks(void)
{
	int year;

	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
		pci_use_crs = false;

	dmi_check_system(pci_use_crs_table);

	/*
	 * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
	 * takes precedence over anything we figured out above.
	 */
	if (pci_probe & PCI_ROOT_NO_CRS)
		pci_use_crs = false;
	else if (pci_probe & PCI_USE__CRS)
		pci_use_crs = true;

	printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
	       "if necessary, use \"pci=%s\" and report a bug\n",
	       pci_use_crs ? "Using" : "Ignoring",
	       pci_use_crs ? "nocrs" : "use_crs");
}

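/*
 * Convert an ACPI resource descriptor (24/32-bit memory, fixed 32-bit memory,
 * or a 16/32/64-bit address space descriptor) into an acpi_resource_address64.
 * Returns AE_ERROR for descriptors that do not describe a non-empty memory or
 * I/O range.
 */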
static acpi_status
resource_to_addr(struct acpi_resource *resource,
			struct acpi_resource_address64 *addr)
{
	acpi_status status;
	struct acpi_resource_memory24 *memory24;
	struct acpi_resource_memory32 *memory32;
	struct acpi_resource_fixed_memory32 *fixed_memory32;

	memset(addr, 0, sizeof(*addr));
	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_MEMORY24:
		memory24 = &resource->data.memory24;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory24->minimum;
		addr->address_length = memory24->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_MEMORY32:
		memory32 = &resource->data.memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory32->minimum;
		addr->address_length = memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
		fixed_memory32 = &resource->data.fixed_memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = fixed_memory32->address;
		addr->address_length = fixed_memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_ADDRESS16:
	case ACPI_RESOURCE_TYPE_ADDRESS32:
	case ACPI_RESOURCE_TYPE_ADDRESS64:
		status = acpi_resource_to_address64(resource, addr);
		if (ACPI_SUCCESS(status) &&
		    (addr->resource_type == ACPI_MEMORY_RANGE ||
		    addr->resource_type == ACPI_IO_RANGE) &&
		    addr->address_length > 0) {
			return AE_OK;
		}
		break;
	}
	return AE_ERROR;
}

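/*
 * acpi_walk_resources() callback: count the _CRS entries that translate into
 * usable windows so the caller can size the info->res array.
 */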
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_addr(acpi_res, &addr);
	if (ACPI_SUCCESS(status))
		info->res_num++;
	return AE_OK;
}

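/*
 * acpi_walk_resources() callback: fill in the next struct resource from a
 * _CRS entry and, when _CRS is trusted, insert it into the iomem/ioport tree
 * and attach it to the root bus as a host bridge window.
 */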
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *res;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags;
	struct resource *root, *conflict;
	u64 start, end;

	status = resource_to_addr(acpi_res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		root = &iomem_resource;
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		root = &ioport_resource;
		flags = IORESOURCE_IO;
	} else
		return AE_OK;

	start = addr.minimum + addr.translation_offset;
	end = addr.maximum + addr.translation_offset;

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	conflict = insert_resource_conflict(root, res);
	if (conflict) {
		dev_err(&info->bridge->dev,
			"address space collision: host bridge window %pR "
			"conflicts with %s %pR\n",
			res, conflict->name, conflict);
	} else {
		pci_bus_add_resource(info->bus, res, 0);
		info->res_num++;
		if (addr.translation_offset)
			dev_info(&info->bridge->dev, "host bridge window %pR "
				 "(PCI address [%#llx-%#llx])\n",
				 res, res->start - addr.translation_offset,
				 res->end - addr.translation_offset);
		else
			dev_info(&info->bridge->dev,
				 "host bridge window %pR\n", res);
	}
	return AE_OK;
}

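/*
 * Walk the host bridge's _CRS and, when pci_use_crs is set, replace the root
 * bus's default resources with the windows it describes.
 */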
static void
get_current_resources(struct acpi_device *device, int busnum,
			int domain, struct pci_bus *bus)
{
	struct pci_root_info info;
	size_t size;

	if (pci_use_crs)
		pci_bus_remove_resources(bus);

	info.bridge = device;
	info.bus = bus;
	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
				&info);
	if (!info.res_num)
		return;

	size = sizeof(*info.res) * info.res_num;
	info.res = kmalloc(size, GFP_KERNEL);
	if (!info.res)
		goto res_alloc_fail;

	info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
	if (!info.name)
		goto name_alloc_fail;

	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
				&info);

	return;

name_alloc_fail:
	kfree(info.res);
res_alloc_fail:
	return;
}

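/*
 * Create (or reuse) the PCI root bus for an ACPI host bridge: derive the NUMA
 * node from the _PXM proximity domain when available, allocate the per-root
 * sysdata, read the bridge windows from _CRS, and scan the child buses.
 */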
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int busnum = root->secondary.start;
	struct pci_bus *bus;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd->domain = domain;
	sd->node = node;
	/*
	 * The desired PCI bus may already have been scanned; in that case
	 * there is no need to rescan the bus for this domain/busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exists, the content of bus->sysdata will
		 * be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(sd);
	} else {
		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
		if (bus) {
			get_current_resources(device, busnum, domain, bus);
			bus->subordinate = pci_scan_child_bus(bus);
		}
	}

	if (!bus)
		kfree(sd);

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}

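/*
 * Hand x86 PCI IRQ setup over to ACPI: register the ACPI enable/disable hooks
 * and, with "pci=routeirq", route interrupts for every device up front.
 */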
int __init pci_acpi_init(void)
{
	struct pci_dev *dev = NULL;

	if (acpi_noirq)
		return -ENODEV;

	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
	acpi_irq_penalty_init();
	pcibios_enable_irq = acpi_pci_irq_enable;
	pcibios_disable_irq = acpi_pci_irq_disable;
	x86_init.pci.init_irq = x86_init_noop;

	if (pci_routeirq) {
		/*
		 * PCI IRQ routing is set up by pci_enable_device(), but we
		 * also do it here in case there are still broken drivers that
		 * don't use pci_enable_device().
		 */
		printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
		for_each_pci_dev(dev)
			acpi_pci_irq_enable(dev);
	}

	return 0;
}