/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
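
/*
 * Worked example (illustrative): seg 0x1, bus 2, devfn 0x18 (device 3,
 * function 0), reg 0x44 encodes as (1 << 24) | (2 << 16) | (0x18 << 8) |
 * 0x44 = 0x1021844 in the compatible format, and as (1 << 28) |
 * (2 << 20) | (0x18 << 12) | 0x44 = 0x10218044 in the extended format.
 */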

static int
pci_sal_read (unsigned int seg, unsigned int bus, unsigned int devfn,
	      int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

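	/*
	 * Both seg and reg fit the compatible (pre-SAL 3.2) encoding only
	 * when each is <= 255; OR-ing them tests both bounds at once.
	 */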
	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}

static int
pci_sal_write (unsigned int seg, unsigned int bus, unsigned int devfn,
	       int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}

static struct pci_raw_ops pci_sal_ops = {
	.read =		pci_sal_read,
	.write =	pci_sal_write
};

struct pci_raw_ops *raw_pci_ops = &pci_sal_ops;

static int
pci_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_ops->read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}

static int
pci_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	return raw_pci_ops->write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
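
/*
 * pci_root_ops is passed to pci_scan_bus_parented() in
 * pci_acpi_scan_root() below, so all config space accesses on
 * ACPI-discovered root buses go through the SAL accessors above.
 */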

/* Called by ACPI when it finds a new root bus.  */

static struct pci_controller * __devinit
alloc_pci_controller (int seg)
{
	struct pci_controller *controller;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->segment = seg;
	controller->node = -1;
	return controller;
}

struct pci_root_info {
	struct pci_controller *controller;
	char *name;
};

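/*
 * Find (or create) the io_space[] entry for the given MMIO base and
 * sparseness.  Returns the entry's index, or ~0 if the table is full.
 */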
static unsigned int
new_space (u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		printk(KERN_ERR "PCI: Too many IO port spaces "
			"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}

static u64 __devinit
add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
{
	struct resource *resource;
	char *name;
	u64 base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
			info->name);
		goto out;
	}

	len = strlen(info->name) + 32;
	name = kzalloc(len, GFP_KERNEL);
	if (!name) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
			info->name);
		goto free_resource;
	}

	min = addr->minimum;
	max = min + addr->address_length - 1;
	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	space_nr = new_space(addr->translation_offset, sparse);
	if (space_nr == ~0)
		goto free_name;

	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
		base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

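	/*
	 * With sparse translation, each port is spread out in the MMIO
	 * space (see IO_SPACE_SPARSE_ENCODING in asm/io.h), so the
	 * resource claimed below can span a much larger MMIO range than
	 * the raw port range would suggest.
	 */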
	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	insert_resource(&iomem_resource, resource);

	return base_port;

free_name:
	kfree(name);
free_resource:
	kfree(resource);
out:
	return ~0;
}

static acpi_status __devinit resource_to_window(struct acpi_resource *resource,
	struct acpi_resource_address64 *addr)
{
	acpi_status status;

	/*
	 * We're only interested in _CRS descriptors that are
	 *	- address space descriptors for memory or I/O space
	 *	- non-zero size
	 *	- producers, i.e., the address space is routed downstream,
	 *	  not consumed by the bridge itself
	 */
	status = acpi_resource_to_address64(resource, addr);
	if (ACPI_SUCCESS(status) &&
	    (addr->resource_type == ACPI_MEMORY_RANGE ||
	     addr->resource_type == ACPI_IO_RANGE) &&
	    addr->address_length &&
	    addr->producer_consumer == ACPI_PRODUCER)
		return AE_OK;

	return AE_ERROR;
}

static acpi_status __devinit
count_window (struct acpi_resource *resource, void *data)
{
	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_window(resource, &addr);
	if (ACPI_SUCCESS(status))
		(*windows)++;

	return AE_OK;
}

static acpi_status __devinit add_window(struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = data;
	struct pci_window *window;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	/* Return AE_OK for non-window resources to keep scanning for more */
	status = resource_to_window(res, &addr);
	if (ACPI_FAILURE(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		offset = add_io_space(info, &addr);
		if (offset == ~0)
			return AE_OK;
	} else
		return AE_OK;

	window = &info->controller->window[info->controller->windows++];
	window->resource.name = info->name;
	window->resource.flags = flags;
	window->resource.start = addr.minimum + offset;
	window->resource.end = window->resource.start + addr.address_length - 1;
	window->resource.child = NULL;
	window->offset = offset;

	if (insert_resource(root, &window->resource)) {
		printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n",
			window->resource.start, window->resource.end,
			root->name, info->name);
	}

	return AE_OK;
}

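/*
 * Hand the controller's windows to the root bus so the resource
 * allocator can see them.  Degenerate (sub-16-byte) memory windows are
 * skipped, as is anything beyond the PCI_BUS_NUM_RESOURCES slots a bus
 * can hold.
 */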
static void __devinit
pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
{
	int i, j;

	j = 0;
	for (i = 0; i < ctrl->windows; i++) {
		struct resource *res = &ctrl->window[i].resource;
		if ((res->flags & IORESOURCE_MEM) &&
		    (res->end - res->start < 16))
			continue;
		if (j >= PCI_BUS_NUM_RESOURCES) {
			printk(KERN_WARNING
				"Ignoring range [%lx-%lx] (%lx)\n", res->start,
				res->end, res->flags);
			continue;
		}
		bus->resource[j++] = res;
	}
}

struct pci_bus * __devinit
pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
{
	struct pci_root_info info;
	struct pci_controller *controller;
	unsigned int windows = 0;
	struct pci_bus *pbus;
	char *name;
	int pxm;

	controller = alloc_pci_controller(domain);
	if (!controller)
		goto out1;

	controller->acpi_handle = device->handle;

	pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
	if (pxm >= 0)
		controller->node = pxm_to_node(pxm);
#endif

	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
			&windows);
	if (windows) {
		controller->window =
			kmalloc_node(sizeof(*controller->window) * windows,
				     GFP_KERNEL, controller->node);
		if (!controller->window)
			goto out2;
	}

	name = kmalloc(16, GFP_KERNEL);
	if (!name)
		goto out3;

	sprintf(name, "PCI Bus %04x:%02x", domain, bus);
	info.controller = controller;
	info.name = name;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, add_window,
			&info);

	pbus = pci_scan_bus_parented(NULL, bus, &pci_root_ops, controller);
	if (pbus)
		pcibios_setup_root_windows(pbus, controller);

	return pbus;

out3:
	kfree(controller->window);
out2:
	kfree(controller);
out1:
	return NULL;
}

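/*
 * Host bridge windows may translate addresses: a resource at CPU
 * physical address P inside a window with offset O appears at bus
 * address P - O.  For example (illustrative), a window with offset
 * 0x80000000 maps CPU address 0x80001000 to bus address 0x1000.
 */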
void pcibios_resource_to_bus(struct pci_dev *dev,
		struct pci_bus_region *region, struct resource *res)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	unsigned long offset = 0;
	int i;

	for (i = 0; i < controller->windows; i++) {
		struct pci_window *window = &controller->window[i];
		if (!(window->resource.flags & res->flags))
			continue;
		if (window->resource.start > res->start)
			continue;
		if (window->resource.end < res->end)
			continue;
		offset = window->offset;
		break;
	}

	region->start = res->start - offset;
	region->end = res->end - offset;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);

void pcibios_bus_to_resource(struct pci_dev *dev,
		struct resource *res, struct pci_bus_region *region)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	unsigned long offset = 0;
	int i;

	for (i = 0; i < controller->windows; i++) {
		struct pci_window *window = &controller->window[i];
		if (!(window->resource.flags & res->flags))
			continue;
		if (window->resource.start - window->offset > region->start)
			continue;
		if (window->resource.end - window->offset < region->end)
			continue;
		offset = window->offset;
		break;
	}

	res->start = region->start + offset;
	res->end = region->end + offset;
}
EXPORT_SYMBOL(pcibios_bus_to_resource);

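/*
 * A device resource is only usable if it has been assigned and falls
 * inside one of its bus's windows; anything outside them was never
 * routed by the bridge and must not be claimed.
 */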
static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
{
	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
	struct resource *devr = &dev->resource[idx];

	if (!dev->bus)
		return 0;
	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *busr = dev->bus->resource[i];

		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
			continue;
		if (devr->start && devr->start >= busr->start &&
				devr->end <= busr->end)
			return 1;
	}
	return 0;
}

static void __devinit
pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
{
	struct pci_bus_region region;
	int i;

	for (i = start; i < limit; i++) {
		if (!dev->resource[i].flags)
			continue;
		region.start = dev->resource[i].start;
		region.end = dev->resource[i].end;
		pcibios_bus_to_resource(dev, &dev->resource[i], &region);
		if (is_valid_resource(dev, i))
			pci_claim_resource(dev, i);
	}
}

void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);

static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
}

/*
 *  Called after each bus is probed, but before its children are examined.
 */
void __devinit
pcibios_fixup_bus (struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
	platform_pci_fixup_bus(b);
}

void __devinit
pcibios_update_irq (struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}

static inline int
pcibios_enable_resources (struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;

	if (!dev)
		return -EINVAL;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
		/* Only set up the desired resources.  */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (!(r->flags & type_mask))
			continue;
		if ((idx == PCI_ROM_RESOURCE) &&
				!(r->flags & IORESOURCE_ROM_ENABLE))
			continue;
		if (!r->start && r->end) {
			printk(KERN_ERR
			       "PCI: Device %s not available because of resource collisions\n",
			       pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
	int ret;

	ret = pcibios_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!dev->msi_enabled)
		return acpi_pci_irq_enable(dev);
	return 0;
}

void
pcibios_disable_device (struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!dev->msi_enabled)
		acpi_pci_irq_disable(dev);
}

void
pcibios_align_resource (void *data, struct resource *res,
		        resource_size_t size, resource_size_t align)
{
}

/*
 * PCI BIOS setup, always defaults to SAL interface.  No options are
 * handled here; returning the string unchanged tells the caller it
 * was not consumed.
 */
char * __init
pcibios_setup (char *str)
{
	return str;
}

int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	/*
	 * Leave vm_pgoff as-is, the PCI space address is the physical
	 * address on this platform.
	 */
	if (write_combine && efi_range_is_wc(vma->vm_start,
					     vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}

/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* It's defined in drivers/pci/pci.c */
extern u8 pci_cache_line_size;

/**
 * set_pci_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_cacheline_size(void)
{
	u64 levels, unique_caches;
	s64 status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified) = */ 2, &cci);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}
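	/*
	 * pcci_line_size is the log2 of the line size in bytes, and the
	 * PCI cacheline-size register is specified in 32-bit words, hence
	 * the division by 4.  E.g. a 128-byte line gives pcci_line_size = 7
	 * and a register value of 128 / 4 = 32.
	 */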
	pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
}

static int __init pcibios_init(void)
{
	set_pci_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);