#ifndef _ASM_IA64_PCI_H
#define _ASM_IA64_PCI_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/hw_irq.h>

/*
 * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
 * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
 * loader.
 */
#define pcibios_assign_all_busses()     0
#define pcibios_scan_all_fns(a, b)	0

#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

void pcibios_config_init(void);

struct pci_dev;

/*
 * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
 * correspondence between device bus addresses and CPU physical addresses.
 * Platforms with a hardware I/O MMU _must_ turn this off to suppress the
 * bounce buffer handling code in the block and network device layers.
 * Platforms with separate bus address spaces _must_ turn this off and provide
 * a device DMA mapping implementation that takes care of the necessary
 * address translation.
 *
 * For now, the ia64 platforms which may have separate/multiple bus address
 * spaces all have I/O MMUs which support the merging of physically
 * discontiguous buffers, so we can use that as the sole factor to determine
 * the setting of PCI_DMA_BUS_IS_PHYS.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
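/*
 * Illustrative sketch only (not part of this interface): block-layer drivers
 * of this era typically consult PCI_DMA_BUS_IS_PHYS when picking a bounce
 * buffer limit, roughly along these lines ('q' and 'pdev' are names assumed
 * by the example):
 *
 *	if (PCI_DMA_BUS_IS_PHYS)
 *		blk_queue_bounce_limit(q, pdev->dma_mask);
 *	else
 *		blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 */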

static inline void
pcibios_set_master (struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static inline void
pcibios_penalize_isa_irq (int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}

#include <asm-generic/pci-dma-compat.h>

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)			\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)			\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
	(((PTR)->LEN_NAME) = (VAL))
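/*
 * Usage sketch (hypothetical driver structure, mirroring the pattern in
 * Documentation/DMA-mapping.txt; 'ringbuf', 'rb', 'buf', 'sz' and 'dir' are
 * example names only): the driver embeds the unmap state in its own struct
 * at map time and reads it back at unmap time.
 *
 *	struct ringbuf {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(len)
 *	};
 *
 *	pci_unmap_addr_set(rb, mapping, pci_map_single(pdev, buf, sz, dir));
 *	pci_unmap_len_set(rb, len, sz);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(rb, mapping),
 *			 pci_unmap_len(rb, len), dir);
 */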

/* The ia64 platform always supports 64-bit addressing. */
#define pci_dac_dma_supported(pci_dev, mask)		(1)
#define pci_dac_page_to_dma(dev,pg,off,dir)		((dma_addr_t) page_to_bus(pg) + (off))
#define pci_dac_dma_to_page(dev,dma_addr)		(virt_to_page(bus_to_virt(dma_addr)))
#define pci_dac_dma_to_offset(dev,dma_addr)		offset_in_page(dma_addr)
#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir)	do { } while (0)
#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir)	do { mb(); } while (0)
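/*
 * Sketch of the generic DAC (dual address cycle) calling pattern, roughly as
 * described in Documentation/DMA-mapping.txt; on ia64 the helpers above
 * reduce it to trivial page/offset arithmetic ('page', 'offset' and 'len'
 * are example names):
 *
 *	if (pci_dac_dma_supported(pdev, DMA_64BIT_MASK)) {
 *		dma64_addr_t addr;
 *
 *		addr = pci_dac_page_to_dma(pdev, page, offset, PCI_DMA_TODEVICE);
 *		...
 *		pci_dac_dma_sync_single_for_cpu(pdev, addr, len, PCI_DMA_TODEVICE);
 *	}
 */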

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words. */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;	/* no cacheline size set; use a 1K default */
	else
		cacheline_size = (int) byte * 4;	/* convert words to bytes */

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}
#endif
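/*
 * Example use (illustrative only; 'strat' and 'param' are local names):
 *
 *	enum pci_dma_burst_strategy strat;
 *	unsigned long param;
 *
 *	pci_dma_burst_advice(pdev, &strat, &param);
 *
 * On ia64 this always yields PCI_DMA_BURST_MULTIPLE with 'param' set to the
 * device's cacheline size in bytes, i.e. bursting in multiples of that size
 * is preferred.
 */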

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
				enum pci_mmap_state mmap_state, int write_combine);
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
				      struct vm_area_struct *vma);
extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
				  size_t count);
extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
				   size_t count);
extern int pci_mmap_legacy_mem(struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma);

#define pci_get_legacy_mem platform_pci_get_legacy_mem
#define pci_legacy_read platform_pci_legacy_read
#define pci_legacy_write platform_pci_legacy_write

struct pci_window {
	struct resource resource;
	u64 offset;
};

struct pci_controller {
	void *acpi_handle;
	void *iommu;
	int segment;
	int node;		/* nearest node with memory or -1 for global allocation */

	unsigned int windows;
	struct pci_window *window;

	void *platform_data;
};
#define PCI_CONTROLLER(busdev) ((struct pci_controller *) (busdev)->sysdata)
#define pci_domain_nr(busdev)    (PCI_CONTROLLER(busdev)->segment)
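/*
 * Example (illustrative): given a struct pci_bus *bus enumerated under an
 * ia64 host bridge, the per-bridge bookkeeping and the PCI domain (segment)
 * number are reached as:
 *
 *	struct pci_controller *ctrl = PCI_CONTROLLER(bus);
 *	int domain = pci_domain_nr(bus);
 *
 * pci_domain_nr() simply returns ctrl->segment here.
 */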

extern struct pci_ops pci_root_ops;

static inline int pci_proc_domain(struct pci_bus *bus)
{
	return (pci_domain_nr(bus) != 0);
}

static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}

extern void pcibios_resource_to_bus(struct pci_dev *dev,
		struct pci_bus_region *region, struct resource *res);

extern void pcibios_bus_to_resource(struct pci_dev *dev,
		struct resource *res, struct pci_bus_region *region);

static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
	struct resource *root = NULL;

	if (res->flags & IORESOURCE_IO)
		root = &ioport_resource;
	if (res->flags & IORESOURCE_MEM)
		root = &iomem_resource;

	return root;
}
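/*
 * Sketch of how the PCI core typically uses this helper (compare
 * pci_claim_resource(); error handling elided):
 *
 *	struct resource *root = pcibios_select_root(dev, res);
 *
 *	if (root != NULL)
 *		err = request_resource(root, res);
 */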

#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
}

#endif /* _ASM_IA64_PCI_H */