#ifndef __ASM_PARISC_PCI_H
#define __ASM_PARISC_PCI_H

#include <asm/scatterlist.h>

/*
** HP PCI platforms generally support multiple bus adapters.
**    (workstations 1-~4, servers 2-~32)
**
** Newer platforms number the busses across PCI bus adapters *sparsely*.
** E.g. 0, 8, 16, ...
**
** Under a PCI bus, most HP platforms support PPBs up to two or three
** levels deep. See "Bit3" product line.
*/
#define PCI_MAX_BUSSES	256

/* [soapbox on]
** Who the hell can develop stuff without ASSERT or VASSERT?
** No one understands all the modules across all platforms.
** For linux add another dimension - processor architectures.
**
** This should be a standard/global macro used liberally
** in all code. Every respectable engineer I know in HP
** would support this argument. - grant
** [soapbox off]
*/
#ifdef PCI_DEBUG
#define ASSERT(expr) \
	do { \
		if (!(expr)) { \
			printk("\n" __FILE__ ":%d: Assertion " #expr " failed!\n", __LINE__); \
			panic(#expr); \
		} \
	} while (0)
#else
#define ASSERT(expr)
#endif


/*
** pci_hba_data (aka H2P_OBJECT in HP/UX)
**
** This is the "common" or "base" data structure which HBA drivers
** (eg Dino or LBA) are required to place at the top of their own
** dev->sysdata structure.  I've heard this called "C inheritance" too.
**
** Data needed by the pcibios layer belongs here.
*/
struct pci_hba_data {
	unsigned long	base_addr;	/* aka Host Physical Address */
	const struct parisc_device *dev; /* device from PA bus walk */
	struct pci_bus *hba_bus;	/* primary PCI bus below HBA */
	int		hba_num;	/* I/O port space access "key" */
	struct resource bus_num;	/* PCI bus numbers */
	struct resource io_space;	/* PIOP */
	struct resource lmmio_space;	/* bus addresses < 4Gb */
	struct resource elmmio_space;	/* additional bus addresses < 4Gb */
	unsigned long   lmmio_space_offset;  /* CPU view - PCI view */
	void *          iommu;          /* IOMMU this device is under */
	/* REVISIT - spinlock to protect resources? */
};

#define HBA_DATA(d)		((struct pci_hba_data *) (d))
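
/*
** Illustrative sketch only (driver name "foo" is hypothetical): an HBA
** driver embeds struct pci_hba_data as the *first* member of its private
** structure and hangs that off ->sysdata, so HBA_DATA() can recover it:
**
**	struct foo_device {
**		struct pci_hba_data hba;	<- must stay first
**		int foo_state;
**	};
**
**	bus->sysdata = foo;			<- struct foo_device *
**	hba = HBA_DATA(bus->sysdata);		<- back to the "base class"
*/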

/*
** We support 2^16 I/O ports per HBA.  These are set up in the form
** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port
** space address.
*/
#define HBA_PORT_SPACE_BITS	16

#define HBA_PORT_BASE(h)	((h) << HBA_PORT_SPACE_BITS)
#define HBA_PORT_SPACE_SIZE	(1UL << HBA_PORT_SPACE_BITS)

#define PCI_PORT_HBA(a)		((a) >> HBA_PORT_SPACE_BITS)
#define PCI_PORT_ADDR(a)	((a) & (HBA_PORT_SPACE_SIZE - 1))
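
/*
** Worked example: I/O port 0x3f8 behind the HBA whose port space "key"
** is 2 shows up as kernel port address 0x0203f8:
**
**	HBA_PORT_BASE(2)       == 0x20000
**	PCI_PORT_HBA(0x203f8)  == 2
**	PCI_PORT_ADDR(0x203f8) == 0x3f8
*/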

/*
** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses.
** Note that we currently support only LMMIO.
*/
#define PCI_BUS_ADDR(hba,a)	((a) - (hba)->lmmio_space_offset)
#define PCI_HOST_ADDR(hba,a)	((a) + (hba)->lmmio_space_offset)
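
/*
** Illustrative example with made-up offsets: if the HBA presents PCI bus
** address 0x80000000 at processor address 0xf0000000, lmmio_space_offset
** is 0x70000000 and
**
**	PCI_BUS_ADDR(hba, 0xf0001000)  == 0x80001000
**	PCI_HOST_ADDR(hba, 0x80001000) == 0xf0001000
*/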

/*
** KLUGE: linux/pci.h includes asm/pci.h BEFORE declaring struct pci_bus
** (this eliminates some of the warnings).
*/
struct pci_bus;
struct pci_dev;

/*
** Most PCI devices (eg Tulip, NCR720) also export the same registers
** to both MMIO and I/O port space.  Due to the poor performance of I/O
** port access under HP PCI bus adapters, use of MMIO address space is
** strongly recommended.
**
** While I'm at it, more PA programming notes:
**
** 1) MMIO stores (writes) are posted operations. This means the processor
**    gets an "ACK" before the write actually gets to the device. A read
**    to the same device (or typically the bus adapter above it) will
**    force in-flight write transaction(s) out to the targeted device
**    before the read can complete.
**
** 2) The Programmed I/O (PIO) data may not always be strongly ordered with
**    respect to DMA on all platforms. I.e. PIO data can reach the processor
**    before in-flight DMA reaches memory. Since most SMP PA platforms
**    are I/O coherent, it generally doesn't matter...but sometimes
**    it does.
**
** I've helped device driver writers debug both types of problems.
*/
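
/*
** Illustrative sketch of note 1 (register names and "dev_regs" are
** hypothetical): a driver that must know a posted MMIO write has reached
** the device reads the device back to flush it:
**
**	writel(CMD_RESET, dev_regs + CMD_REG);	<- posted, may still be in flight
**	(void) readl(dev_regs + STATUS_REG);	<- read pushes the write to the device
*/
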
struct pci_port_ops {
	  u8 (*inb)  (struct pci_hba_data *hba, u16 port);
	 u16 (*inw)  (struct pci_hba_data *hba, u16 port);
	 u32 (*inl)  (struct pci_hba_data *hba, u16 port);
	void (*outb) (struct pci_hba_data *hba, u16 port,  u8 data);
	void (*outw) (struct pci_hba_data *hba, u16 port, u16 data);
	void (*outl) (struct pci_hba_data *hba, u16 port, u32 data);
};


struct pci_bios_ops {
	void (*init)(void);
	void (*fixup_bus)(struct pci_bus *bus);
};

/*
** See Documentation/DMA-mapping.txt
*/
struct pci_dma_ops {
	int  (*dma_supported)(struct pci_dev *dev, u64 mask);
	void *(*alloc_consistent)(struct pci_dev *dev, size_t size, dma_addr_t *iova);
	void (*free_consistent)(struct pci_dev *dev, size_t size, void *vaddr, dma_addr_t iova);
	dma_addr_t (*map_single)(struct pci_dev *dev, void *addr, size_t size, int direction);
	void (*unmap_single)(struct pci_dev *dev, dma_addr_t iova, size_t size, int direction);
	int  (*map_sg)(struct pci_dev *dev, struct scatterlist *sg, int nents, int direction);
	void (*unmap_sg)(struct pci_dev *dev, struct scatterlist *sg, int nhwents, int direction);
	void (*dma_sync_single)(struct pci_dev *dev, dma_addr_t iova, size_t size, int direction);
	void (*dma_sync_sg)(struct pci_dev *dev, struct scatterlist *sg, int nelems, int direction);
};
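
/*
** Illustrative sketch (names are hypothetical; the real instances live in
** arch/parisc code, eg pcxl_dma_ops below): each DMA model fills in one of
** these tables and points hppa_dma_ops at it during boot:
**
**	static struct pci_dma_ops foo_dma_ops = {
**		foo_dma_supported,	(dma_supported)
**		foo_alloc_consistent,	(alloc_consistent)
**		(remaining members in declaration order)
**	};
**	hppa_dma_ops = &foo_dma_ops;
*/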


/*
** We could live without the hppa_dma_ops indirection if we didn't want
** to support 4 different coherent dma models with one binary (they will
** someday be loadable modules):
**
**     I/O MMU        consistent method            dma_sync behavior
**  =============   ========================     =======================
**  a) PA-7x00LC    uncacheable host memory       flush/purge
**  b) U2/Uturn     cacheable host memory         NOP
**  c) Ike/Astro    cacheable host memory         NOP
**  d) EPIC/SAGA    memory on EPIC/SAGA           flush/reset DMA channel
**
** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU.
**
** Systems (eg PCX-T workstations) that don't fall into the above
** categories will need the relevant drivers modified to perform
** flush/purge and to allocate "regular" cacheable pages for everything.
*/

extern struct pci_dma_ops *hppa_dma_ops;

#ifdef CONFIG_PA11
extern struct pci_dma_ops pcxl_dma_ops;
extern struct pci_dma_ops pcx_dma_ops;
#endif

/*
** Oops hard if we haven't set up hppa_dma_ops by the time the first driver
** attempts to initialize.
** Since panic() is a (void)(), pci_dma_panic() is needed to satisfy
** the (int)() required by the pci_dma_supported() interface.
*/
static inline int pci_dma_panic(char *msg)
{
	extern void panic(const char *, ...);	/* linux/kernel.h */
	panic(msg);
	/* NOTREACHED */
	return -1;
}

#define pci_dma_supported(p, m)	( \
	(NULL == hppa_dma_ops) \
	?  pci_dma_panic("Dynamic DMA support missing...OOPS!\n(Hint: was Astro/Ike/U2/Uturn not claimed?)\n") \
	: hppa_dma_ops->dma_supported(p,m) \
)

#define pci_alloc_consistent(p, s, a)	hppa_dma_ops->alloc_consistent(p,s,a)
#define pci_free_consistent(p, s, v, a)	hppa_dma_ops->free_consistent(p,s,v,a)
#define pci_map_single(p, v, s, d)	hppa_dma_ops->map_single(p, v, s, d)
#define pci_unmap_single(p, a, s, d)	hppa_dma_ops->unmap_single(p, a, s, d)
#define pci_map_sg(p, sg, n, d)		hppa_dma_ops->map_sg(p, sg, n, d)
#define pci_unmap_sg(p, sg, n, d)	hppa_dma_ops->unmap_sg(p, sg, n, d)
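
/*
** Typical streaming DMA use in a driver (illustrative; "buf"/"len" are
** placeholders - see Documentation/DMA-mapping.txt):
**
**	dma_addr_t handle;
**
**	handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
**	(tell the device to DMA from "handle", wait for completion)
**	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
*/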

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)			\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)			\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
	(((PTR)->LEN_NAME) = (VAL))
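
/*
** Illustrative use (hypothetical "foo" descriptor): the driver stashes the
** mapping in its own structure via these macros, so on architectures where
** unmap really is a nop the fields (and the bookkeeping) compile away:
**
**	struct foo_tx_desc {
**		struct sk_buff *skb;
**		DECLARE_PCI_UNMAP_ADDR(mapping)
**		DECLARE_PCI_UNMAP_LEN(len)
**	};
**
**	pci_unmap_addr_set(desc, mapping, handle);
**	pci_unmap_len_set(desc, len, size);
**
**	pci_unmap_single(pdev, pci_unmap_addr(desc, mapping),
**			 pci_unmap_len(desc, len), PCI_DMA_TODEVICE);
*/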

/* For U2/Astro/Ike based platforms (which are fully I/O coherent)
** dma_sync is a NOP. Let's keep the performance path short here.
*/
#define pci_dma_sync_single(p, a, s, d)	do { \
	if (hppa_dma_ops->dma_sync_single) \
		hppa_dma_ops->dma_sync_single(p, a, s, d); \
	} while (0)
#define pci_dma_sync_sg(p, sg, n, d)	do { \
	if (hppa_dma_ops->dma_sync_sg) \
		hppa_dma_ops->dma_sync_sg(p, sg, n, d); \
	} while (0)
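
/*
** Illustrative use (hypothetical names): after the device has DMA'd into
** "handle", sync before the CPU reads the data:
**
**	pci_dma_sync_single(pdev, handle, len, PCI_DMA_FROMDEVICE);
**	(CPU may now look at the buffer)
*/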

/* No highmem on parisc, plus we have an IOMMU, so mapping pages is easy. */
#define pci_map_page(dev, page, off, size, dir) \
	pci_map_single(dev, (page_address(page) + (off)), size, dir)
#define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir)

/* Don't support DAC yet. */
#define pci_dac_dma_supported(pci_dev, mask)	(0)

/*
** Stuff declared in arch/parisc/kernel/pci.c
*/
extern struct pci_port_ops *pci_port;
extern struct pci_bios_ops *pci_bios;
extern int pci_post_reset_delay;	/* delay after de-asserting #RESET */
extern int pci_hba_count;
extern struct pci_hba_data *parisc_pci_hba[];

#ifdef CONFIG_PCI
extern void pcibios_register_hba(struct pci_hba_data *);
extern void pcibios_set_master(struct pci_dev *);
extern void pcibios_assign_unassigned_resources(struct pci_bus *);
#else
extern inline void pcibios_register_hba(struct pci_hba_data *x)
{
}
#endif
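
/*
** Illustrative sketch (driver name "foo" and probe details are
** hypothetical): an HBA driver registers its pci_hba_data during the
** PA bus walk so the pcibios layer can find it later:
**
**	static int foo_probe(struct parisc_device *dev)
**	{
**		struct foo_device *foo = kmalloc(sizeof(*foo), GFP_KERNEL);
**
**		if (!foo)
**			return -ENOMEM;
**		memset(foo, 0, sizeof(*foo));
**		foo->hba.dev = dev;
**		pcibios_register_hba(&foo->hba);
**		return 0;
**	}
*/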

/*
** used by drivers/pci/pci.c:pci_do_scan_bus()
**   0 == check if bridge is numbered before re-numbering.
**   1 == pci_do_scan_bus() should automatically number all PCI-PCI bridges.
**
** REVISIT:
**   To date, only alpha sets this to one. We'll need to set this
**   to zero for legacy platforms and one for PAT platforms.
*/
#define pcibios_assign_all_busses()     (pdc_type == PDC_TYPE_PAT)

#define PCIBIOS_MIN_IO          0x10
#define PCIBIOS_MIN_MEM         0x1000 /* NBPG - but pci/setup-res.c dies */

/* Return the index of the PCI controller for device PDEV. */
#define pci_controller_num(PDEV)	(0)

#define GET_IOC(dev) ((struct ioc *)(HBA_DATA((dev)->sysdata)->iommu))

#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
struct ioc;
void * ccio_get_iommu(const struct parisc_device *dev);
struct pci_dev * ccio_get_fake(const struct parisc_device *dev);
int ccio_request_resource(const struct parisc_device *dev,
		struct resource *res);
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align,
		void (*alignf)(void *, struct resource *, unsigned long),
		void *alignf_data);
#else /* !CONFIG_IOMMU_CCIO */
#define ccio_get_iommu(dev) NULL
#define ccio_get_fake(dev) NULL
#define ccio_request_resource(dev, res) request_resource(&iomem_resource, res)
#define ccio_allocate_resource(dev, res, size, min, max, align, alignf, data) \
		allocate_resource(&iomem_resource, res, size, min, max, \
				align, alignf, data)
#endif /* !CONFIG_IOMMU_CCIO */

#ifdef CONFIG_IOMMU_SBA
struct parisc_device;
void * sba_get_iommu(struct parisc_device *dev);
#endif

#endif /* __ASM_PARISC_PCI_H */