#ifndef ASMARM_PCI_H
#define ASMARM_PCI_H

#ifdef __KERNEL__

#include <asm/arch/hardware.h>

/* Forward declaration placed before its first use in the prototypes below. */
struct pci_dev;

static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static inline void pcibios_penalize_isa_irq(int irq)
{
	/* We don't do dynamic PCI IRQ allocation */
}

#include <asm/scatterlist.h>
#include <asm/io.h>

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices,
 * NULL for PCI-like buses (ISA, EISA).
 * Returns non-NULL cpu-view pointer to the buffer if successful and
 * sets *handle to the pci side dma address as well, else *handle
 * is undefined.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle);

/* Free and unmap a consistent DMA buffer.
 * vaddr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_handle must be the same as what *handle was set to.
 *
 * References to the memory and mappings associated with vaddr/dma_handle
 * past this call are illegal.
 */
static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle)
{
	consistent_free(vaddr, size, dma_handle);
}

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single is performed.
 */
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
#ifdef CONFIG_SA1111
	extern dma_addr_t sa1111_map_single(struct pci_dev *, void *, size_t, int);

	/*
	 * for SA1111 these functions are "magic" and relocate buffers.  We
	 * only need to do these if hwdev is non-null; otherwise we expect
	 * the buffer to already be suitable for DMA.
	 */
	if (hwdev != NULL)
		return sa1111_map_single(hwdev, ptr, size, direction);
#endif
	consistent_sync(ptr, size, direction);
	return virt_to_bus(ptr);
}

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
{
#ifdef CONFIG_SA1111
	extern void sa1111_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);

	/* Only SA1111 "magic" (relocated) mappings need explicit teardown. */
	if (hwdev != NULL)
		sa1111_unmap_single(hwdev, dma_addr, size, direction);
#endif
	/* nothing to do */
}

/* Whether pci_unmap_{single,page} is a nop depends upon the
 * configuration.
 */
#ifdef CONFIG_SA1111
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
#else /* !(CONFIG_SA1111) */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)		(0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif /* CONFIG_SA1111 */

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		consistent_sync(sg->address, sg->length, direction);
		sg->dma_address = virt_to_bus(sg->address);
	}

	return nents;
}

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
{
	/* nothing to do */
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, the
 * device again owns the buffer.
 */
static inline void
pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
{
	consistent_sync(bus_to_virt(dma_handle), size, direction);
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single but for a scatter-gather list,
 * same rules and usage.
 */
static inline void
pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(sg->address, sg->length, direction);
}

/* Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
	return 1;
}

/* 64-bit DAC addressing cycles are not supported on this platform. */
#define pci_dac_dma_supported(pci_dev, mask)	(0)

/* Return the index of the PCI controller for device PDEV. */
#define pci_controller_num(PDEV)	(0)

#endif /* __KERNEL__ */

#endif