#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/device.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;

void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);

/*
 * These macros should be used after a dma_map_sg call has been done
 * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries dma_map_sg
 * returns, or alternatively stop on the first sg_dma_len(sg) which
 * is 0.
 */
#define sg_dma_address(sg)	((sg)->dma_address)
#define sg_dma_len(sg)		((sg)->length)

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either dma_unmap_single or dma_sync_single_for_cpu is performed.
 */
extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
				 enum dma_data_direction direction);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided in a previous dma_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

/*
 * Map a set of buffers described by a scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above dma_map_single interface.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements
 * (for example via virtual mapping capabilities).
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);
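/*
 * Usage sketch (illustrative only, not part of this header): after
 * mapping a scatterlist, a driver works only with the entry count that
 * dma_map_sg returned, reading each bus address and length via the
 * macros above.  "mydev", "sglist" and "nents" are hypothetical names.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(&mydev->dev, sglist, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < count; i++) {
 *		dma_addr_t addr = sg_dma_address(&sglist[i]);
 *		unsigned int len = sg_dma_len(&sglist[i]);
 *		... program one device descriptor with addr/len ...
 *	}
 *	dma_unmap_sg(&mydev->dev, sglist, nents, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg is passed the original nents, not the count
 * that dma_map_sg returned.
 */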
/*
 * Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

extern
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
			size_t size, enum dma_data_direction direction);

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}


static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			     enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
				enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
			    enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

static inline
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must fit within a tighter
	 * range than GFP_DMA provides.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline
int dma_get_cache_alignment(void)
{
	return 1 << L1_CACHE_SHIFT;
}

#define dma_is_consistent(d, h)	(1)

static inline
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	flush_write_buffers();
}

#endif /* _ASM_DMA_MAPPING_H */
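/*
 * Usage sketch (illustrative only, not part of this header): a driver
 * should negotiate its DMA mask before setting up any mappings.  With
 * the dma_supported() above, any mask covering at least the low 24
 * bits is accepted.  "mydev" is a hypothetical name.
 *
 *	if (dma_set_mask(&mydev->dev, 0xffffffffULL))
 *		return -EIO;
 *
 *	... dma_alloc_coherent()/dma_map_single() may now be used ...
 */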