#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern struct bus_type pci_bus_type;

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	/* A board may override the allocator through the machine vector. */
	if (sh_mv.mv_consistent_alloc) {
		void *ret;

		ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
		if (ret != NULL)
			return ret;
	}

	return consistent_alloc(flag, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	if (sh_mv.mv_consistent_free) {
		int ret;

		ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
		if (ret == 0)
			return;
	}

	consistent_free(vaddr, size);
}

#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)			(1)

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	consistent_sync(vaddr, size, (int)dir);
}

static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	/* Coherent PCI needs no cache maintenance, only the bus address. */
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_bus(ptr);
}

/* Nothing to tear down: any cache writeback already happened at map time. */
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
#endif /* __ASM_SH_DMA_MAPPING_H */
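
/*
 * Illustrative usage sketch (not part of this header): a driver sending a
 * buffer to a device through the streaming API above might do the
 * following, where my_dev, buf, and len are hypothetical names:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... program the device with 'handle', wait for completion ...
 *	dma_unmap_single(my_dev, handle, len, DMA_TO_DEVICE);
 *
 * On this port, dma_map_single() writes the buffer back from the CPU cache
 * via dma_cache_sync() (skipped for coherent PCI) and returns
 * virt_to_bus(ptr), and dma_unmap_single() is a no-op, so the sequence
 * reduces to a cache writeback plus an address translation.
 */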