/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc
 * (the pci and vio busses).
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
		size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*dac_dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
};
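
/*
 * Illustrative only: bus or platform setup code is expected to point
 * dev->archdata.dma_ops at one of these tables, e.g.
 *
 *	dev->archdata.dma_ops = &dma_direct_ops;
 *
 * before drivers start mapping; the dma_*() wrappers below then simply
 * dispatch through whatever table get_dma_ops() returns for the device.
 */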

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, *dev->dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
			direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}
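
/*
 * Illustrative only (hypothetical "dev", "sglist" and "count"):
 *
 *	int mapped = dma_map_sg(dev, sglist, count, DMA_TO_DEVICE);
 *
 * dma_map_sg() may return fewer usable entries than it was given; the
 * device-visible addresses are then read back with the usual
 * sg_dma_address()/sg_dma_len() accessors, and the whole list is
 * released with dma_unmap_sg(dev, sglist, count, DMA_TO_DEVICE) once
 * the transfer has completed.
 */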

/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

extern unsigned long dma_direct_offset;

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

/* We do nothing. */
#define dma_unmap_single(dev, addr, size, dir)	((void)0)

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

/* We do nothing. */
#define dma_unmap_page(dev, handle, size, dir)	((void)0)

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg->page);
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg->page) + sg->offset;
	}

	return nents;
}

/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir)	((void)0)

#endif /* CONFIG_PPC64 */
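
/*
 * Illustrative only (hypothetical "dev", "buf" and "len"): a typical
 * streaming use of the API above looks like
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... let the device DMA into the buffer ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now look at buf ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *
 * dma_mapping_error() and the sync helpers are defined below.
 */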

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */