/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses and busses using the iommu infrastructure
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>

/*
 * Generic iommu implementation
 */

static inline unsigned long device_to_mask(struct device *dev)
{
        if (dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        /* Assume devices without mask can take 32 bit addresses */
        return 0xfffffffful;
}


/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag)
{
        return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
                                    device_to_mask(dev), flag,
                                    dev->archdata.numa_node);
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle)
{
        iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address of the buffer
 * passed here is the kernel (virtual) address of the buffer. The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
                                       size_t size,
                                       enum dma_data_direction direction)
{
        return iommu_map_single(dev->archdata.dma_data, vaddr, size,
                                device_to_mask(dev), direction);
}


static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
                                   size_t size,
                                   enum dma_data_direction direction)
{
        iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
}


static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
{
        return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
                            device_to_mask(dev), direction);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
                               int nelems, enum dma_data_direction direction)
{
        iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
}

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
        struct iommu_table *tbl = dev->archdata.dma_data;

        if (!tbl || tbl->it_offset > mask) {
                printk(KERN_INFO
                       "Warning: IOMMU offset too big for device mask\n");
                if (tbl)
                        printk(KERN_INFO
                               "mask: 0x%08lx, table offset: 0x%08lx\n",
                               mask, tbl->it_offset);
                else
                        printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
                               mask);
                return 0;
        } else
                return 1;
}

struct dma_mapping_ops dma_iommu_ops = {
        .alloc_coherent = dma_iommu_alloc_coherent,
        .free_coherent  = dma_iommu_free_coherent,
        .map_single     = dma_iommu_map_single,
        .unmap_single   = dma_iommu_unmap_single,
        .map_sg         = dma_iommu_map_sg,
        .unmap_sg       = dma_iommu_unmap_sg,
        .dma_supported  = dma_iommu_dma_supported,
};
EXPORT_SYMBOL(dma_iommu_ops);
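
/*
 * Illustrative sketch (not part of the original file): every iommu callback
 * above dereferences dev->archdata.dma_data as the device's struct
 * iommu_table, so platform bus-setup code has to fill that in (and select
 * dma_iommu_ops) before any mapping is attempted.  A minimal hook might look
 * like the function below; "example_iommu_dev_setup" is a hypothetical name,
 * and it assumes this era's dev_archdata layout with a dma_ops pointer
 * alongside dma_data.
 */
static void example_iommu_dev_setup(struct device *dev,
                                    struct iommu_table *tbl)
{
        /* The table would normally come from the platform's bridge or
         * firmware code; here it is simply passed in by the caller. */
        dev->archdata.dma_data = tbl;
        dev->archdata.dma_ops = &dma_iommu_ops;
}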

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a global offset that can be applied if
 * the address at which memory is visible to devices is not 0.
 */
unsigned long dma_direct_offset;

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
{
        struct page *page;
        void *ret;
        int node = dev->archdata.numa_node;

        /* Allocate on the device's NUMA node so the buffer is local to it */
        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_abs(ret) | dma_direct_offset;

        return ret;
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle)
{
        free_pages((unsigned long)vaddr, get_order(size));
}

static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
                                        size_t size,
                                        enum dma_data_direction direction)
{
        return virt_to_abs(ptr) | dma_direct_offset;
}

static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
                                    size_t size,
                                    enum dma_data_direction direction)
{
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction direction)
{
        int i;

        for (i = 0; i < nents; i++, sg++) {
                sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
                        dma_direct_offset;
                sg->dma_length = sg->length;
        }

        return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
        /* Could be improved to check against the amount of memory, though
         * that had better be done via some global so platforms can set
         * the limit in case they have limited DMA windows
         */
        return mask >= DMA_32BIT_MASK;
}

struct dma_mapping_ops dma_direct_ops = {
        .alloc_coherent = dma_direct_alloc_coherent,
        .free_coherent  = dma_direct_free_coherent,
        .map_single     = dma_direct_map_single,
        .unmap_single   = dma_direct_unmap_single,
        .map_sg         = dma_direct_map_sg,
        .unmap_sg       = dma_direct_unmap_sg,
        .dma_supported  = dma_direct_dma_supported,
};
EXPORT_SYMBOL(dma_direct_ops);
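
/*
 * Illustrative sketch (not part of the original file): how a platform whose
 * devices see memory at a non-zero bus address might hook up the direct ops
 * above.  The function name and the 0x80000000ul value are made up for
 * illustration; because dma_direct_map_single() ORs dma_direct_offset into
 * the absolute address rather than adding it, such an offset only behaves
 * like a plain shift when it occupies address bits above all of RAM (here,
 * RAM is assumed to live below 2GB).
 */
static void example_direct_dma_setup(struct device *dev)
{
        /* Memory becomes visible to the device starting at bus address 2GB */
        dma_direct_offset = 0x80000000ul;

        /* The direct ops never look at archdata.dma_data */
        dev->archdata.dma_ops = &dma_direct_ops;
}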