/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27219820Sjeff */ 28219820Sjeff#ifndef _LINUX_DMA_MAPPING_H_ 29219820Sjeff#define _LINUX_DMA_MAPPING_H_ 30219820Sjeff 31219820Sjeff#include <linux/types.h> 32219820Sjeff#include <linux/device.h> 33219820Sjeff#include <linux/err.h> 34219820Sjeff#include <linux/dma-attrs.h> 35219820Sjeff#include <linux/scatterlist.h> 36219820Sjeff#include <linux/mm.h> 37219820Sjeff#include <linux/page.h> 38219820Sjeff 39219820Sjeff#include <sys/systm.h> 40219820Sjeff#include <sys/malloc.h> 41219820Sjeff 42219820Sjeff#include <vm/vm.h> 43219820Sjeff#include <vm/vm_page.h> 44219820Sjeff#include <vm/pmap.h> 45219820Sjeff 46219820Sjeff#include <machine/bus.h> 47219820Sjeff#include <machine/pmap.h> 48219820Sjeff 49219820Sjeffenum dma_data_direction { 50219820Sjeff DMA_BIDIRECTIONAL = 0, 51219820Sjeff DMA_TO_DEVICE = 1, 52219820Sjeff DMA_FROM_DEVICE = 2, 53219820Sjeff DMA_NONE = 3, 54219820Sjeff}; 55219820Sjeff 56219820Sjeffstruct dma_map_ops { 57219820Sjeff void* (*alloc_coherent)(struct device *dev, size_t size, 58219820Sjeff dma_addr_t *dma_handle, gfp_t gfp); 59219820Sjeff void (*free_coherent)(struct device *dev, size_t size, 60219820Sjeff void *vaddr, dma_addr_t dma_handle); 61219820Sjeff dma_addr_t (*map_page)(struct device *dev, struct page *page, 62219820Sjeff unsigned long offset, size_t size, enum dma_data_direction dir, 63219820Sjeff struct dma_attrs *attrs); 64219820Sjeff void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, 65219820Sjeff size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); 66219820Sjeff int (*map_sg)(struct device *dev, struct scatterlist *sg, 67219820Sjeff int nents, enum dma_data_direction dir, struct dma_attrs *attrs); 68219820Sjeff void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents, 69219820Sjeff enum dma_data_direction dir, struct dma_attrs *attrs); 70219820Sjeff void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle, 71219820Sjeff size_t size, enum dma_data_direction dir); 72219820Sjeff void 
(*sync_single_for_device)(struct device *dev, 73219820Sjeff dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); 74219820Sjeff void (*sync_single_range_for_cpu)(struct device *dev, 75219820Sjeff dma_addr_t dma_handle, unsigned long offset, size_t size, 76219820Sjeff enum dma_data_direction dir); 77219820Sjeff void (*sync_single_range_for_device)(struct device *dev, 78219820Sjeff dma_addr_t dma_handle, unsigned long offset, size_t size, 79219820Sjeff enum dma_data_direction dir); 80219820Sjeff void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, 81219820Sjeff int nents, enum dma_data_direction dir); 82219820Sjeff void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg, 83219820Sjeff int nents, enum dma_data_direction dir); 84219820Sjeff int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); 85219820Sjeff int (*dma_supported)(struct device *dev, u64 mask); 86219820Sjeff int is_phys; 87219820Sjeff}; 88219820Sjeff 89219820Sjeff#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1)) 90219820Sjeff 91219820Sjeffstatic inline int 92219820Sjeffdma_supported(struct device *dev, u64 mask) 93219820Sjeff{ 94219820Sjeff 95219820Sjeff /* XXX busdma takes care of this elsewhere. */ 96219820Sjeff return (1); 97219820Sjeff} 98219820Sjeff 99219820Sjeffstatic inline int 100219820Sjeffdma_set_mask(struct device *dev, u64 dma_mask) 101219820Sjeff{ 102219820Sjeff 103219820Sjeff if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 104219820Sjeff return -EIO; 105219820Sjeff 106219820Sjeff *dev->dma_mask = dma_mask; 107219820Sjeff return (0); 108219820Sjeff} 109219820Sjeff 110219820Sjeffstatic inline int 111219820Sjeffdma_set_coherent_mask(struct device *dev, u64 mask) 112219820Sjeff{ 113219820Sjeff 114219820Sjeff if (!dma_supported(dev, mask)) 115219820Sjeff return -EIO; 116219820Sjeff /* XXX Currently we don't support a seperate coherent mask. 
*/ 117219820Sjeff return 0; 118219820Sjeff} 119219820Sjeff 120219820Sjeffstatic inline void * 121219820Sjeffdma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 122219820Sjeff gfp_t flag) 123219820Sjeff{ 124219820Sjeff vm_paddr_t high; 125219820Sjeff size_t align; 126219820Sjeff void *mem; 127219820Sjeff 128219820Sjeff if (dev->dma_mask) 129219820Sjeff high = *dev->dma_mask; 130219820Sjeff else 131219820Sjeff high = BUS_SPACE_MAXADDR_32BIT; 132219820Sjeff align = PAGE_SIZE << get_order(size); 133254025Sjeff mem = (void *)kmem_alloc_contig(kmem_arena, size, flag, 0, high, align, 134219820Sjeff 0, VM_MEMATTR_DEFAULT); 135219820Sjeff if (mem) 136219820Sjeff *dma_handle = vtophys(mem); 137219820Sjeff else 138219820Sjeff *dma_handle = 0; 139219820Sjeff return (mem); 140219820Sjeff} 141219820Sjeff 142219820Sjeffstatic inline void 143219820Sjeffdma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 144219820Sjeff dma_addr_t dma_handle) 145219820Sjeff{ 146219820Sjeff 147254025Sjeff kmem_free(kmem_arena, (vm_offset_t)cpu_addr, size); 148219820Sjeff} 149219820Sjeff 150219820Sjeff/* XXX This only works with no iommu. 
*/ 151219820Sjeffstatic inline dma_addr_t 152219820Sjeffdma_map_single_attrs(struct device *dev, void *ptr, size_t size, 153219820Sjeff enum dma_data_direction dir, struct dma_attrs *attrs) 154219820Sjeff{ 155219820Sjeff 156219820Sjeff return vtophys(ptr); 157219820Sjeff} 158219820Sjeff 159219820Sjeffstatic inline void 160219820Sjeffdma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, 161219820Sjeff enum dma_data_direction dir, struct dma_attrs *attrs) 162219820Sjeff{ 163219820Sjeff} 164219820Sjeff 165219820Sjeffstatic inline int 166219820Sjeffdma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, 167219820Sjeff enum dma_data_direction dir, struct dma_attrs *attrs) 168219820Sjeff{ 169219820Sjeff struct scatterlist *sg; 170219820Sjeff int i; 171219820Sjeff 172219820Sjeff for_each_sg(sgl, sg, nents, i) 173219820Sjeff sg_dma_address(sg) = sg_phys(sg); 174219820Sjeff 175219820Sjeff return (nents); 176219820Sjeff} 177219820Sjeff 178219820Sjeffstatic inline void 179219820Sjeffdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, 180219820Sjeff enum dma_data_direction dir, struct dma_attrs *attrs) 181219820Sjeff{ 182219820Sjeff} 183219820Sjeff 184219820Sjeffstatic inline dma_addr_t 185219820Sjeffdma_map_page(struct device *dev, struct page *page, 186219820Sjeff unsigned long offset, size_t size, enum dma_data_direction direction) 187219820Sjeff{ 188219820Sjeff 189219820Sjeff return VM_PAGE_TO_PHYS(page) + offset; 190219820Sjeff} 191219820Sjeff 192219820Sjeffstatic inline void 193219820Sjeffdma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, 194219820Sjeff enum dma_data_direction direction) 195219820Sjeff{ 196219820Sjeff} 197219820Sjeff 198219820Sjeffstatic inline void 199219820Sjeffdma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, 200219820Sjeff enum dma_data_direction direction) 201219820Sjeff{ 202219820Sjeff} 203219820Sjeff 204219820Sjeffstatic inline void 
205219820Sjeffdma_sync_single(struct device *dev, dma_addr_t addr, size_t size, 206219820Sjeff enum dma_data_direction dir) 207219820Sjeff{ 208219820Sjeff dma_sync_single_for_cpu(dev, addr, size, dir); 209219820Sjeff} 210219820Sjeff 211219820Sjeffstatic inline void 212219820Sjeffdma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, 213219820Sjeff size_t size, enum dma_data_direction direction) 214219820Sjeff{ 215219820Sjeff} 216219820Sjeff 217219820Sjeffstatic inline void 218219820Sjeffdma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, 219219820Sjeff enum dma_data_direction direction) 220219820Sjeff{ 221219820Sjeff} 222219820Sjeff 223219820Sjeffstatic inline void 224219820Sjeffdma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, 225219820Sjeff enum dma_data_direction direction) 226219820Sjeff{ 227219820Sjeff} 228219820Sjeff 229219820Sjeffstatic inline void 230219820Sjeffdma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, 231219820Sjeff unsigned long offset, size_t size, int direction) 232219820Sjeff{ 233219820Sjeff} 234219820Sjeff 235219820Sjeffstatic inline void 236219820Sjeffdma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, 237219820Sjeff unsigned long offset, size_t size, int direction) 238219820Sjeff{ 239219820Sjeff} 240219820Sjeff 241219820Sjeffstatic inline int 242219820Sjeffdma_mapping_error(struct device *dev, dma_addr_t dma_addr) 243219820Sjeff{ 244219820Sjeff 245219820Sjeff return (0); 246219820Sjeff} 247219820Sjeff 248255932Salfredstatic inline unsigned int dma_set_max_seg_size(struct device *dev, 249255932Salfred unsigned int size) 250255932Salfred{ 251255932Salfred return (0); 252255932Salfred} 253255932Salfred 254255932Salfred 255219820Sjeff#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) 256219820Sjeff#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) 257219820Sjeff#define dma_map_sg(d, 
s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) 258219820Sjeff#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) 259219820Sjeff 260219820Sjeff#define DEFINE_DMA_UNMAP_ADDR(name) dma_addr_t name 261219820Sjeff#define DEFINE_DMA_UNMAP_LEN(name) __u32 name 262219820Sjeff#define dma_unmap_addr(p, name) ((p)->name) 263219820Sjeff#define dma_unmap_addr_set(p, name, v) (((p)->name) = (v)) 264219820Sjeff#define dma_unmap_len(p, name) ((p)->name) 265219820Sjeff#define dma_unmap_len_set(p, name, v) (((p)->name) = (v)) 266219820Sjeff 267219820Sjeffextern int uma_align_cache; 268219820Sjeff#define dma_get_cache_alignment() uma_align_cache 269219820Sjeff 270219820Sjeff#endif /* _LINUX_DMA_MAPPING_H_ */ 271