/*
 * include/asm-xtensa/dma-mapping.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H

#include <asm/scatterlist.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>

/*
 * DMA-consistent mapping functions.
 */

extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void*, size_t, dma_addr_t);
extern void consistent_sync(void*, size_t, int);
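
/*
 * There is no special support for non-coherent allocations; they simply
 * fall back to the coherent variants.
 */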
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle);
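
/*
 * Illustrative sketch of coherent-buffer usage ('dev' and 'size' are
 * assumed to come from the caller):
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *
 *	if (vaddr) {
 *		... give 'handle' to the device, access the buffer via 'vaddr' ...
 *		dma_free_coherent(dev, size, vaddr, handle);
 *	}
 */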
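
/*
 * Streaming mappings: on this port the DMA address of a buffer is simply
 * its physical address, so mapping reduces to the cache maintenance
 * performed by consistent_sync().
 */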
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	consistent_sync(ptr, size, direction);
	return virt_to_phys(ptr);
}
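
/*
 * Nothing to do on unmap: the streaming mapping holds no resources, so
 * only the direction sanity check remains.
 */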
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
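
/*
 * Map a scatterlist: each entry's DMA address is the physical address of
 * its page plus the offset, with the cache synchronized for the device.
 */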
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg->page);

		sg->dma_address = page_to_phys(sg->page) + sg->offset;
		consistent_sync(page_address(sg->page) + sg->offset,
				sg->length, direction);
	}

	return nents;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
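
/*
 * The for_cpu and for_device variants below funnel into the same
 * consistent_sync() call, which receives the transfer direction.
 */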
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle) + offset, size,
			direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle) + offset, size,
			direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		 enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(page_address(sg->page) + sg->offset,
				sg->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		 enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(page_address(sg->page) + sg->offset,
				sg->length, dir);
}
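
/*
 * Mappings are plain physical addresses and can never fail, so there is
 * no error state to report.
 */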
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}
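
/*
 * No DMA addressing restrictions are imposed on this port; any mask is
 * accepted.
 */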
static inline int
dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline int
dma_get_cache_alignment(void)
{
	return L1_CACHE_BYTES;
}
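
/* All DMA memory on this port is reported as consistent. */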
#define dma_is_consistent(d, h)	(1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	consistent_sync(vaddr, size, direction);
}

#endif	/* _XTENSA_DMA_MAPPING_H */