#ifndef _ASM_SPARC64_DMA_MAPPING_H
#define _ASM_SPARC64_DMA_MAPPING_H

/* for INTERNODE_CACHE_SHIFT, used by dma_get_cache_alignment() below */
#include <linux/cache.h>

#ifdef CONFIG_PCI

/* we implement the API below in terms of the existing PCI one,
 * so include it */
#include <linux/pci.h>
/* need struct page definitions */
#include <linux/mm.h>

#include <asm/of_device.h>
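
/* every dma_* operation below checks that the device really lives on
 * the PCI bus and then forwards to its pci_* counterpart */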

static inline int
dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_dma_supported(to_pci_dev(dev), mask);
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		    dma_addr_t dma_handle)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}
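
/* example (sketch, not part of this header): a hypothetical PCI driver
 * allocating and later freeing a coherent descriptor ring through the
 * wrappers above; "my_pdev" and RING_BYTES are illustrative names only.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&my_pdev->dev, RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&my_pdev->dev, RING_BYTES, ring, ring_dma);
 */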

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}
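
/* example (sketch): streaming DMA for a single outbound buffer; the
 * device must be a PCI device or the BUG_ON()s above will fire.
 * "my_pdev", "buf" and "len" are hypothetical; dma_mapping_error() is
 * defined further below.
 *
 *	dma_addr_t handle = dma_map_single(&my_pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... point the device at "handle" and start the transfer ...
 *	dma_unmap_single(&my_pdev->dev, handle, len, DMA_TO_DEVICE);
 */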

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
				    size, (int)direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
				       size, (int)direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}
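
/* example (sketch): letting the CPU peek at a live DMA_FROM_DEVICE
 * mapping; ownership passes to the CPU with *_for_cpu and back to the
 * device with *_for_device (names are illustrative).
 *
 *	dma_sync_single_for_cpu(&my_pdev->dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may read the buffer here ...
 *	dma_sync_single_for_device(&my_pdev->dev, handle, len, DMA_FROM_DEVICE);
 */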

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return pci_dma_mapping_error(dma_addr);
}

#else

struct device;
struct page;
struct scatterlist;

static inline int
dma_supported(struct device *dev, u64 mask)
{
	BUG();
	return 0;
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG();
	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag)
{
	BUG();
	return NULL;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	BUG();
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG();
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG();
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	BUG();
	return 0;
}

#endif /* PCI */


/* Now for the API extensions over the pci_ one */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}
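
/* example (sketch): padding a buffer size by hand so that unrelated
 * data never shares a cache line with a streaming DMA buffer; "len" is
 * hypothetical.
 *
 *	int align = dma_get_cache_alignment();
 *	size_t padded = (len + align - 1) & ~(size_t)(align - 1);
 */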

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	/* the PCI API cannot sync a sub-range, only a prefix of the
	 * mapping, so sync everything from the start of the mapping
	 * through the end of the requested range (offset + size bytes) */
	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	/* the PCI API cannot sync a sub-range, only a prefix of the
	 * mapping, so sync everything from the start of the mapping
	 * through the end of the requested range (offset + size bytes) */
	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}
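
/* example (sketch): syncing just the first "hdr_len" bytes of a larger
 * mapping so the CPU can inspect a packet header while the device keeps
 * ownership of the rest; names are illustrative.
 *
 *	dma_sync_single_range_for_cpu(&my_pdev->dev, handle, 0, hdr_len,
 *				      DMA_FROM_DEVICE);
 */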

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	/* could define this in terms of the dma_cache ... operations,
	 * but if you get this on a platform, you should convert the platform
	 * to using the generic device DMA API */
	BUG();
}

#endif /* _ASM_SPARC64_DMA_MAPPING_H */