#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

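/*
 * Backing allocator for dma_alloc_coherent()/dma_free_coherent(),
 * provided by the platform's consistent-memory implementation.
 */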
struct pci_dev;
extern struct bus_type pci_bus_type;

extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

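/*
 * No DMA addressing restrictions on SH: any mask is accepted.
 */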
#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

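/*
 * Note: the gfp flag is not passed down; consistent_alloc() takes no
 * gfp argument in this implementation, and the pci_dev argument is
 * unused here (hence NULL).
 */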
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}

#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)			(1)

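/*
 * Flush the CPU cache for a virtually-addressed buffer.  The direction
 * argument is ignored: a full writeback+invalidate is always performed.
 */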
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	dma_cache_wback_inv((unsigned long)vaddr, size);
}

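/*
 * With coherent PCI (CONFIG_PCI and not CONFIG_SH_PCIDMA_NONCOHERENT),
 * mapping is just a virtual-to-bus translation; otherwise the buffer
 * must be flushed from the cache before handing it to the device.
 */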
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_bus(ptr);
}

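/* Unmapping frees no resources here, so it is a no-op. */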
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

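/*
 * Map a scatterlist: flush each entry where the bus is non-coherent
 * and fill in its bus address.  Always succeeds, returning nents.
 */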
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

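/*
 * Sync operations: on coherent PCI there is nothing to do; otherwise
 * translate the bus address back to a virtual one and flush the cache.
 */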
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
}

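/*
 * Like dma_map_sg(): flushes each entry where needed and refreshes the
 * cached bus address of every element.
 */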
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

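/*
 * The for_cpu/for_device variants are not distinguished: both perform
 * the same full writeback+invalidate via dma_sync_single()/dma_sync_sg().
 */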
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family defines its own L1_CACHE_SHIFT;
	 * L1_CACHE_BYTES is derived from it, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

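/* Bus address zero is used as the mapping-error marker. */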
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */