#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

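/*
 * IA-32 is cache-coherent with respect to DMA, so "non-consistent"
 * allocations are simply aliased to the consistent ones.
 */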
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

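/*
 * There is no IOMMU on i386, so mapping a single buffer degenerates
 * to a virt_to_phys() translation: bus and physical addresses are
 * identical.  Typical streaming use (illustrative sketch only; "dev",
 * "buf" and "len" are hypothetical driver-side names, not part of
 * this API):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 *	... point the device at "bus" and start the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */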
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);
	flush_write_buffers();
	return virt_to_phys(ptr);
}

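/*
 * Since mapping sets no state up, unmapping is a no-op beyond
 * sanity-checking the direction.
 */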
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

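/*
 * Mapping a scatterlist likewise needs no remapping hardware: each
 * entry's bus address is just the physical address of its page plus
 * the offset within that page.
 */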
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sg[0].length == 0);

	for (i = 0; i < nents; i++) {
		BUG_ON(!sg[i].page);

		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	flush_write_buffers();
	return nents;
}

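/*
 * Page mappings follow the same identity rule: the bus address is the
 * page's physical address plus the offset into the page.
 */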
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

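/* As with dma_unmap_single(), there is no per-mapping state to tear down. */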
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

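/*
 * The sync operations reflect the coherency model: handing a buffer
 * back to the CPU needs no work at all, while handing it to the
 * device only requires draining the CPU's write buffers so that
 * previously written data is visible on the bus.
 */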
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}

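/*
 * virt_to_phys() cannot fail, so no bus address ever denotes a failed
 * mapping here.
 */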
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

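/*
 * forbid_dac is set when 64-bit ("dual address cycle") addressing is
 * known to be broken on the platform; in that case masks wider than
 * 32 bits are refused.
 */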
extern int forbid_dac;

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	if (forbid_dac > 0 && mask > 0xffffffffULL)
		return 0;

	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

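/*
 * A driver would typically negotiate its mask at probe time, e.g.
 * (hypothetical call site, assuming the DMA_32BIT_MASK constant from
 * <linux/dma-mapping.h>):
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -ENODEV;
 */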
static inline int
dma_get_cache_alignment(void)
{
	/* There is no easy way to get the cache size on all x86
	 * variants, so return the maximum possible to be safe. */
	return (1 << INTERNODE_CACHE_SHIFT);
}

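/*
 * All memory is consistent on this architecture; dma_cache_sync()
 * therefore only needs the same write-buffer flush as the
 * sync-for-device operations above.
 */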
#define dma_is_consistent(d, h)	(1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

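/*
 * Platform hook that lets a device declare a chunk of bus-addressable
 * memory (e.g. on-card RAM) from which dma_alloc_coherent() can then
 * carve per-device allocations.
 */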
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif /* _ASM_I386_DMA_MAPPING_H */