/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

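/*
 * Convert a bus address previously handed out by plat_map_dma_mem() back
 * into the kernel virtual address of the underlying memory.
 */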
static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
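/*
 * In this file that means: for devices without hardware-maintained
 * coherency, dma_alloc_coherent() writes back and invalidates the cached
 * lines and (unless hw_coherentio says I/O is coherent anyway) returns an
 * uncached alias of the buffer via UNCAC_ADDR(), while streaming mappings
 * rely on the explicit cache operations in __dma_sync().
 */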

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000);
}

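/*
 * Translate the device's coherent DMA mask into a GFP zone flag.  Any zone
 * specifier the caller passed in is discarded; devices that cannot address
 * all of memory are steered into ZONE_DMA/ZONE_DMA32 where configured, and
 * __GFP_NORETRY keeps an allocation failure from invoking the OOM killer.
 */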
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
			dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

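/*
 * Like dma_alloc_noncoherent(), but for devices that are not hardware
 * coherent the freshly allocated buffer is flushed from the cache and,
 * unless hw_coherentio is set, the uncached alias of the buffer is
 * returned so that CPU and device always see the same data.
 */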
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	extern int hw_coherentio;
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			if (!hw_coherentio)
				ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	extern int hw_coherentio;
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		if (!hw_coherentio)
			addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
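
/*
 * Illustrative sketch (not part of the original file): the usual driver
 * pattern for a coherent descriptor ring built on the two routines above.
 * "example_ring", "example_ring_alloc" and "example_ring_free" are
 * hypothetical names used only for this example.
 */
struct example_ring {
	void		*cpu_addr;	/* CPU view; uncached on non-coherent MIPS */
	dma_addr_t	bus_addr;	/* value programmed into the device */
	size_t		len;
};

static int __maybe_unused example_ring_alloc(struct device *dev,
	struct example_ring *ring, size_t len)
{
	ring->cpu_addr = dma_alloc_coherent(dev, len, &ring->bus_addr,
					    GFP_KERNEL);
	if (!ring->cpu_addr)
		return -ENOMEM;
	ring->len = len;
	return 0;
}

static void __maybe_unused example_ring_free(struct device *dev,
	struct example_ring *ring)
{
	dma_free_coherent(dev, ring->len, ring->cpu_addr, ring->bus_addr);
}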

static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
		           direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);
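
/*
 * Illustrative sketch (not part of the original file): a typical streaming
 * mapping around a single device transfer, using the map/unmap helpers
 * defined in this file.  dma_map_sg()/dma_unmap_sg() below apply the same
 * per-entry cache maintenance to a scatterlist.  "example_receive" and
 * "example_rx_buf" are hypothetical names used only for this example.
 */
static int __maybe_unused example_receive(struct device *dev,
	void *example_rx_buf, size_t len)
{
	dma_addr_t bus;

	/* Perform the required cache maintenance and get a bus address. */
	bus = dma_map_single(dev, example_rx_buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, bus))
		return -EIO;

	/*
	 * ... program "bus" into the device and wait for the transfer ...
	 */

	/* Make the device-written data visible to the CPU again. */
	dma_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
	return 0;
}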

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
				                   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		__dma_sync(addr, size, direction);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

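/*
 * The sync-for-cpu routines only hit the caches for non-coherent
 * R10000/R12000 systems: those CPUs may speculatively refill cache lines
 * while DMA is in flight, whereas other non-coherent CPUs will not have
 * refetched lines that were invalidated when the buffer was mapped.  The
 * sync-for-device routines, by contrast, must flush for every
 * non-coherent device.
 */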
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);