/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
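
/*
 * Usage sketch (illustrative; the pool sizes 512/4096 below are arbitrary
 * examples, not values mandated by this file): a platform or bus driver
 * that knows its device can only address a limited DMA window registers
 * with dmabounce at probe time, e.g.
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 *
 * and calls dmabounce_unregister_dev(dev) on removal.  After registration,
 * the dma_map_single()/dma_unmap_single() and dma_map_page()/dma_unmap_page()
 * wrappers below transparently bounce any buffer that falls outside the
 * device's DMA window.
 */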

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#include <typedefs.h>
#include <bcmdefs.h>

#undef STATS

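/*
 * DO_STATS(x) executes its argument only when STATS is defined; with STATS
 * left undefined (as forced above) all statistics bookkeeping compiles away.
 */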
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

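/*
 * One safe_buffer tracks each outstanding bounce mapping: the driver's
 * original buffer (ptr/size/direction) and the DMA-safe copy that was
 * substituted for it.
 */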
struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

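/*
 * Per-device state, hung off dev->archdata.dmabounce: the list of currently
 * outstanding safe buffers plus two dma_pools from which most bounce buffers
 * are taken; requests larger than the "large" pool fall back to
 * dma_alloc_coherent() in alloc_safe_buffer().
 */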
struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

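	/*
	 * Pick the smallest pool that fits; anything bigger than the large
	 * pool is allocated directly with dma_alloc_coherent() below.
	 */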
	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		if (dev)
			dev_err(dev, "Trying to %s invalid mapping\n", where);
		else
			pr_err("unknown device: Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

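		/*
		 * For a mask narrower than the full address space,
		 * (mask + 1) & ~mask is the size of the window the device
		 * can address; for a full 32-bit mask it wraps to zero,
		 * which disables the size check below.
		 */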
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out from the DMA mask whether we need to bounce:
		 * the expression below is non-zero when either the first or
		 * the last byte of the buffer has address bits above the
		 * mask, i.e. the buffer is not entirely within the window
		 * the device can address.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * The buffer already lies within the device's DMA window,
		 * so no bouncing is needed; just perform the normal
		 * CPU-to-device cache maintenance for a streaming mapping.
		 */
		__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}

static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * Since we may have written to a page cache page,
			 * we need to ensure that the data will be coherent
			 * with user mappings.
			 */
			__cpuc_flush_dcache_area(ptr, size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
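		/*
		 * Not one of our bounce buffers: just perform the normal
		 * device-to-CPU cache maintenance for this mapping.
		 */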
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t BCMFASTPATH dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);
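
/*
 * Typical driver-side sequence (a sketch only; 'buf' and 'len' are
 * illustrative names, not symbols defined in this file):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		goto fail;
 *	... hand 'handle' to the device and wait for the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * When 'buf' lies outside the device's DMA window, map_single() above has
 * already copied it into a bounce buffer and 'handle' refers to that copy.
 */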

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void BCMFASTPATH dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			     "is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page);

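/*
 * Partial-sync helpers called from the ARM dma-mapping code.  They return 0
 * when the address belongs to one of our bounce buffers (the required copy
 * has then been done here), and non-zero when the caller should carry on
 * with the ordinary cache maintenance path for a non-bounced buffer.
 */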
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");