#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <asm/scatterlist.h>	/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/sched.h>

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */
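
/*
 * A minimal usage sketch (illustrative only; "dev", the pool name and the
 * block size/alignment below are hypothetical, not defined in this file):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	pool = dma_pool_create("descriptors", dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (cpu_addr) {
 *		... program the device with 'dma', touch 'cpu_addr' from the CPU ...
 *		dma_pool_free(pool, cpu_addr, dma);
 *	}
 *	dma_pool_destroy(pool);
 */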

struct dma_pool {	/* the pool */
	struct list_head	page_list;
	spinlock_t		lock;
	size_t			blocks_per_page;
	size_t			size;
	struct device		*dev;
	size_t			allocation;
	char			name [32];
	wait_queue_head_t	waitq;
	struct list_head	pools;
};

struct dma_page {	/* cacheable header for 'allocation' bytes */
	struct list_head	page_list;
	void			*vaddr;
	dma_addr_t		dma;
	unsigned		in_use;
	unsigned long		bitmap [0];
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX (pools_lock);

static ssize_t
show_pools (struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				pool->name,
				blocks, pages * pool->blocks_per_page,
				pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *
dma_pool_create (const char *name, struct device *dev,
	size_t size, size_t align, size_t allocation)
{
	struct dma_pool		*retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		size += align + 1;
		size &= ~(align - 1);
	}

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
	} else if (allocation < size)
		return NULL;

	if (!(retval = kmalloc (sizeof *retval, GFP_KERNEL)))
		return retval;

	strlcpy (retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD (&retval->page_list);
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty (&dev->dma_pools))
			ret = device_create_file (dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add (&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD (&retval->pools);

	return retval;
}
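
/*
 * Example of the @align/@allocation parameters (a sketch; the "foo_qh" pool
 * and its sizes are hypothetical, loosely modelled on USB host controllers
 * that forbid queue heads from crossing a 4 KiByte boundary):
 *
 *	qh_pool = dma_pool_create("foo_qh", dev, 96, 32, 4096);
 *
 * Each block handed out from this pool is 32-byte aligned, at least 96 bytes
 * long, and lies entirely within one 4096-byte dma_alloc_coherent() chunk,
 * because blocks are carved at multiples of the (align-rounded) size inside
 * each chunk of @allocation bytes.
 */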


static struct dma_page *
pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page	*page;
	int		mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent (pool->dev,
					    pool->allocation,
					    &page->dma,
					    mem_flags);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef	CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree (page);
		page = NULL;
	}
	return page;
}


static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void
pool_free_page (struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t	dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma);
	list_del (&page->page_list);
	kfree (page);
}


/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
dma_pool_destroy (struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del (&pool->pools);
	if (pool->dev && list_empty (&pool->dev->dma_pools))
		device_remove_file (pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty (&pool->page_list)) {
		struct dma_page		*page;
		page = list_entry (pool->page_list.next,
				struct dma_page, page_list);
		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del (&page->page_list);
			kfree (page);
		} else
			pool_free_page (pool, page);
	}

	kfree (pool);
}
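
/*
 * Teardown sketch (a hypothetical remove() path): every live block must be
 * returned with dma_pool_free() before the pool is destroyed, otherwise the
 * "busy" warning above fires and the consistent memory is leaked:
 *
 *	dma_pool_free(pool, cpu_addr, dma);	(for each outstanding block)
 *	dma_pool_destroy(pool);
 */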


/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
{
	unsigned long		flags;
	struct dma_page		*page;
	int			map, block;
	size_t			offset;
	void			*retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		int		i;
		/* only cacheable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE (wait, current);

			current->state = TASK_INTERRUPTIBLE;
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}
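
/*
 * Allocation sketch: the CPU uses the returned virtual address while the
 * device is given only the bus address reported through @handle.  Here
 * "struct foo_desc", its "next" member and "prev" are hypothetical names
 * used purely for illustration:
 *
 *	dma_addr_t desc_dma;
 *	struct foo_desc *desc;
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->next = cpu_to_le32(0);
 *	prev->next = cpu_to_le32(desc_dma);	(the device walks dma addresses)
 */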


static struct dma_page *
pool_find_page (struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long		flags;
	struct dma_page		*page;

	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return page;
}


/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page		*page;
	unsigned long		flags;
	int			map, block;

	if ((page = pool_find_page (pool, dma)) == 0) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		return;
	}
	if (page->bitmap [map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		return;
	}
	memset (vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	page->in_use--;
	set_bit (block, &page->bitmap [map]);
	if (waitqueue_active (&pool->waitq))
		wake_up (&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore (&pool->lock, flags);
}
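
/*
 * Worked example of the bitmap bookkeeping above (numbers are illustrative):
 * with pool->size == 32, BITS_PER_LONG == 32 and the default PAGE_SIZE
 * allocation, a block at offset dma - page->dma == 0x0ce0 is block number
 * 0x0ce0 / 32 == 103, so freeing it sets bit 103 % 32 == 7 in bitmap word
 * 103 / 32 == 3, mirroring the clear_bit() done in dma_pool_alloc().
 */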

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
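
/*
 * Sketch of use from a probe() routine (the "foo" names are hypothetical):
 * the pool is tied to @dev through devres, so neither the error path nor
 * the remove() path needs an explicit dma_pool_destroy():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		priv->desc_pool = dmam_pool_create("foo_desc", &pdev->dev,
 *						   sizeof(struct foo_desc), 8, 0);
 *		if (!priv->desc_pool)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */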

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}

EXPORT_SYMBOL (dma_pool_create);
EXPORT_SYMBOL (dma_pool_destroy);
EXPORT_SYMBOL (dma_pool_alloc);
EXPORT_SYMBOL (dma_pool_free);
EXPORT_SYMBOL (dmam_pool_create);
EXPORT_SYMBOL (dmam_pool_destroy);