/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The return value is
 * the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>

#include "mmu_decl.h"

/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
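
/*
 * Worked example (hypothetical base address, 4KB pages): with
 * CONSISTENT_BASE at 0xff100000, CONSISTENT_OFFSET(0xff103000)
 * evaluates to 3, i.e. the fourth page of the consistent region.
 */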

/*
 * Lock protecting the bookkeeping for the consistent mapping region
 * (CONFIG_CONSISTENT_SIZE, 2MB by default) that covers uncached,
 * DMA consistent allocations.
 */
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
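
/*
 * Illustration (hypothetical layout): if the list already holds one entry
 * covering the first two pages above CONSISTENT_BASE, a two-page request
 * walks past that entry (addr becomes its vm_end), sees that addr + size
 * fits before the next entry (or before head->vm_end), and links a new
 * entry in at that point.  The list thus stays sorted by vm_start and
 * allocation is first-fit.
 */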

static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel-remapped
 * virtual address and the bus address for that space.
 */
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	size = PAGE_ALIGN(size);
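	/*
	 * For a mask that is a contiguous run of low bits, mask + 1 is the
	 * size of the range the device can address, so 'limit' works out
	 * to that size (e.g. a 24-bit mask yields a 16MB limit).
	 */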
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#zx mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_phys(page);

		do {
			SetPageReserved(page);
			map_page(vaddr, page_to_phys(page),
				 pgprot_noncached(PAGE_KERNEL));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
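
/*
 * Example usage (hypothetical driver code; normally this is reached via
 * the generic dma_alloc_coherent()/dma_free_coherent() wrappers rather
 * than called directly):
 *
 *	dma_addr_t bus;
 *	void *ring = __dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...program 'bus' into the device; CPU accesses go through the
 *	   uncached 'ring' mapping...
 *	__dma_free_coherent(PAGE_SIZE, ring);
 */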

/*
 * free an area as allocated by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

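	/*
	 * Walk the region one page at a time: look up the kernel PTE for
	 * each virtual address, clear it, and release the underlying page.
	 */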
	addr = c->vm_start;
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				ClearPageReserved(page);
				__free_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);

/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned; otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
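
/*
 * Example (hypothetical streaming-DMA sequence): clean the cache before
 * the device reads a buffer the CPU wrote, and invalidate it before the
 * CPU reads data the device wrote:
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);
 *	... device DMA runs ...
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);
 */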

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
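
/*
 * Worked example (hypothetical values, 4KB pages): offset = 0x300 and
 * size = 0x2000 gives seg_size = 0xd00 and nr_segs = 3, so the loop syncs
 * 0xd00 bytes at the end of the first page, the whole second page, and
 * the first 0x300 bytes of the third page.
 */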
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page makes memory consistent. Identical to __dma_sync, but
 * takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);