/*
 *  Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>

void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

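/*
 * Example (hypothetical caller, not part of this file): a driver that
 * hands a kernel buffer to a device must write the CPU's dirty cache
 * lines back before the device reads the buffer, and invalidate the
 * cache before the CPU reads what the device wrote.  `buf' and `len'
 * are placeholders:
 *
 *	memcpy(buf, tx_data, len);
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *	// ...start the transfer, wait for completion...
 *	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
 */
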
static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}

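/*
 * Worked example of the allocation above (illustration only): a request
 * for size = 3 * PAGE_SIZE gives order = 2, so alloc_pages() returns a
 * block of 4 pages.  split_page() turns that block into four
 * independent order-0 pages, so the loop can hand the unused fourth
 * page (free = page + 3, end = page + 4) straight back to the page
 * allocator while the first three stay with the caller.
 */
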
static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	/*
	 * __dma_alloc() already split the allocation into order-0
	 * pages, so each one can be freed individually.
	 */
	while (page < end)
		__free_page(page++);
}

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);
	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);

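/*
 * Example (hypothetical caller, not part of this file): a typical
 * descriptor-ring setup pairs the two functions above.  `ring' and
 * `ring_dma' are placeholders:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ...program ring_dma into the device, access ring from the
 *	// CPU without any explicit cache maintenance...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */
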
void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);

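/*
 * Example (hypothetical caller, not part of this file): write-combined
 * mappings suit buffers the CPU mostly streams writes into, such as a
 * framebuffer.  `fb' and `fb_size' are placeholders:
 *
 *	dma_addr_t fb_dma;
 *	void *fb = dma_alloc_writecombine(dev, fb_size, &fb_dma,
 *					  GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	// ...CPU writes are buffered and combined; reads stay slow...
 *	dma_free_writecombine(dev, fb_size, fb, fb_dma);
 */
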
void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);