• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/arch/microblaze/kernel/
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
24static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
25				size_t size, enum dma_data_direction direction)
26{
27	switch (direction) {
28	case DMA_TO_DEVICE:
29		flush_dcache_range(paddr + offset, paddr + offset + size);
30		break;
31	case DMA_FROM_DEVICE:
32		invalidate_dcache_range(paddr + offset, paddr + offset + size);
33		break;
34	default:
35		BUG();
36	}
37}
38
39static unsigned long get_dma_direct_offset(struct device *dev)
40{
41	if (likely(dev))
42		return (unsigned long)dev->archdata.dma_data;
43
44	return PCI_DRAM_OFFSET;
45}
46
/*
 * NOTE(review): hard-coded in this vendor tree; upstream derives
 * NOT_COHERENT_CACHE from the cache configuration — confirm this matches
 * the target's cache setup before reusing the #else path.
 */
#define NOT_COHERENT_CACHE

/*
 * Allocate a coherent DMA buffer of @size bytes for @dev.
 *
 * Returns the kernel virtual address of the buffer, with the
 * device-visible address stored in *@dma_handle, or NULL on failure.
 */
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	/* Caches are not DMA-coherent: hand out an uncached mapping. */
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag  &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	/* Coherent allocations are handed out zeroed. */
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

/*
 * Free a buffer previously obtained from dma_direct_alloc_coherent().
 * @vaddr/@size must match the original allocation; @dma_handle is unused.
 */
static void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

83static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
84			     int nents, enum dma_data_direction direction,
85			     struct dma_attrs *attrs)
86{
87	struct scatterlist *sg;
88	int i;
89
90	for_each_sg(sgl, sg, nents, i) {
91		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
92		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
93							sg->length, direction);
94	}
95
96	return nents;
97}
98
/*
 * Nothing to do on unmap: the direct mapping keeps no per-entry state to
 * release, and cache maintenance was done when the list was mapped.
 */
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

/*
 * Every DMA mask is accepted — the direct mapping imposes no addressing
 * restrictions of its own.
 */
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

110static inline dma_addr_t dma_direct_map_page(struct device *dev,
111					     struct page *page,
112					     unsigned long offset,
113					     size_t size,
114					     enum dma_data_direction direction,
115					     struct dma_attrs *attrs)
116{
117	__dma_sync_page(page_to_phys(page), offset, size, direction);
118	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
119}
120
121static inline void dma_direct_unmap_page(struct device *dev,
122					 dma_addr_t dma_address,
123					 size_t size,
124					 enum dma_data_direction direction,
125					 struct dma_attrs *attrs)
126{
127/* There is not necessary to do cache cleanup
128 *
129 * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
130 * dma_address is physical address
131 */
132	__dma_sync_page(dma_address, 0 , size, direction);
133}
134
/* DMA operations for directly mapped busses; the arch-wide default. */
struct dma_map_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);

146/* Number of entries preallocated for DMA-API debugging */
147#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
148
149static int __init dma_init(void)
150{
151       dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
152
153       return 0;
154}
155fs_initcall(dma_init);
156