/* arch/x86/kernel/pci-dma.c (asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6) */
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
#include <asm/x86_init.h>
#include <asm/xen/swiotlb-xen.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices, and every device is allowed to access the whole of physical
 * memory.  This is useful if a user wants to use an IOMMU only for KVM
 * device assignment to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;
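
/*
 * Example (illustrative, not part of the original file): pass-through
 * is selected from the boot loader command line, e.g.
 *
 *	... intel_iommu=on iommu=pt ...
 *
 * The "pt" token is parsed by iommu_setup() below, leaving the
 * hardware IOMMU usable for KVM device assignment while driver DMA
 * goes untranslated.
 */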

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
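
/*
 * Example (illustrative sketch, not part of the original file): a PCI
 * driver typically negotiates its mask in probe() and falls back to
 * 32 bits if the wider mask is rejected.  "pdev" is a hypothetical
 * struct pci_dev; the snippet is wrapped in #if 0 so it is never
 * compiled.
 */
#if 0
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	else
		return -EIO;	/* no usable DMA configuration */
#endif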

#if defined(CONFIG_X86_64) && !defined(CONFIG_NUMA)
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
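
/*
 * Example (illustrative): memparse() accepts K/M/G suffixes, so booting
 * with "dma32_size=256M" overrides the 128MB default set above.
 */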

void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * See aperture_64.c allocate_aperture() for why 512M is used
	 * as the goal here.
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	/*
	 * Kmemleak should not scan this block as it may not be mapped via the
	 * kernel direct mapping.
	 */
	kmemleak_ignore(dma32_bootmem_ptr);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{

	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
#else
void __init dma32_reserve_bootmem(void)
{
}
static void __init dma32_free_bootmem(void)
{
}

#endif

void __init pci_iommu_alloc(void)
{
	/* free the reserved range so the IOMMU can get a range below 4G */
	dma32_free_bootmem();

	if (pci_xen_swiotlb_detect() || pci_swiotlb_detect())
		goto out;

	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	/* needs to be called after gart_iommu_hole_init */
	amd_iommu_detect();
out:
	pci_xen_swiotlb_init();

	pci_swiotlb_init();
}

void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
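
/*
 * Example (illustrative sketch, not part of the original file):
 * drivers normally reach the allocator above via dma_alloc_coherent().
 * If the device mask is below 4GB and the first page lands too high,
 * the GFP_DMA retry above satisfies it from the low 16MB zone.  "dev"
 * is a hypothetical struct device; wrapped in #if 0 so it is never
 * compiled.
 */
#if 0
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle,
				      GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;
	/* device uses dma_handle for bus-master DMA; CPU uses cpu_addr */
	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
#endif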

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
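
/*
 * Example (illustrative): the loop above consumes a comma-separated
 * option string, so booting with
 *
 *	iommu=force,nomerge,nodac
 *
 * sets force_iommu = 1, iommu_merge = 0 and forbid_dac = 1 in a single
 * pass.
 */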

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386.  It doesn't make much sense, because it will
	   only work for pci_alloc_coherent.  The caller just has to use
	   GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   The problem with this is that if we overflow the IOMMU area
	   and return a DAC address as the fallback, the device may not
	   handle it correctly.

	   As a special case, some controllers have a 39-bit address
	   mode that is as efficient as 32-bit (aic79xx).  Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this type.
	   Normally this doesn't make any difference, but it gives more
	   graceful handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
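
/*
 * Example (illustrative): DMA_BIT_MASK(32) is 0xffffffff, so once the
 * VIA quirk below has set forbid_dac, a dma_set_mask(dev,
 * DMA_BIT_MASK(64)) request fails the DAC check above; dma_supported()
 * returns 0, dma_set_mask() returns -EIO, and the driver is expected
 * to fall back to a 32-bit mask.
 */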

static int __init pci_iommu_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	if (swiotlb || xen_swiotlb) {
		printk(KERN_INFO "PCI-DMA: "
		       "Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_print_info();
	} else
		swiotlb_free();

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif