/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
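
/*
 * A caller that has just set up an ioremap of a range also covered by the
 * direct mapping might keep the two views' attributes consistent like this
 * (a minimal sketch; "phys" and "size" are hypothetical, and in-tree
 * callers normally go through kernel_map_sync_memtype() rather than
 * calling this directly):
 *
 *	if (ioremap_change_attr((unsigned long)__va(phys), size,
 *				_PAGE_CACHE_UC))
 *		printk(KERN_WARNING "failed to set direct map to UC\n");
 */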

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area; it's always mapped.
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than one BAR (or other resource)
	 * in the iomem resource tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	last_pfn = last_addr >> PAGE_SHIFT;
	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * OK, go for it.
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
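
/*
 * Typical driver usage (a minimal sketch; "pdev", BAR 0 and CTRL_REG are
 * hypothetical placeholders for a real device's resources):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	...
 *	iounmap(regs);
 */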

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
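
/*
 * Write-combined mappings suit large, write-mostly apertures such as
 * framebuffers. A minimal sketch ("fb_base" and "fb_len" are hypothetical
 * values a driver would read from its device):
 *
 *	void __iomem *fb = ioremap_wc(fb_base, fb_len);
 *
 *	if (fb) {
 *		memset_io(fb, 0, fb_len);
 *		iounmap(fb);
 *	}
 */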

/**
 * ioremap_cache - map bus memory into CPU space, cacheable (write-back)
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * ioremap_prot - map bus memory into CPU space with caller-chosen caching
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 * @prot_val:	page protection value; only the _PAGE_CACHE_MASK bits are used
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
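
/*
 * These two are used as a pair by the /dev/mem access paths. A minimal
 * sketch of the pattern ("phys", "buf" and "count" are hypothetical, and a
 * real caller would copy to user space rather than memcpy):
 *
 *	void *ptr = xlate_dev_mem_ptr(phys);
 *
 *	if (ptr) {
 *		memcpy(buf, ptr, count);
 *		unxlate_dev_mem_ptr(phys, ptr);
 *	}
 */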

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

/*
 * Re-initialize the boot-time fixmap slots, e.g. after the fixmap area
 * has been relocated. No boot-time mappings may still be live here.
 */
void __init fixup_early_ioremap(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i]) {
			WARN_ON(1);
			break;
		}
	}

	early_ioremap_init();
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"Please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot\n",
			 (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * OK, go for it.
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
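
/*
 * early_ioremap()/early_memremap() are for code that runs before the real
 * ioremap machinery is up, and every mapping must be torn down again with
 * early_iounmap(). A minimal sketch ("fw_table_phys", "struct fw_table"
 * and "parse_fw_table" are hypothetical placeholders for a firmware table
 * discovered during early boot):
 *
 *	struct fw_table *tbl;
 *
 *	tbl = (struct fw_table *)early_memremap(fw_table_phys, sizeof(*tbl));
 *	if (tbl) {
 *		parse_fw_table(tbl);
 *		early_iounmap((void __iomem *)tbl, sizeof(*tbl));
 *	}
 */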

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): slot not found\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: size does not match mapped size %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}