/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
	 * mapped at the end of the address space (typically 0xfd000000)
	 * in a non-translatable area, so mapping through page tables for
	 * this area is not only pointless, but also fundamentally
	 * broken. Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
		return (void __iomem *)phys_addr;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_32BIT
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
	if (unlikely(size >= 0x1000000)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr		+= mapped;
			phys_addr	+= mapped;
			size		-= mapped;
		}
	}
#endif

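	/*
	 * Whatever remains (or the entire request when no PMB entry was
	 * used) is mapped through ordinary page tables with an uncached
	 * protection, plus any caller-supplied flags.
	 */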
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
	if (likely(size)) {
		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}
	}

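	/*
	 * Hand back the sub-page offset that was masked off above so the
	 * caller gets a cookie for the exact address it asked for.
	 */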
	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);
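
/*
 * A rough usage sketch, not part of this file's interface: a driver would
 * normally go through the generic ioremap()/readl()/iounmap() wrappers
 * rather than calling __ioremap() directly. The resource values and the
 * MY_STATUS_REG offset below are hypothetical.
 *
 *	void __iomem *regs = ioremap(res_start, res_len);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + MY_STATUS_REG);
 *	iounmap(regs);
 */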

void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

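	/*
	 * Addresses outside the P3 page-table area (P1/P2 fixed mappings)
	 * and direct PCI window addresses never had a VMA set up for them
	 * by __ioremap(), so there is nothing to tear down.
	 */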
	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
		return;

#ifdef CONFIG_32BIT
	pmb_unmap(vaddr);
#endif

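	/*
	 * Mask off the sub-page offset that __ioremap() added, then find
	 * and release the underlying VMA.
	 */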
	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);