/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

unsigned long ioremap_bot = IOREMAP_BASE;


#ifdef CONFIG_PPC_MMU_NOHASH
static void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	if (init_bootmem_done)
		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		pt = __va(memblock_alloc_base(size, size,
					 __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page is currently only called by __ioremap.  It adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		/* Warning! This will blow up if bootmem is not initialized,
		 * which our ppc64 code is keen to do.  We'll need to fix it
		 * and/or be more careful.
		 */
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * Linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to create bolted mapping for "
			       "IO memory at %016lx!\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}
	return 0;
}

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
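
/*
 * Illustrative sketch (not part of the original file): how platform code
 * might pair __ioremap_at()/__iounmap_at() to retarget a fixed virtual
 * window by hand, as the PCI code does for ISA space.  The 64K window
 * size and the example_* name are hypothetical.
 */
static int __maybe_unused example_retarget_window(phys_addr_t new_phys,
						  void *fixed_virt)
{
	/* Tear down the old translation for the window... */
	__iounmap_at(fixed_virt, 0x10000);

	/* ...then rebuild it pointing at the new physical base */
	if (!__ioremap_at(new_phys, fixed_virt, 0x10000,
			  _PAGE_NO_CACHE | _PAGE_GUARDED))
		return -ENOMEM;
	return 0;
}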

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.  Once the mm subsystem is up
	 * (mem_init_done), we allocate virtual space between ioremap_bot
	 * and IOREMAP_END from the vmalloc infrastructure.  Before that,
	 * we hand out addresses going up from ioremap_bot.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}


void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
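
/*
 * Illustrative sketch (not part of the original file): the usual
 * driver-side pattern for ioremap()/iounmap().  The physical address
 * and register offset are hypothetical.
 */
static u32 __maybe_unused example_read_status(void)
{
	/* Map one uncached, guarded page of device registers */
	u32 __iomem *regs = ioremap(0xfe000000ul, PAGE_SIZE);
	u32 val;

	if (!regs)
		return 0;
	val = in_be32(regs + 4);	/* hypothetical register at byte 0x10 */
	iounmap(regs);
	return val;
}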

void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
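
/*
 * Illustrative sketch (not part of the original file): using
 * ioremap_flags() when the default uncached+guarded attributes are not
 * wanted, e.g. an uncached but non-guarded framebuffer mapping where
 * speculative loads are harmless.  The address and size are hypothetical.
 */
static void __iomem * __maybe_unused example_map_framebuffer(void)
{
	/* _PAGE_NO_CACHE alone: uncached (coherency is stripped by
	 * __ioremap_at()), but without _PAGE_GUARDED */
	return ioremap_flags(0x90000000ul, 0x01000000ul, _PAGE_NO_CACHE);
}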


/*
 * Unmap an IO region previously mapped with ioremap and release its
 * vmalloc area.  Access to the IO memory should be serialized by the
 * driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);