/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

/*
 * map_io_page() is currently only called by __ioremap().  It adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting the latter when the mm subsystem is not yet up.
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (mem_init_done) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * Linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize)) {
			printk(KERN_ERR "Failed to create bolted mapping for "
			       "I/O memory at %016lx!\n", pa);
			return -ENOMEM;
		}
	}
	return 0;
}

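/*
 * __ioremap_com() does the page-by-page work for __ioremap() and
 * __ioremap_explicit(): it maps the physical range starting at pa into
 * the virtual range starting at ea and returns the virtual address
 * adjusted for the sub-page offset of the original address, or NULL if
 * any page fails to map.
 */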
static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
			    unsigned long ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_io_page(ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}

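/*
 * __ioremap() maps a physical address range into kernel virtual space
 * using caller-supplied PTE flags.  If the flags do not include
 * _PAGE_PRESENT, the standard PAGE_KERNEL protection is applied.
 */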
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	unsigned long pa, ea;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END.
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if ((size == 0) || (pa == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;
		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (!ret)
			im_free(area->addr);
	} else {
		ea = ioremap_bot;
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (ret)
			ioremap_bot += size;
	}
	return ret;
}

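/*
 * Typical driver usage of ioremap()/iounmap(); an illustrative sketch
 * only, where bar_start, bar_len and DEV_CTRL are hypothetical names
 * that are not defined anywhere in this file:
 *
 *	void __iomem *regs = ioremap(bar_start, bar_len);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + DEV_CTRL);
 *	iounmap(regs);
 *
 * Accesses through the returned cookie must go via the MMIO accessors
 * (readl()/writel() and friends), never through plain loads and stores.
 */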
void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags);
	return __ioremap(addr, size, flags);
}

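/*
 * ioremap_flags() is like ioremap(), but the caller supplies the PTE
 * flag bits (for example _PAGE_NO_CACHE | _PAGE_GUARDED) instead of
 * getting the default guarded, non-cacheable mapping.
 */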
void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags);
	return __ioremap(addr, size, flags);
}

#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

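/*
 * __ioremap_explicit() maps a physical range at a caller-chosen virtual
 * address rather than one picked by imalloc.  All of pa, ea and size
 * must be page aligned.  Returns 0 on success and 1 on failure.
 */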
int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;
	void __iomem *ret;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
		return 1;
	}

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
		;
	} else {
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		if (area == NULL) {
			/* Expected when PHB-dlpar is in play */
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "
			       "im_get_area\n");
			return 1;
		}
	}

	ret = __ioremap_com(pa, pa, ea, size, flags);
	if (ret == NULL) {
		printk(KERN_ERR "%s() allocation failure!\n", __FUNCTION__);
		return 1;
	}
	if (ret != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}

	return 0;
}

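/*
 * __iounmap() tears down a mapping created by __ioremap().  Mappings
 * established before mem_init_done were bolted straight into the hash
 * table and are never torn down, so this is a no-op in that case.
 */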
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	im_free(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

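/*
 * iounmap_subset_regions() unmaps every imalloc region that lies
 * entirely within [addr, addr + size).  Returns 1 if no such region
 * exists, 0 otherwise.
 */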
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size, IM_REGION_SUPERSET);
	}

	return 0;
}

int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region.
	 */
	area = im_get_area(addr, size,
			    IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n",
			       __FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	return 0;
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);

static DEFINE_SPINLOCK(phb_io_lock);

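/*
 * reserve_phb_iospace() hands out the next size bytes of virtual space
 * for a PHB's I/O window from the fixed region between PHBS_IO_BASE and
 * IMALLOC_BASE, panicking if that region is exhausted.
 */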
void __iomem * reserve_phb_iospace(unsigned long size)
{
	void __iomem *virt_addr;

	if (phbs_io_bot >= IMALLOC_BASE)
		panic("reserve_phb_iospace(): phb io space overflow\n");

	spin_lock(&phb_io_lock);
	virt_addr = (void __iomem *) phbs_io_bot;
	phbs_io_bot += size;
	spin_unlock(&phb_io_lock);

	return virt_addr;
}