/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/hardware.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/setup.h>

#include <asm/mach/map.h>

/*
 * These are useful for identifying cache coherency problems by
 * allowing the cache, or the cache and the write buffer, to be
 * turned off.  (Note: the write buffer should not be enabled
 * while the cache is off.)
 */
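/*
 * The constants manipulated below are bits of the CP15 control
 * register value cached in cr_alignment/cr_no_alignment:
 *
 *	bit 1 (2) - A: alignment fault checking
 *	bit 2 (4) - C: data cache enable
 *	bit 3 (8) - W: write buffer enable
 *
 * so "nocache" clears C, "nowb" clears both W and C, and "noalign"
 * clears A.
 */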
static int __init nocache_setup(char *__unused)
{
	cr_alignment &= ~4;
	cr_no_alignment &= ~4;
	flush_cache_all();
	set_cr(cr_alignment);
	return 1;
}

static int __init nowrite_setup(char *__unused)
{
	cr_alignment &= ~(8|4);
	cr_no_alignment &= ~(8|4);
	flush_cache_all();
	set_cr(cr_alignment);
	return 1;
}

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~2;
	cr_no_alignment &= ~2;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);
__setup("nocache", nocache_setup);
__setup("nowb", nowrite_setup);

#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
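/*
 * FIRST_KERNEL_PGD_NR is the index of the first pgd slot used for
 * kernel mappings.  Entries below it cover the per-process user
 * address space and are cleared in each new pgd; entries from it
 * upwards (the kernel and IO mappings) are shared, and are simply
 * copied from the init_mm page table in get_pgd_slow() below.
 */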

#define clean_cache_area(start,size) \
	cpu_cache_clean_invalidate_range((unsigned long)start, ((unsigned long)start) + size, 0);


/*
 * Need to get a 16kB block (four pages) for the level 1 page table;
 * the user entries are cleared and the kernel entries copied from
 * init_mm below.
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	init_pgd = pgd_offset_k(0);

	if (vectors_base() == 0) {
		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset(init_pmd, 0);

		/*
		 * This lock is here just to satisfy pmd_alloc and pte_lock
		 */
		spin_lock(&mm->page_table_lock);

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		set_pte(new_pte, *init_pte);

		spin_unlock(&mm->page_table_lock);
	}

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_cache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pgd:
	return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	pte_t *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = (pmd_t *)pgd;
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pte_offset(pmd, 0);
	pmd_clear(pmd);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD entry mapping VIRT to PHYS with protection
 * PROT.  The caller must include the section type and domain bits
 * (PMD_TYPE_SECT, PMD_DOMAIN()) in PROT, as create_mapping() does.
 */
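/*
 * As a purely illustrative example, mapping the megabyte at virtual
 * 0xe0000000 onto physical 0x40000000 as a kernel-domain, read/write,
 * cached and bufferable section would pass
 *
 *	prot = PMD_TYPE_SECT | PMD_DOMAIN(DOMAIN_KERNEL) |
 *	       PMD_SECT_AP_READ | PMD_SECT_AP_WRITE |
 *	       PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE
 *
 * so that pmd_val() ends up as 0x40000000 ORed with those bits.
 */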
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t pmd;

	pmd_val(pmd) = phys | prot;

	set_pmd(pmd_offset(pgd_offset_k(virt), virt), pmd);
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
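/*
 * The 2 * PTRS_PER_PTE * sizeof(pte_t) bootmem block allocated below
 * is therefore two consecutive tables; the pmd entry is pointed at the
 * second (hardware) half, while the first half is presumably left for
 * the Linux view of the ptes maintained via set_pte().
 */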
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
{
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);

	if (pmd_none(*pmdp)) {
		pte_t *ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
						      sizeof(pte_t));

		ptep += PTRS_PER_PTE;

		set_pmd(pmdp, __mk_pmd(ptep, PMD_TYPE_TABLE | PMD_DOMAIN(domain)));
	}
	ptep = pte_offset(pmdp, virt);

	set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
}

/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
	pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections.
 */
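/*
 * The loops below first map individual PAGE_SIZE pages until both the
 * virtual and the physical address reach a 1MB section boundary
 * (which can only happen when the two differ by a multiple of 1MB),
 * then whole PGDIR_SIZE sections while at least a full section
 * remains, then pages again for the tail.  Any final fragment smaller
 * than PAGE_SIZE is left unmapped.
 */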
static void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_pte;
	long off;

	if (md->prot_read && md->prot_write &&
	    !md->cacheable && !md->bufferable) {
		printk(KERN_WARNING "Security risk: creating user "
		       "accessible mapping for 0x%08lx at 0x%08lx\n",
		       md->physical, md->virtual);
	}

	if (md->virtual != vectors_base() && md->virtual < PAGE_OFFSET) {
		printk(KERN_WARNING "MM: not creating mapping for "
		       "0x%08lx at 0x%08lx in user region\n",
		       md->physical, md->virtual);
		return;
	}

	prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
		   (md->prot_read  ? L_PTE_USER       : 0) |
		   (md->prot_write ? L_PTE_WRITE      : 0) |
		   (md->cacheable  ? L_PTE_CACHEABLE  : 0) |
		   (md->bufferable ? L_PTE_BUFFERABLE : 0);

	prot_sect = PMD_TYPE_SECT | PMD_DOMAIN(md->domain) |
		    (md->prot_read  ? PMD_SECT_AP_READ    : 0) |
		    (md->prot_write ? PMD_SECT_AP_WRITE   : 0) |
		    (md->cacheable  ? PMD_SECT_CACHEABLE  : 0) |
		    (md->bufferable ? PMD_SECT_BUFFERABLE : 0);

	virt   = md->virtual;
	off    = md->physical - virt;
	length = md->length;

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, md->domain, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	while (length >= PGDIR_SIZE) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += PGDIR_SIZE;
		length -= PGDIR_SIZE;
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, md->domain, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
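/*
 * Each entry written below maps the i-th megabyte of virtual space
 * onto the identical physical address as a read/write section, so the
 * code performing the MMU-off sequence keeps executing from the same
 * addresses once translation is disabled.
 */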
void setup_mm_for_reboot(char mode)
{
	pgd_t *pgd;
	pmd_t pmd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
		pmd_val(pmd) = (i << PGDIR_SHIFT) |
			PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
			PMD_TYPE_SECT;
		set_pmd(pmd_offset(pgd + i, i << PGDIR_SHIFT), pmd);
	}
}

/*
 * Setup initial mappings.  We use the page we allocated for zero page to hold
 * the mappings, which will get overwritten by the vectors in traps_init().
 * The mappings must be in virtual address order.
 */
void __init memtable_init(struct meminfo *mi)
{
	struct map_desc *init_maps, *p, *q;
	unsigned long address = 0;
	int i;

	init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		p->physical   = mi->bank[i].start;
		p->virtual    = __phys_to_virt(p->physical);
		p->length     = mi->bank[i].size;
		p->domain     = DOMAIN_KERNEL;
		p->prot_read  = 0;
		p->prot_write = 1;
		p->cacheable  = 1;
		p->bufferable = 1;

		p ++;
	}

#ifdef FLUSH_BASE
	p->physical   = FLUSH_BASE_PHYS;
	p->virtual    = FLUSH_BASE;
	p->length     = PGDIR_SIZE;
	p->domain     = DOMAIN_KERNEL;
	p->prot_read  = 1;
	p->prot_write = 0;
	p->cacheable  = 1;
	p->bufferable = 1;

	p ++;
#endif

#ifdef FLUSH_BASE_MINICACHE
	p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
	p->virtual    = FLUSH_BASE_MINICACHE;
	p->length     = PGDIR_SIZE;
	p->domain     = DOMAIN_KERNEL;
	p->prot_read  = 1;
	p->prot_write = 0;
	p->cacheable  = 1;
	p->bufferable = 0;

	p ++;
#endif

	/*
	 * Go through the initial mappings, but clear out any
	 * pgdir entries that are not in the description.
	 */
	q = init_maps;
	do {
		if (address < q->virtual || q == p) {
			clear_mapping(address);
			address += PGDIR_SIZE;
		} else {
			create_mapping(q);

			address = q->virtual + q->length;
			address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;

			q ++;
		}
	} while (address != 0);

	/*
	 * Create a mapping for the machine vectors at virtual address 0
	 * or 0xffff0000.  We should always try the high mapping.
	 */
	init_maps->physical   = virt_to_phys(init_maps);
	init_maps->virtual    = vectors_base();
	init_maps->length     = PAGE_SIZE;
	init_maps->domain     = DOMAIN_USER;
	init_maps->prot_read  = 0;
	init_maps->prot_write = 0;
	init_maps->cacheable  = 1;
	init_maps->bufferable = 0;

	create_mapping(init_maps);

	flush_cache_all();
}

/*
 * Create the architecture specific mappings
 */
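/*
 * io_desc is an array of map_desc entries, terminated by an entry
 * with its `last' field set.  Purely as an illustration (addresses
 * made up, and DOMAIN_IO assumed to be the domain such platform
 * mappings use), one entry for an uncached, unbuffered register
 * window could be filled in the same way memtable_init() fills its
 * descriptors:
 *
 *	desc->physical   = 0x80000000;
 *	desc->virtual    = 0xf8000000;
 *	desc->length     = 0x00100000;
 *	desc->domain     = DOMAIN_IO;
 *	desc->prot_read  = 0;
 *	desc->prot_write = 1;
 *	desc->cacheable  = 0;
 *	desc->bufferable = 0;
 */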
void __init iotable_init(struct map_desc *io_desc)
{
	int i;

	for (i = 0; io_desc[i].last == 0; i++)
		create_mapping(io_desc + i);
}

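/*
 * Free the mem_map entries covering the hole between two memory
 * banks: convert the physical range into the corresponding range of
 * struct page entries, round it inwards to whole pages (so we never
 * free part of a page that also covers valid entries), and hand the
 * result back to the bootmem allocator.
 */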
static inline void free_memmap(int node, unsigned long start, unsigned long end)
{
	unsigned long pg, pgend;

	start = __phys_to_virt(start);
	end   = __phys_to_virt(end);

	pg    = PAGE_ALIGN((unsigned long)(virt_to_page(start)));
	pgend = ((unsigned long)(virt_to_page(end))) & PAGE_MASK;

	start = __virt_to_phys(pg);
	end   = __virt_to_phys(pgend);

	free_bootmem_node(NODE_DATA(node), start, end - start);
}

static inline void free_unused_memmap_node(int node, struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
			continue;

		bank_start = mi->bank[i].start & PAGE_MASK;

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end != bank_start)
			free_memmap(node, prev_bank_end, bank_start);

		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
					   mi->bank[i].size);
	}
}

/*
 * The mem_map array can get very big.  Free
 * the unused area of the memory map.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
	int node;

	for (node = 0; node < numnodes; node++)
		free_unused_memmap_node(node, mi);
}

/*
 * PTE table allocation cache.
 *
 * This is a move away from our custom 2K page allocator.  We now use the
 * slab cache to keep track of these objects.
 *
 * With this, it is questionable as to whether the PGT cache gains us
 * anything.  We may be better off dropping the PTE stuff from our PGT
 * cache implementation.
 */
kmem_cache_t *pte_cache;
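/*
 * Each object in this cache is 2 * PTRS_PER_PTE * sizeof(pte_t) bytes
 * (the combined Linux and hardware pte tables described above) and
 * must sit on a 2K boundary, which the constructor below checks.
 */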

/*
 * The constructor gets called for each object within the cache when the
 * cache page is created.  Note that if slab tries to misalign the blocks,
 * we BUG() loudly.
 */
static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
{
	unsigned long block = (unsigned long)pte;

	if (block & 2047)
		BUG();

	memzero(pte, 2 * PTRS_PER_PTE * sizeof(pte_t));
	cpu_cache_clean_invalidate_range(block, block +
			PTRS_PER_PTE * sizeof(pte_t), 0);
}

void __init pgtable_cache_init(void)
{
	pte_cache = kmem_cache_create("pte-cache",
				2 * PTRS_PER_PTE * sizeof(pte_t), 0, 0,
				pte_cache_ctor, NULL);
	if (!pte_cache)
		BUG();
}