/*
 * Set up paging and the MMU.
 *
 * Copyright (C) 2000-2003, Axis Communications AB.
 *
 * Authors:   Bjorn Wesen <bjornw@axis.com>
 *            Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
 */
#include <linux/mmzone.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/arch/hwregs/asm/mmu_defs_asm.h>
#include <asm/arch/hwregs/supp_reg.h>

extern void tlb_init(void);
23
24/*
25 * The kernel is already mapped with linear mapping at kseg_c so there's no
26 * need to map it with a page table. However, head.S also temporarily mapped it
27 * at kseg_4 thus the ksegs are set up again. Also clear the TLB and do various
28 * other paging stuff.
29 */
/*
 * cris_mmu_init - program the CRISv32 instruction and data MMUs.
 *
 * Points the per-CPU pgd at init_mm's page directory, clears the TLB,
 * writes the kernel segment configuration and kseg base addresses into
 * both MMU banks, zeroes the current PID, and (redundantly, since
 * head.S already did it) enables the MMUs and caches.
 *
 * Called once at boot from paging_init(); must run before any
 * page-table-mapped access is attempted.
 */
void __init
cris_mmu_init(void)
{
	unsigned long mmu_config;	/* rw_mm_cfg: segment modes + fault enables */
	unsigned long mmu_kbase_hi;	/* physical bases for kseg_8..kseg_f */
	unsigned long mmu_kbase_lo;	/* physical bases for kseg_0..kseg_7 */
	unsigned short mmu_page_id;	/* rw_mm_tlb_hi PID field (0 = kernel) */

	/*
	 * Make sure the current pgd table points to something sane, even if it
	 * is most probably not used until the next switch_mm.
	 */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

#ifdef CONFIG_SMP
	{
		pgd_t **pgd;
		/*
		 * Tell the hardware where the per-CPU pgd pointer lives by
		 * writing its address into RW_MM_TLB_PGD on both MMU banks.
		 * NOTE(review): banks are selected by raw numbers 1 and 2
		 * here rather than BANK_IM/BANK_DM as used below -- presumably
		 * the same banks; verify against supp_reg.h.
		 */
		pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
		SUPP_BANK_SEL(1);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
		SUPP_BANK_SEL(2);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	}
#endif

	/* Initialise the TLB. Function found in tlb.c. */
	tlb_init();

	/*
	 * Enable exceptions (write, access, execute, invalid) and set each
	 * 256 MB kernel segment to either linear (fixed offset mapping) or
	 * page (translated through the page tables) mode.  The kernel image
	 * itself lives in the linearly mapped kseg_c (see file header).
	 * Under the ETRAX FS simulator, kseg_a is linear instead of paged.
	 */
	mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on)        |
		       REG_STATE(mmu, rw_mm_cfg, acc, on)       |
		       REG_STATE(mmu, rw_mm_cfg, ex, on)        |
		       REG_STATE(mmu, rw_mm_cfg, inv, on)       |
		       REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAXFS_SIM
                       REG_STATE(mmu, rw_mm_cfg, seg_a, page)   |
#else
		       REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
		       REG_STATE(mmu, rw_mm_cfg, seg_9, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_8, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_7, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_6, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_5, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_4, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_3, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_2, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_1, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_0, page));

	/*
	 * Physical base (upper nibble of the physical address, presumably --
	 * TODO confirm field width in mmu_defs_asm.h) for each linearly
	 * mapped segment in kseg_8..kseg_f.  Paged segments use base 0.
	 * On real hardware kseg_c maps to 0x4... (DRAM); on the simulator
	 * kseg_a maps to 0xa... instead.
	 */
	mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
#ifndef CONFIG_ETRAXFS_SIM
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
#else
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x0) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAXFS_SIM
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));

	/* kseg_0..kseg_7 are all paged, so every base is 0. */
	mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

	/* PID 0 is the kernel's context. */
	mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);

	/* Update the instruction MMU. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Update the data MMU with the identical configuration. */
	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Start out in the kernel context (PID 0). */
	SPEC_REG_WR(SPEC_REG_PID, 0);

	/*
	 * The MMU has been enabled ever since head.S but just to make it
	 * totally obvious enable it here as well.
	 */
	SUPP_BANK_SEL(BANK_GC);
	SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}
135
/*
 * paging_init - boot-time memory-management setup for CRISv32.
 *
 * Clears the kernel page directory, programs the MMUs via
 * cris_mmu_init(), allocates the shared zero page, and hands the
 * single memory zone to the core VM.  Called once from the arch
 * start-up code; must run before the VM is used.
 */
void __init
paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	printk("Setting up paging and the MMU.\n");

	/* Clear out the init_mm.pgd that will contain the kernel's mappings. */
	for(i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* Must happen after the pgd is zeroed: the MMU init publishes
	 * init_mm.pgd (== swapper_pg_dir) to the hardware. */
	cris_mmu_init();

	/*
	 * Initialize the bad page table and bad page to point to a couple of
	 * allocated pages.
	 */
	empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	/* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
	zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	/* Remaining zones (normal, highmem, ...) are empty. */
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

	/*
	 * Use free_area_init_node instead of free_area_init, because it is
	 * designed for systems where the DRAM starts at an address
	 * substantially higher than 0, like us (we start at PAGE_OFFSET). This
	 * saves space in the mem_map page array.
	 */
	free_area_init_node(0, &contig_page_data, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);

	/* Single-node system: the global mem_map is just node 0's map. */
	mem_map = contig_page_data.node_mem_map;
}
173