// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

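/*
 * Set once mem_init() has completed and the buddy allocator is up;
 * other parts of the port (e.g. ioremap) test this to decide whether
 * the page allocator can be used yet.
 */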
int mem_init_done;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	/*
	 * We use only ZONE_NORMAL
	 */
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

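	/* Hand these limits to the core VM: free_area_init() builds the
	 * node and zone data from the maximum PFN of each zone.
	 */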
	free_area_init(max_zone_pfn);
}

/* Extents of the read-only kernel image, from vmlinux.lds.S */
extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
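/*
 * A sizing sketch for the loop below (assuming the usual or1k
 * configuration of 8 KiB pages and 4-byte PTEs): one PTE page holds
 * PTRS_PER_PTE == 2048 entries and so maps 2048 * 8 KiB == 16 MiB per
 * pgd slot, which is why the outer loop can advance pge by one for
 * each PTE page it fills.
 */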
static void __init map_ram(void)
{
	phys_addr_t start, end;
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	p4d_t *p4e;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	u64 i;

	v = PAGE_OFFSET;

	for_each_mem_range(i, &start, &end) {
		p = (u32) start & PAGE_MASK;
		e = (u32) end;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;

			p4e = p4d_offset(pge, v);
			pue = pud_offset(p4e, v);
			pme = pmd_offset(pue, v);

			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge)
				panic("%s: OR1K kernel hardcoded for two-level page tables",
				      __func__);

			/* Alloc one page for holding PTEs... */
			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
			if (!pte)
				panic("%s: Failed to allocate page for PTEs\n",
				      __func__);
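			/* Hook the new PTE page into the (folded) pmd;
			 * _KERNPG_TABLE marks the entry as a valid kernel
			 * page table.
			 */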
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTEs */
			for (j = 0; p < e && j < PTRS_PER_PTE;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: %pa-%pa\n", __func__,
		       &start, &end);
	}
}

void __init paging_init(void)
{
	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */

	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 *  switch_mm)
	 */
	current_pgd[smp_processor_id()] = init_mm.pgd;

	map_ram();

	zone_sizes_init();

	/* self modifying code ;) */
	/* Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);

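		/* What gets written below is (handler - vector) >> 2, a
		 * PC-relative word offset.  Because the l.j opcode is 0x0
		 * in the top six bits, the bare offset is itself a complete
		 * "l.j handler" instruction (assuming, as the boot code
		 * arranges, that the displacement fits l.j's 26-bit
		 * immediate).
		 */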
		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;
	}

	/* Soft ordering constraint to ensure that cache invalidation and
	 * TLB flush really happen _after_ code has been modified.
	 */
	barrier();

	/* Invalidate instruction caches after code modification */
	mtspr(SPR_ICBIR, 0x900);
	mtspr(SPR_ICBIR, 0xa00);

	/* The new TLB miss handlers and kernel page tables are now in
	 * place.  Make sure that page flags get updated for all pages
	 * in the TLB by flushing it and forcing all TLB entries to be
	 * recreated from their page table flags.
	 */
	flush_tlb_all();
}

void __init mem_init(void)
{
	BUG_ON(!mem_map);

	max_mapnr = max_low_pfn;
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	memblock_free_all();

	printk(KERN_INFO "mem_init_done ...........................................\n");
	mem_init_done = 1;
}

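/*
 * This architecture's vm_flags -> pgprot mapping.  Note the
 * copy-on-write convention: private (non-shared) writable mappings get
 * the write-protected PAGE_COPY* protections, so the first write
 * faults and lets the generic mm duplicate the page.
 */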
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY_X,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY_X,
	[VM_EXEC]					= PAGE_READONLY,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY_X,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
};
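/* Emits this port's vm_get_page_prot(), which indexes the table above
 * with the low VM_{READ,WRITE,EXEC,SHARED} bits of vm_flags.
 */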
DECLARE_VM_GET_PAGE_PROT