1/*	$NetBSD$	*/
2
3/*
4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *      This product includes software developed for the NetBSD Project by
20 *      Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37/*
38 * Copyright (c) 1997,1998 Mark Brinicombe.
39 * Copyright (c) 1997,1998 Causality Limited.
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 *    notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 *    notice, this list of conditions and the following disclaimer in the
49 *    documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 *    must display the following acknowledgement:
52 *	This product includes software developed by Mark Brinicombe
53 *	for the NetBSD Project.
54 * 4. The name of the company nor the name of the author may be used to
55 *    endorse or promote products derived from this software without specific
56 *    prior written permission.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
59 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
60 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
61 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
62 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
63 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
64 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SUCH DAMAGE.
69 */
70
71#include <sys/cdefs.h>
72__KERNEL_RCSID(0, "$NetBSD$");
73
74#include <sys/systm.h>
75#include <sys/param.h>
76#include <sys/kernel.h>
77
78#include <uvm/uvm_extern.h>
79
80#include <machine/bootconfig.h>
81#include <machine/cpu.h>
82#include <machine/pmap.h>
83#include <arm/undefined.h>
84
85#include <arm/arm32/machdep.h>
86
87#include <evbarm/evbarm/initarmvar.h>
88
89
/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#define UND_STACK_SIZE	1

/* Physical address of the kernel message buffer; set in initarm_common(). */
vm_offset_t msgbufphys;
/* Bounds of physical RAM, derived from the boot configuration's dram[] list. */
vm_offset_t physical_start;
vm_offset_t physical_end;
/* PA/VA of the vector ("system") page shared by all processes. */
pv_addr_t systempage;

/* Exception handler entry points, defined in the arm32 machdep code. */
extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;
103
104
105vaddr_t
106initarm_common(const struct initarm_config *ic)
107{
108#ifdef DIAGNOSTIC
109	extern vsize_t xscale_minidata_clean_size;
110#endif
111	extern char etext[], _end[];
112	const BootConfig *bc;
113	int loop;
114	vaddr_t l1pagetable;
115	pv_addr_t kernel_l1pt;
116	pv_addr_t *kernel_pt_table;
117	pv_addr_t irqstack;
118	pv_addr_t undstack;
119	pv_addr_t abtstack;
120	pv_addr_t kernelstack;
121	pv_addr_t minidataclean;
122	vm_offset_t physical_freestart;
123	vm_offset_t physical_freeend;
124	vaddr_t avail;
125	vaddr_t pt_vstart;
126	paddr_t pt_pstart;
127	vsize_t pt_size;
128	u_int ptcount_total;
129	u_int ptcount_kernel;
130	u_int ptcount_fixed_io;
131	u_int ptcount_vmdata;
132
133	/*
134	 * Set up the variables that define the availablilty of
135	 * physical memory.
136	 */
137	bc = ic->ic_bootconf;
138	avail = round_page((vaddr_t)(uintptr_t)&_end[0]);
139	physical_start = bc->dram[0].address;
140	physical_freestart = (avail - KERNEL_BASE) + ic->ic_kernel_base_pa;
141
142	for (loop = 0; loop < bc->dramblocks; loop++) {
143		paddr_t blk_end;
144
145		blk_end = bc->dram[loop].address +
146		    (bc->dram[loop].pages * PAGE_SIZE);
147
148		if (ic->ic_kernel_base_pa >= bc->dram[loop].address &&
149		    ic->ic_kernel_base_pa < blk_end)
150			physical_freeend = blk_end;
151
152		physmem += bc->dram[loop].pages;
153	}
154
155	loop--;
156	physical_end = bc->dram[loop].address +
157	    (bc->dram[loop].pages * PAGE_SIZE);
158
159	/* Tell the user about the memory */
160	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
161	    physical_start, physical_end - 1);
162
163	/*
164	 * Okay, the kernel starts near the bottom of physical memory
165	 * and extends to (avail - KERNEL_BASE) + ic->ic_kernel_base_pa.
166	 * We are going to allocate our bootstrap pages upwards
167	 * from there.
168	 *
169	 * We need to allocate some fixed page tables to get the kernel
170	 * going.  We allocate one page directory and a number of page
171	 * tables and store the physical addresses in the kernel_pt_table
172	 * array.
173	 *
174	 * The kernel page directory must be on a 16K boundary.  The page
175	 * tables must be on 1K boundaries.  What we do is allocate the
176	 * page directory on the first 16K boundary that we encounter, and
177	 * the page tables on 1K boundaries otherwise.  Since we allocate
178	 * at least 12 L2 page tables, we are guaranteed to encounter at
179	 * least one 16K aligned region.
180	 */
181
182#ifdef VERBOSE_INIT_ARM
183	printf("Allocating page tables\n");
184#endif
185
186#ifdef VERBOSE_INIT_ARM
187	printf("freestart = 0x%08lx, avail = 0x%08lx\n",
188	       physical_freestart, avail);
189#endif
190
191	/* Define a macro to simplify memory allocation */
192#define	valloc_l2(var, nl2)			\
193	alloc_l2((var).pv_pa, (nl2));		\
194	(var).pv_va = avail;			\
195	avail += ((nl2) * L2_TABLE_SIZE_REAL);
196#define alloc_l2(var, nl2)					\
197	if (physical_freestart >= physical_freeend)		\
198		panic("initarm: out of memory");		\
199	(var) = physical_freestart;				\
200	physical_freestart += ((nl2) * L2_TABLE_SIZE_REAL);	\
201	memset((char *)(var), 0, ((nl2) * L2_TABLE_SIZE_REAL));
202
203#define	valloc_pages(var, np)	\
204	    valloc_l2(var, (np) * (PAGE_SIZE / L2_TABLE_SIZE_REAL))
205#define	alloc_pages(var, np)	\
206	    alloc_l2(var, (np) * (PAGE_SIZE / L2_TABLE_SIZE_REAL))
207
208	/*
209	 * Burn some memory at the end of the kernel to hold ~85
210	 * pv_addr_t structures. This is more than sufficient to
211	 * track the page tables we'll be allocating here.
212	 */
213	kernel_pt_table = (pv_addr_t *)avail;
214	avail += L2_TABLE_SIZE_REAL;
215	physical_freestart += L2_TABLE_SIZE_REAL;
216
217	/*
218	 * Figure out how much space to allocate for page tables
219	 */
220#define round_sec(x)	(((x) + L1_S_OFFSET) & L1_S_FRAME)
221
222	ptcount_kernel   = round_sec(avail - KERNEL_BASE) / L1_S_SIZE;
223	ptcount_vmdata   = 16;	/* 16MB of KVM, initially */
224	ptcount_fixed_io = round_sec(ic->ic_iosize) / L1_S_SIZE;
225
226	ptcount_total =
227		1 +			/* The System Page */
228		ptcount_kernel +	/* text/data */
229		ptcount_vmdata +	/* Initial kernel VM size */
230		ptcount_fixed_io;	/* Fixed I/O mappings */
231
232	kernel_l1pt.pv_pa = 0;
233	pt_pstart = physical_freestart;
234	pt_vstart = avail;
235	for (loop = 0; loop < ptcount_total; ) {
236		/* Are we 16KB aligned for an L1 ? */
237		if ((physical_freestart & (L1_TABLE_SIZE - 1)) == 0
238		    && kernel_l1pt.pv_pa == 0) {
239			valloc_l2(kernel_l1pt,
240			    L1_TABLE_SIZE / L2_TABLE_SIZE_REAL);
241		} else {
242			valloc_l2(kernel_pt_table[loop], 1);
243			++loop;
244		}
245	}
246
247	/* This should never be able to happen but better confirm that. */
248	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
249		panic("initarm: Failed to align the kernel page directory");
250
251	/*
252	 * Re-align physical_freestart to a page boundary
253	 */
254	physical_freestart = round_page(physical_freestart);
255	avail = round_page(avail);
256	pt_size = physical_freestart - pt_pstart;
257
258#ifdef VERBOSE_INIT_ARM
259	printf("bootstrap PTs: VA: 0x%08lx, PA: 0x%08lx, Size: 0x%08lx\n",
260	       pt_vstart, pt_pstart, pt_size);
261#endif
262
263	/* Allocate stacks for all modes */
264	valloc_pages(irqstack, IRQ_STACK_SIZE);
265	valloc_pages(abtstack, ABT_STACK_SIZE);
266	valloc_pages(undstack, UND_STACK_SIZE);
267	valloc_pages(kernelstack, UPAGES);
268
269	/* Allocate enough pages for cleaning the Mini-Data cache. */
270	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
271	valloc_pages(minidataclean, 1);
272
273	/*
274	 * Allocate physical pages for the kernel message buffer
275	 */
276	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);
277
278	/*
279	 * Allocate a page for the system page.
280	 * This page will just contain the system vectors and can be
281	 * shared by all processes.
282	 */
283	alloc_pages(systempage.pv_pa, 1);
284
285#ifdef VERBOSE_INIT_ARM
286	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
287	    irqstack.pv_va);
288	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
289	    abtstack.pv_va);
290	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
291	    undstack.pv_va);
292	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
293	    kernelstack.pv_va);
294	printf("System Pg: p0x%08lx v0x%08lx\n", systempage.pv_pa,
295	    ic->ic_vecbase);
296#endif
297
298	/*
299	 * Ok we have allocated physical pages for the primary kernel
300	 * page tables
301	 */
302
303#ifdef VERBOSE_INIT_ARM
304	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
305#endif
306
307	/*
308	 * Now we start construction of the L1 page table
309	 * We start by mapping the L2 page tables into the L1.
310	 * This means that we can replace L1 mappings later on if necessary
311	 */
312	l1pagetable = kernel_l1pt.pv_va;
313
314	pmap_link_l2pt(l1pagetable, ic->ic_vecbase, kernel_pt_table++);
315
316	for (loop = 0; loop < (ptcount_kernel + ptcount_vmdata); loop++)
317		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * L1_S_SIZE,
318		    kernel_pt_table++);
319
320	for (loop = 0; loop < ptcount_fixed_io; loop++)
321		pmap_link_l2pt(l1pagetable, ic->ic_iobase, kernel_pt_table++);
322
323	/*
324	 * Update the top of the kernel VM.
325	 *
326	 * Note that we round up 'avail' to a 1MB boundary since that's
327	 * what pmap_growkernel() expects.
328	 */
329	avail = round_sec(avail);
330	pmap_curmaxkvaddr = avail + (ptcount_vmdata * L1_S_SIZE);
331
332#ifdef VERBOSE_INIT_ARM
333	printf("Mapping kernel\n");
334#endif
335
336	/* Now we fill in the L2 pagetable for the kernel static code/data */
337	{
338		size_t textsize = (uintptr_t) etext - KERNEL_BASE;
339		size_t totalsize = (uintptr_t) _end - KERNEL_BASE;
340		u_int logical;
341
342		textsize = round_page(textsize);
343		totalsize = round_page(totalsize);
344
345		logical = pmap_map_chunk(l1pagetable, KERNEL_BASE,
346		    physical_start, textsize,
347		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
348
349		(void) pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
350		    physical_start + logical, totalsize - textsize,
351		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
352	}
353
354#ifdef VERBOSE_INIT_ARM
355	printf("Constructing L2 page tables\n");
356#endif
357
358	/* Map the stack pages */
359	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
360	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
361	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
362	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
363	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
364	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
365	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
366	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
367
368	/* Map the Mini-Data cache clean area. */
369	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
370	    minidataclean.pv_pa);
371
372	/* Map the vector page. */
373	pmap_map_entry(l1pagetable, ic->ic_vecbase, systempage.pv_pa,
374	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
375
376	/* Map page tables */
377	pmap_map_chunk(l1pagetable, pt_vstart, pt_pstart,
378	    pt_size, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
379
380	/*
381	 * Map fixed I/O space
382	 */
383	for (loop = 0; loop < ic->ic_nio; loop++) {
384#ifdef VERBOSE_INIT_ARM
385		printf("Fixed I/O: 0x%08lx -> 0x%08lx @ 0x%08lx (0x%x, %d)\n",
386		    ic->ic_io[loop].ii_pa,
387		    ic->ic_io[loop].ii_pa + ic->ic_io[loop].ii_size - 1,
388		    ic->ic_io[loop].ii_kva, ic->ic_io[loop].ii_prot,
389		    ic->ic_io[loop].ii_cache);
390#endif
391#ifdef DEBUG
392		if (ic->ic_io[loop].ii_kva < ic->ic_iobase ||
393		    (ic->ic_io[loop].ii_kva + ic->ic_io[loop].ii_kva) >
394		    ic->ic_iosize)
395			panic("initarm_common: bad fixed i/o range: %d", loop);
396#endif
397
398		pmap_map_chunk(l1pagetable, ic->ic_io[loop].ii_kva,
399		    ic->ic_io[loop].ii_pa, ic->ic_io[loop].ii_size,
400		    ic->ic_io[loop].ii_prot, ic->ic_io[loop].ii_cache);
401	}
402
403	/*
404	 * Now we have the real page tables in place so we can switch to them.
405	 * Once this is done we will be running with the REAL kernel page
406	 * tables.
407	 */
408
409	/* Switch tables */
410	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
411	cpu_setttb(kernel_l1pt.pv_pa);
412	cpu_tlb_flushID();
413	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
414
415	/*
416	 * Moved from cpu_startup() as data_abort_handler() references
417	 * this during uvm init
418	 */
419	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);
420
421#ifdef VERBOSE_INIT_ARM
422	printf("done!\n");
423#endif
424
425	/*
426	 * Fix up the vector table
427	 */
428	arm32_vector_init(ic->ic_vecbase, ARM_VEC_ALL);
429
430	/*
431	 * Pages were allocated during the secondary bootstrap for the
432	 * stacks for different CPU modes.
433	 * We must now set the r13 registers in the different CPU modes to
434	 * point to these stacks.
435	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
436	 * of the stack memory.
437	 */
438#ifdef VERBOSE_INIT_ARM
439	printf("init subsystems: stacks ");
440#endif
441
442	set_stackptr(PSR_IRQ32_MODE,
443	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
444	set_stackptr(PSR_ABT32_MODE,
445	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
446	set_stackptr(PSR_UND32_MODE,
447	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
448
449	/*
450	 * Well we should set a data abort handler.
451	 * Once things get going this will change as we will need a proper
452	 * handler.
453	 * Until then we will use a handler that just panics but tells us
454	 * why.
455	 * Initialisation of the vectors will just panic on a data abort.
456	 * This just fills in a slightly better one.
457	 */
458#ifdef VERBOSE_INIT_ARM
459	printf("vectors ");
460#endif
461	data_abort_handler_address = (u_int)data_abort_handler;
462	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
463	undefined_handler_address = (u_int)undefinedinstruction_bounce;
464
465	/* Initialise the undefined instruction handlers */
466#ifdef VERBOSE_INIT_ARM
467	printf("undefined ");
468#endif
469	undefined_init();
470
471	/* Load memory into UVM. */
472#ifdef VERBOSE_INIT_ARM
473	printf("page ");
474#endif
475	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
476
477	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
478	    atop(physical_freestart), atop(physical_freeend),
479	    VM_FREELIST_DEFAULT);
480
481	for (loop = 1; loop < bc->dramblocks; loop++) {
482		paddr_t blk_start;
483		paddr_t blk_end;
484
485		blk_start = bc->dram[loop].address;
486		blk_end = blk_start + (bc->dram[loop].pages * PAGE_SIZE);
487
488		/*
489		 * XXX: Support different free lists
490		 */
491		uvm_page_physload(atop(blk_start), atop(blk_end),
492		    atop(blk_start), atop(blk_end),
493		    VM_FREELIST_DEFAULT);
494	}
495
496	/* Boot strap pmap telling it where the kernel page table is */
497#ifdef VERBOSE_INIT_ARM
498	printf("pmap ");
499#endif
500	pmap_bootstrap((pd_entry_t *)l1pagetable, avail);
501
502#ifdef VERBOSE_INIT_ARM
503	printf("Done.\n");
504#endif
505
506	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
507}
508