crb_machdep.c revision 171626
1/* $NetBSD: hpc_machdep.c,v 1.70 2003/09/16 08:18:22 agc Exp $ */ 2 3/*- 4 * Copyright (c) 1994-1998 Mark Brinicombe. 5 * Copyright (c) 1994 Brini. 6 * All rights reserved. 7 * 8 * This code is derived from software written for Brini by Mark Brinicombe 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by Brini. 21 * 4. The name of the company nor the name of the author may be used to 22 * endorse or promote products derived from this software without specific 23 * prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED 26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 28 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 29 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 
36 * 37 * RiscBSD kernel project 38 * 39 * machdep.c 40 * 41 * Machine dependant functions for kernel setup 42 * 43 * This file needs a lot of work. 44 * 45 * Created : 17/09/94 46 */ 47 48#include "opt_msgbuf.h" 49#include "opt_ddb.h" 50 51#include <sys/cdefs.h> 52__FBSDID("$FreeBSD: head/sys/arm/xscale/i8134x/crb_machdep.c 171626 2007-07-27 14:50:57Z cognet $"); 53 54#define _ARM32_BUS_DMA_PRIVATE 55#include <sys/param.h> 56#include <sys/systm.h> 57#include <sys/sysproto.h> 58#include <sys/signalvar.h> 59#include <sys/imgact.h> 60#include <sys/kernel.h> 61#include <sys/ktr.h> 62#include <sys/linker.h> 63#include <sys/lock.h> 64#include <sys/malloc.h> 65#include <sys/mutex.h> 66#include <sys/pcpu.h> 67#include <sys/proc.h> 68#include <sys/ptrace.h> 69#include <sys/cons.h> 70#include <sys/bio.h> 71#include <sys/bus.h> 72#include <sys/buf.h> 73#include <sys/exec.h> 74#include <sys/kdb.h> 75#include <sys/msgbuf.h> 76#include <machine/reg.h> 77#include <machine/cpu.h> 78 79#include <vm/vm.h> 80#include <vm/pmap.h> 81#include <vm/vm.h> 82#include <vm/vm_object.h> 83#include <vm/vm_page.h> 84#include <vm/vm_pager.h> 85#include <vm/vm_map.h> 86#include <vm/vnode_pager.h> 87#include <machine/pmap.h> 88#include <machine/vmparam.h> 89#include <machine/pcb.h> 90#include <machine/undefined.h> 91#include <machine/machdep.h> 92#include <machine/metadata.h> 93#include <machine/armreg.h> 94#include <machine/bus.h> 95#include <sys/reboot.h> 96 97 98#include <arm/xscale/i80321/i80321var.h> /* For i80321_calibrate_delay() */ 99 100#include <arm/xscale/i8134x/i81342reg.h> 101#include <arm/xscale/i8134x/i81342var.h> 102#include <arm/xscale/i8134x/obiovar.h> 103 104 105#define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */ 106#define KERNEL_PT_IOPXS 1 107#define KERNEL_PT_BEFOREKERN 2 108#define KERNEL_PT_AFKERNEL 3 /* L2 table for mapping after kernel */ 109#define KERNEL_PT_AFKERNEL_NUM 9 110 111/* this should be evenly divisable by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) 
 */
#define NUM_KERNEL_PTS		(KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM)

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#ifdef IPKDB
#define UND_STACK_SIZE	2
#else
#define UND_STACK_SIZE	1
#endif

/* Trap-handler entry points, patched below so the vector page dispatches
 * into the kernel's C handlers. */
extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;

/* Bootstrap L2 page tables; PA/VA pairs filled in by initarm(). */
struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];

/* Linker-provided end-of-kernel symbols (two spellings; both used below). */
extern void *_end;

extern vm_offset_t sa1_cache_clean_addr;

extern int *end;

/* Static per-CPU data for the (single) boot CPU. */
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;

/* Physical and virtual addresses for some global pages */

vm_paddr_t phys_avail[10];
vm_paddr_t dump_avail[4];
vm_offset_t physical_pages;
vm_offset_t clean_sva, clean_eva;

/* Bootstrap pages: vector page, message buffer, and per-mode stacks. */
struct pv_addr systempage;
struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
struct pv_addr kernelstack;

/* Initial trapframe for thread0. */
static struct trapframe proc0_tf;

/*
 * Static device mappings.  Entered into the bootstrap L1 table by
 * pmap_devmap_bootstrap(); the list is terminated by an all-zero entry.
 */
static const struct pmap_devmap iq81342_devmap[] = {
	{
		IOP34X_VADDR,
		IOP34X_HWADDR,
		IOP34X_SIZE,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		/*
		 * Cheat and map a whole section, this will bring
		 * both PCI-X and PCI-E outbound I/O
		 */
		IOP34X_PCIX_OIOBAR_VADDR &~ (0x100000 - 1),
		IOP34X_PCIX_OIOBAR &~ (0x100000 - 1),
		0x100000,
		VM_PROT_READ|VM_PROT_WRITE,
		PTE_NOCACHE,
	},
	{
		0,
		0,
		0,
		0,
		0,
	}
};

/* Physical base of SDRAM on this board. */
#define SDRAM_START 0x00000000

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

extern vm_offset_t xscale_cache_clean_addr;

/*
 * initarm() - machine-dependent early bootstrap for the CRB (i81342) board.
 *
 * Builds a fake module-preload metadata list (since the boot loader does
 * not supply one), carves bootstrap memory for the L1/L2 page tables and
 * per-mode stacks, constructs and activates the kernel L1 page table,
 * probes SDRAM size, installs exception handlers and the vector page,
 * wires up thread0/proc0, and fills phys_avail[]/dump_avail[].
 *
 * Returns a pointer to the top of the SVC-mode stack area (minus a pcb),
 * which the caller uses as the initial kernel stack pointer.
 *
 * NOTE(review): arg/arg2 are ignored here; presumably boot-loader
 * registers passed through by locore — confirm against the caller.
 */
void *
initarm(void *arg, void *arg2)
{
	struct pv_addr kernel_l1pt;
	int loop;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t freemem_pt;
	vm_offset_t afterkern;
	vm_offset_t freemem_after;
	vm_offset_t lastaddr;
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	int i;
	uint32_t fake_preload[35];
	uint32_t memsize, memstart;

	i = 0;

	set_cpufuncs();
	/*
	 * Build a minimal preload metadata list describing the kernel
	 * itself, so linker_* code works without a real loader.
	 * "elf kernel" is 11 bytes with its NUL, i.e. 3 words: the
	 * i++ on the strcpy plus the following i += 2 skip all three.
	 */
	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char*)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char*)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNBASE + 0x00200000;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNBASE - 0x00200000;
#ifdef DDB
	/*
	 * If the symbol-table trampoline left its magic at KERNVIRTADDR,
	 * the two words after it hold the symbol start/end addresses;
	 * publish them to DDB and extend lastaddr past the symbols.
	 */
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		ksym_start = zstart;
		ksym_end = zend;
	} else
#endif
		lastaddr = (vm_offset_t)&end;

	fake_preload[i++] = 0;	/* terminate the metadata list */
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;


	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

#define KERNEL_TEXT_BASE (KERNBASE + 0x00200000)
	/* Bootstrap allocations grow DOWN from 2MB physical. */
	freemempos = 0x00200000;
	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = (var).pv_pa + 0xc0000000;

	/*
	 * NOTE(review): 'np' is unparenthesized in the first expansion
	 * line; harmless here since every caller passes a simple
	 * expression, but don't pass anything with operators.
	 */
#define alloc_pages(var, np)			\
	freemempos -= (np * PAGE_SIZE);		\
	(var) = freemempos;			\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	/* Align down so the L1 table lands on an L1_TABLE_SIZE boundary. */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos -= PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	/*
	 * Allocate the L2 tables; L2_TABLE_SIZE_REAL tables fit per page,
	 * so only every fourth index allocates, the rest subdivide the
	 * page allocated by the preceding index.
	 */
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_pa = freemempos +
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_va =
			    kernel_pt_table[loop].pv_pa + 0xc0000000;
		}
	}
	freemem_pt = freemempos;
	/* Remaining bootstrap pages come from below 1MB physical. */
	freemempos = 0x00100000;
	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	valloc_pages(msgbufpv, round_page(MSGBUF_SIZE) / PAGE_SIZE);
#ifdef ARM_USE_SMALL_ALLOC
	/*
	 * Donate the slack between the bootstrap regions to the
	 * small-page allocator, reserving space for its bookkeeping
	 * (struct arm_small_page per page).
	 */
	freemempos -= PAGE_SIZE;
	freemem_pt = trunc_page(freemem_pt);
	freemem_after = freemempos - ((freemem_pt - 0x00100000) /
	    PAGE_SIZE) * sizeof(struct arm_small_page);
	arm_add_smallalloc_pages((void *)(freemem_after + 0xc0000000)
	    , (void *)0xc0100000, freemem_pt - 0x00100000, 1);
	freemem_after -= ((freemem_after - 0x00001000) / PAGE_SIZE) *
	    sizeof(struct arm_small_page);
#if 0
	arm_add_smallalloc_pages((void *)(freemem_after + 0xc0000000)
	, (void *)0xc0001000, trunc_page(freemem_after) - 0x00001000, 0);
#endif
	freemempos = trunc_page(freemem_after);
	freemempos -= PAGE_SIZE;
#endif
	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	/* First MB of SDRAM: normal cached mapping. */
	pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START, 0x100000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Second MB holds the bootstrap page tables: map as PTE_PAGETABLE. */
	pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000,
	    0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Kernel text/data, rounded up to a section (L1_S_SIZE) boundary. */
	pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000,
	    (((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	freemem_after = ((int)lastaddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	afterkern = round_page(((vm_offset_t)lastaddr + L1_S_SIZE) & ~(L1_S_SIZE
	    - 1));
	/* Link the spare L2 tables covering the VA range after the kernel. */
	for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
		pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
		    &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
	}


#ifdef ARM_USE_SMALL_ALLOC
	if ((freemem_after + 2 * PAGE_SIZE) <= afterkern) {
		arm_add_smallalloc_pages((void *)(freemem_after),
		    (void*)(freemem_after + PAGE_SIZE),
		    afterkern - (freemem_after + PAGE_SIZE), 0);

	}
#endif

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_devmap_bootstrap(l1pagetable, iq81342_devmap);
	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/* Switch to the new L1 table; order here is critical. */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */


	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);



	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();
	i80321_calibrate_delay();
	/* Probe SDRAM size from the memory controller. */
	i81342_sdram_bounds(&obio_bs_tag, IOP34X_VADDR, &memstart, &memsize);
	physmem = memsize / PAGE_SIZE;
	cninit();
	/* Set stack for exception handlers */

	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();

#ifdef KSE
	proc_linkup(&proc0, &ksegrp0, &thread0);
#else
	proc_linkup(&proc0, &thread0);
#endif
	/* Place thread0's pcb at the very top of its kernel stack. */
	thread0.td_kstack = kernelstack.pv_va;
	thread0.td_pcb = (struct pcb *)
		(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	pmap_curmaxkvaddr = afterkern + PAGE_SIZE;
	/*
	 * ARM_USE_SMALL_ALLOC uses dump_avail, so it must be filled before
	 * calling pmap_bootstrap.
	 */
	dump_avail[0] = 0x00000000;
	dump_avail[1] = 0x00000000 + memsize;
	dump_avail[2] = 0;
	dump_avail[3] = 0;

	pmap_bootstrap(pmap_curmaxkvaddr,
	    0xd0000000, &kernel_l1pt);
	msgbufp = (void*)msgbufpv.pv_va;
	msgbufinit(msgbufp, MSGBUF_SIZE);
	mutex_init();

	/* Build phys_avail: zero-terminated list of usable PA ranges. */
	i = 0;
#ifdef ARM_USE_SMALL_ALLOC
	phys_avail[i++] = 0x00000000;
	phys_avail[i++] = 0x00001000; 	/*
					 *XXX: Gross hack to get our
					 * pages in the vm_page_array
					. */
#endif
	phys_avail[i++] = round_page(virtual_avail - KERNBASE + SDRAM_START);
	phys_avail[i++] = trunc_page(0x00000000 + memsize - 1);
	phys_avail[i++] = 0;
	phys_avail[i] = 0;

	/* Do basic tuning, hz etc */
	init_param1();
	init_param2(physmem);
	kdb_init();
	/* Initial SVC stack pointer: top of stack area, below the pcb. */
	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}