mv_machdep.c revision 217709
/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45
 */

#include "opt_ddb.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/mv/mv_machdep.c 217709 2011-01-22 01:31:59Z marcel $");

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/cons.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/kdb.h>
#include <sys/msgbuf.h>
#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/fdt.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/pte.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/armreg.h>
#include <machine/bus.h>
#include <sys/reboot.h>

#include <arm/mv/mvreg.h>       /* XXX */
#include <arm/mv/mvvar.h>       /* XXX eventually this should be eliminated */
#include <arm/mv/mvwin.h>

#define DEBUG
#undef DEBUG

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks etc.), uprounded to be divisible by 4.
 */
#define KERNEL_PT_MAX           78

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE          1
#define ABT_STACK_SIZE          1
#define UND_STACK_SIZE          1

extern unsigned char kernbase[];
extern unsigned char _etext[];
extern unsigned char _edata[];
extern unsigned char __bss_start[];
extern unsigned char _end[];

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;

extern vm_offset_t pmap_bootstrap_lastaddr;
extern int *end;

struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;

/* Physical and virtual addresses for some global pages */

vm_paddr_t phys_avail[10];
vm_paddr_t dump_avail[4];
vm_offset_t physical_pages;
vm_offset_t pmap_bootstrap_lastaddr;

const struct pmap_devmap *pmap_devmap_bootstrap_table;
struct pv_addr systempage;
struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
struct pv_addr kernelstack;

static struct trapframe proc0_tf;

static struct mem_region availmem_regions[FDT_MEM_REGIONS];
static int availmem_regions_sz;

static void print_kenv(void);
static void print_kernel_section_addr(void);

static void physmap_init(void);
static int platform_devmap_init(void);
static int platform_mpp_init(void);

static char *
kenv_next(char *cp)
{

        if (cp != NULL) {
                while (*cp != 0)
                        cp++;
                cp++;
                if (*cp == 0)
                        cp = NULL;
        }
        return (cp);
}

static void
print_kenv(void)
{
        int len;
        char *cp;

        debugf("loader passed (static) kenv:\n");
        if (kern_envp == NULL) {
                debugf(" no env, null ptr\n");
                return;
        }
        debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);

        len = 0;
        for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
                debugf(" %x %s\n", (uint32_t)cp, cp);
}

static void
print_kernel_section_addr(void)
{

        debugf("kernel image addresses:\n");
        debugf(" kernbase       = 0x%08x\n", (uint32_t)kernbase);
        debugf(" _etext (sdata) = 0x%08x\n", (uint32_t)_etext);
        debugf(" _edata         = 0x%08x\n", (uint32_t)_edata);
        debugf(" __bss_start    = 0x%08x\n", (uint32_t)__bss_start);
        debugf(" _end           = 0x%08x\n", (uint32_t)_end);
}

static void
physmap_init(void)
{
        int i, j, cnt;
        vm_offset_t phys_kernelend, kernload;
        uint32_t s, e, sz;
        struct mem_region *mp, *mp1;

        phys_kernelend = KERNPHYSADDR + (virtual_avail - KERNVIRTADDR);
        kernload = KERNPHYSADDR;

        /*
         * Remove kernel physical address range from avail
         * regions list. Page align all regions.
         * Non-page aligned memory isn't very interesting to us.
         * Also, sort the entries for ascending addresses.
         */
        sz = 0;
        cnt = availmem_regions_sz;
        debugf("processing avail regions:\n");
        for (mp = availmem_regions; mp->mr_size; mp++) {
                s = mp->mr_start;
                e = mp->mr_start + mp->mr_size;
                debugf(" %08x-%08x -> ", s, e);
                /* Check whether this region holds all of the kernel. */
                if (s < kernload && e > phys_kernelend) {
                        availmem_regions[cnt].mr_start = phys_kernelend;
                        availmem_regions[cnt++].mr_size = e - phys_kernelend;
                        e = kernload;
                }
                /* Look whether this region starts within the kernel. */
                if (s >= kernload && s < phys_kernelend) {
                        if (e <= phys_kernelend)
                                goto empty;
                        s = phys_kernelend;
                }
                /* Now look whether this region ends within the kernel. */
                if (e > kernload && e <= phys_kernelend) {
                        if (s >= kernload) {
                                goto empty;
                        }
                        e = kernload;
                }
                /* Now page align the start and size of the region. */
                s = round_page(s);
                e = trunc_page(e);
                if (e < s)
                        e = s;
                sz = e - s;
                debugf("%08x-%08x = %x\n", s, e, sz);

                /* Check whether some memory is left here. */
                if (sz == 0) {
                empty:
                        printf("skipping\n");
                        bcopy(mp + 1, mp,
                            (cnt - (mp - availmem_regions)) * sizeof(*mp));
                        cnt--;
                        mp--;
                        continue;
                }

                /* Do an insertion sort. */
                for (mp1 = availmem_regions; mp1 < mp; mp1++)
                        if (s < mp1->mr_start)
                                break;
                if (mp1 < mp) {
                        bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
                        mp1->mr_start = s;
                        mp1->mr_size = sz;
                } else {
                        mp->mr_start = s;
                        mp->mr_size = sz;
                }
        }
        availmem_regions_sz = cnt;

        /* Fill in phys_avail table, based on availmem_regions */
        debugf("fill in phys_avail:\n");
        for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

                debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
                    availmem_regions[i].mr_start,
                    availmem_regions[i].mr_start + availmem_regions[i].mr_size,
                    availmem_regions[i].mr_size);

                phys_avail[j] = availmem_regions[i].mr_start;
                phys_avail[j + 1] = availmem_regions[i].mr_start +
                    availmem_regions[i].mr_size;
        }
        phys_avail[j] = 0;
        phys_avail[j + 1] = 0;
}

void *
initarm(void *mdp, void *unused __unused)
{
        struct pv_addr kernel_l1pt;
        struct pv_addr dpcpu;
        vm_offset_t dtbp, freemempos, l2_start, lastaddr;
        uint32_t memsize, l2size;
        void *kmdp;
        u_int l1pagetable;
        int i = 0, j = 0;

        kmdp = NULL;
        lastaddr = 0;
        memsize = 0;
        dtbp = (vm_offset_t)NULL;

        set_cpufuncs();

        /*
         * Mask metadata pointer: it is supposed to be on page boundary. If
         * the first argument (mdp) doesn't point to a valid address the
         * bootloader must have passed us something other than the metadata
         * ptr... In this case we want to fall back to some built-in settings.
         */
        mdp = (void *)((uint32_t)mdp & ~PAGE_MASK);

        /* Parse metadata and fetch parameters */
        if (mdp != NULL) {
                preload_metadata = mdp;
                kmdp = preload_search_by_type("elf kernel");
                if (kmdp != NULL) {
                        boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
                        kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
                        dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
                        lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND,
                            vm_offset_t);
#ifdef DDB
                        ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
                        ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
                }
        } else {
                /* Fall back to hardcoded metadata. */
                lastaddr = fake_preload_metadata();
        }

#if defined(FDT_DTB_STATIC)
        /*
         * In case the device tree blob was not retrieved (from metadata) try
         * to use the statically embedded one.
         */
        if (dtbp == (vm_offset_t)NULL)
                dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

        if (OF_install(OFW_FDT, 0) == FALSE)
                while (1);

        if (OF_init((void *)dtbp) != 0)
                while (1);

        /* Grab physical memory regions information from device tree. */
        if (fdt_get_mem_regions(availmem_regions, &availmem_regions_sz,
            &memsize) != 0)
                while (1);

        if (fdt_immr_addr(MV_BASE) != 0)
                while (1);

        /* Platform-specific initialisation */
        pmap_bootstrap_lastaddr = fdt_immr_va - ARM_NOCACHE_KVA_SIZE;

        pcpu_init(pcpup, 0, sizeof(struct pcpu));
        PCPU_SET(curthread, &thread0);

        /* Calculate number of L2 tables needed for mapping vm_page_array */
        l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
        l2size = (l2size >> L1_S_SHIFT) + 1;

        /*
         * Add one table for end of kernel map, one for stacks, msgbuf and
         * L1 and L2 tables map and one for vectors map.
         */
        l2size += 3;

        /* Make it divisible by 4 */
        l2size = (l2size + 3) & ~3;

#define KERNEL_TEXT_BASE (KERNBASE)
        freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

        /* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)                   \
        alloc_pages((var).pv_va, (np));         \
        (var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);

#define alloc_pages(var, np)                    \
        (var) = freemempos;                     \
        freemempos += (np * PAGE_SIZE);         \
        memset((char *)(var), 0, ((np) * PAGE_SIZE));

        while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
                freemempos += PAGE_SIZE;
        valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

        for (i = 0; i < l2size; ++i) {
                if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
                        valloc_pages(kernel_pt_table[i],
                            L2_TABLE_SIZE / PAGE_SIZE);
                        j = i;
                } else {
                        kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
                            L2_TABLE_SIZE_REAL * (i - j);
                        kernel_pt_table[i].pv_pa =
                            kernel_pt_table[i].pv_va - KERNVIRTADDR +
                            KERNPHYSADDR;
                }
        }
        /*
         * Allocate a page for the system page mapped to 0x00000000
         * or 0xffff0000. This page will just contain the system vectors
         * and can be shared by all processes.
         */
        valloc_pages(systempage, 1);

        /* Allocate dynamic per-cpu area. */
        valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
        dpcpu_init((void *)dpcpu.pv_va, 0);

        /* Allocate stacks for all modes */
        valloc_pages(irqstack, IRQ_STACK_SIZE);
        valloc_pages(abtstack, ABT_STACK_SIZE);
        valloc_pages(undstack, UND_STACK_SIZE);
        valloc_pages(kernelstack, KSTACK_PAGES);

        init_param1();

        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

        /*
         * Now we start construction of the L1 page table.
         * We start by mapping the L2 page tables into the L1.
         * This means that we can replace L1 mappings later on if necessary.
         */
        l1pagetable = kernel_l1pt.pv_va;

        /*
         * Try to map as much as possible of the kernel text and data using
         * 1MB section mappings, and for the rest of the initial kernel
         * address space use L2 coarse tables.
         *
         * Link L2 tables for mapping remainder of kernel (modulo 1MB)
         * and kernel structures.
         */
        l2_start = lastaddr & ~(L1_S_OFFSET);
        for (i = 0; i < l2size - 1; i++)
                pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
                    &kernel_pt_table[i]);

        pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

        /* Map kernel code and data */
        pmap_map_chunk(l1pagetable, KERNVIRTADDR, KERNPHYSADDR,
            (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
            VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

        /* Map L1 directory and allocated L2 page tables */
        pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
            L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

        pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
            kernel_pt_table[0].pv_pa,
            L2_TABLE_SIZE_REAL * l2size,
            VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

        /* Map allocated DPCPU, stacks and msgbuf */
        pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
            freemempos - dpcpu.pv_va,
            VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

        /* Link and map the vector page */
        pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
            &kernel_pt_table[l2size - 1]);
        pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
            VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

        /* Map pmap_devmap[] entries */
        if (platform_devmap_init() != 0)
                while (1);
        pmap_devmap_bootstrap(l1pagetable, pmap_devmap_bootstrap_table);

        cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
            DOMAIN_CLIENT);
        setttb(kernel_l1pt.pv_pa);
        cpu_tlb_flushID();
        cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

        /*
         * Only after the SoC registers block is mapped can we perform device
         * tree fixups, as they may attempt to read parameters from hardware.
         */
        OF_interpret("perform-fixup", 0);

        /*
         * Re-initialise MPP. It is important to call this prior to using
         * console as the physical connection can be routed via MPP.
         */
        if (platform_mpp_init() != 0)
                while (1);

        /*
         * Initialize GPIO as early as possible.
         */
        if (platform_gpio_init() != 0)
                while (1);

        cninit();
        physmem = memsize / PAGE_SIZE;

        debugf("initarm: console initialized\n");
        debugf(" arg1 mdp = 0x%08x\n", (uint32_t)mdp);
        debugf(" boothowto = 0x%08x\n", boothowto);
        printf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
        print_kernel_section_addr();
        print_kenv();

        /*
         * Re-initialise decode windows
         */
        if (soc_decode_win() != 0)
                printf("WARNING: could not re-initialise decode windows! "
                    "Running with existing settings...\n");
        /*
         * Pages were allocated during the secondary bootstrap for the
         * stacks for different CPU modes.
         * We must now set the r13 registers in the different CPU modes to
         * point to these stacks.
         * Since the ARM stacks use STMFD etc. we must set r13 to the top end
         * of the stack memory.
         */
        cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
        set_stackptr(PSR_IRQ32_MODE,
            irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
        set_stackptr(PSR_ABT32_MODE,
            abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
        set_stackptr(PSR_UND32_MODE,
            undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

        /*
         * We must now clean the cache again....
         * Cleaning may be done by reading new data to displace any
         * dirty data in the cache. This will have happened in setttb()
         * but since we are bootstrapping the addresses used for the read
         * may have just been remapped and thus the cache could be out
         * of sync. A re-clean after the switch will cure this.
         * After booting there are no gross relocations of the kernel, thus
         * this problem will not occur after initarm().
         */
        cpu_idcache_wbinv_all();

        /* Set stack for exception handlers */
        data_abort_handler_address = (u_int)data_abort_handler;
        prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
        undefined_handler_address = (u_int)undefinedinstruction_bounce;
        undefined_init();

        proc_linkup0(&proc0, &thread0);
        thread0.td_kstack = kernelstack.pv_va;
        thread0.td_kstack_pages = KSTACK_PAGES;
        thread0.td_pcb = (struct pcb *)
            (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
        thread0.td_pcb->pcb_flags = 0;
        thread0.td_frame = &proc0_tf;
        pcpup->pc_curpcb = thread0.td_pcb;

        arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

        dump_avail[0] = 0;
        dump_avail[1] = memsize;
        dump_avail[2] = 0;
        dump_avail[3] = 0;

        pmap_bootstrap(freemempos, pmap_bootstrap_lastaddr, &kernel_l1pt);
        msgbufp = (void *)msgbufpv.pv_va;
        msgbufinit(msgbufp, msgbufsize);
        mutex_init();

        /*
         * Prepare map of physical memory regions available to vm subsystem.
         */
        physmap_init();

        /* Do basic tuning, hz etc. */
        init_param2(physmem);
        kdb_init();
        return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
            sizeof(struct pcb)));
}

#define MPP_PIN_MAX             50
#define MPP_PIN_CELLS           2
#define MPP_PINS_PER_REG        8
#define MPP_SEL(pin,func)       (((func) & 0xf) <<              \
    (((pin) % MPP_PINS_PER_REG) * 4))

static int
platform_mpp_init(void)
{
        pcell_t pinmap[MPP_PIN_MAX * MPP_PIN_CELLS];
        int mpp[MPP_PIN_MAX];
        uint32_t ctrl_val, ctrl_offset;
        pcell_t reg[4];
        u_long start, size;
        phandle_t node;
        pcell_t pin_cells, *pinmap_ptr, pin_count;
        ssize_t len;
        int par_addr_cells, par_size_cells;
        int tuple_size, tuples, rv, pins, i, j;
        int mpp_pin, mpp_function;

        /*
         * Try to access the MPP node directly, i.e. through /aliases/mpp.
         */
        if ((node = OF_finddevice("mpp")) != 0)
                if (fdt_is_compatible(node, "mrvl,mpp"))
                        goto moveon;
        /*
         * Find the node the long way.
         */
        if ((node = OF_finddevice("/")) == 0)
                return (ENXIO);

        if ((node = fdt_find_compatible(node, "simple-bus", 0)) == 0)
                return (ENXIO);

        if ((node = fdt_find_compatible(node, "mrvl,mpp", 0)) == 0)
                return (ENXIO);
moveon:
        /*
         * Process 'reg' prop.
         */
        if ((rv = fdt_addrsize_cells(OF_parent(node), &par_addr_cells,
            &par_size_cells)) != 0)
                return (ENXIO);

        tuple_size = sizeof(pcell_t) * (par_addr_cells + par_size_cells);
        len = OF_getprop(node, "reg", reg, sizeof(reg));
        tuples = len / tuple_size;
        if (tuple_size <= 0)
                return (EINVAL);

        /*
         * Get address/size. XXX we assume only the first 'reg' tuple is used.
         */
        rv = fdt_data_to_res(reg, par_addr_cells, par_size_cells,
            &start, &size);
        if (rv != 0)
                return (rv);
        start += fdt_immr_va;

        /*
         * Process 'pin-count' and 'pin-map' props.
         */
        if (OF_getprop(node, "pin-count", &pin_count, sizeof(pin_count)) <= 0)
                return (ENXIO);
        pin_count = fdt32_to_cpu(pin_count);
        if (pin_count > MPP_PIN_MAX)
                return (ERANGE);

        if (OF_getprop(node, "#pin-cells", &pin_cells, sizeof(pin_cells)) <= 0)
                pin_cells = MPP_PIN_CELLS;
        pin_cells = fdt32_to_cpu(pin_cells);
        if (pin_cells > MPP_PIN_CELLS)
                return (ERANGE);
        tuple_size = sizeof(pcell_t) * pin_cells;

        bzero(pinmap, sizeof(pinmap));
        len = OF_getprop(node, "pin-map", pinmap, sizeof(pinmap));
        if (len <= 0)
                return (ERANGE);
        if (len % tuple_size)
                return (ERANGE);
        pins = len / tuple_size;
        if (pins > pin_count)
                return (ERANGE);
        /*
         * Fill out a "mpp[pin] => function" table. All pins not specified in
         * the 'pin-map' property default to function 0, i.e. GPIO.
         */
        bzero(mpp, sizeof(mpp));
        pinmap_ptr = pinmap;
        for (i = 0; i < pins; i++) {
                mpp_pin = fdt32_to_cpu(*pinmap_ptr);
                mpp_function = fdt32_to_cpu(*(pinmap_ptr + 1));
                mpp[mpp_pin] = mpp_function;
                pinmap_ptr += pin_cells;
        }

        /*
         * Prepare and program MPP control register values.
         */
        ctrl_offset = 0;
        for (i = 0; i < pin_count;) {
                ctrl_val = 0;

                for (j = 0; j < MPP_PINS_PER_REG; j++) {
                        if (i + j == pin_count - 1)
                                break;
                        ctrl_val |= MPP_SEL(i + j, mpp[i + j]);
                }
                i += MPP_PINS_PER_REG;
                bus_space_write_4(fdtbus_bs_tag, start, ctrl_offset,
                    ctrl_val);

#if defined(SOC_MV_ORION)
                /*
                 * Third MPP reg on Orion SoC is placed
                 * non-linearly (with different offset).
                 */
                if (i == (2 * MPP_PINS_PER_REG))
                        ctrl_offset = 0x50;
                else
#endif
                        ctrl_offset += 4;
        }

        return (0);
}

#define FDT_DEVMAP_MAX  (1 + 2 + 1 + 1)
static struct pmap_devmap fdt_devmap[FDT_DEVMAP_MAX] = {
        { 0, 0, 0, 0, 0, }
};

/*
 * Construct pmap_devmap[] with DT-derived config data.
 */
static int
platform_devmap_init(void)
{
        phandle_t root, child;
        u_long base, size;
        int i;

        /*
         * IMMR range.
         */
        i = 0;
        fdt_devmap[i].pd_va = fdt_immr_va;
        fdt_devmap[i].pd_pa = fdt_immr_pa;
        fdt_devmap[i].pd_size = fdt_immr_size;
        fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE;
        fdt_devmap[i].pd_cache = PTE_NOCACHE;
        i++;

        /*
         * PCI range(s).
         */
        if ((root = OF_finddevice("/")) == 0)
                return (ENXIO);

        for (child = OF_child(root); child != 0; child = OF_peer(child))
                if (fdt_is_type(child, "pci")) {
                        /*
                         * Check space: each PCI node will consume 2 devmap
                         * entries.
                         */
                        if (i + 1 >= FDT_DEVMAP_MAX)
                                return (ENOMEM);

                        /*
                         * XXX this should account for PCI and multiple ranges
                         * of a given kind.
                         */
                        if (fdt_pci_devmap(child, &fdt_devmap[i],
                            MV_PCIE_IO_BASE, MV_PCIE_MEM_BASE) != 0)
                                return (ENXIO);
                        i += 2;
                }

        /*
         * CESA SRAM range.
         */
        if ((child = OF_finddevice("sram")) != 0)
                if (fdt_is_compatible(child, "mrvl,cesa-sram"))
                        goto moveon;

        if ((child = fdt_find_compatible(root, "mrvl,cesa-sram", 0)) == 0)
                /* No CESA SRAM node. */
                goto out;
moveon:
        if (i >= FDT_DEVMAP_MAX)
                return (ENOMEM);

        if (fdt_regsize(child, &base, &size) != 0)
                return (EINVAL);

        fdt_devmap[i].pd_va = MV_CESA_SRAM_BASE;        /* XXX */
        fdt_devmap[i].pd_pa = base;
        fdt_devmap[i].pd_size = size;
        fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE;
        fdt_devmap[i].pd_cache = PTE_NOCACHE;

out:
        pmap_devmap_bootstrap_table = &fdt_devmap[0];
        return (0);
}

struct arm32_dma_range *
bus_dma_get_range(void)
{

        return (NULL);
}

int
bus_dma_get_range_nb(void)
{

        return (0);
}