mv_machdep.c revision 236990

/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45
 */

#include "opt_ddb.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/mv/mv_machdep.c 236990 2012-06-13 04:59:00Z imp $");

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/cons.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/kdb.h>
#include <sys/msgbuf.h>
#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/fdt.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/pte.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/armreg.h>
#include <machine/bus.h>
#include <sys/reboot.h>

#include <arm/mv/mvreg.h>	/* XXX */
#include <arm/mv/mvvar.h>	/* XXX eventually this should be eliminated */
#include <arm/mv/mvwin.h>

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

/*
 * This is the number of L2 page tables required to cover the maximum
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks etc.), rounded up to be divisible by 4.
 */
#define KERNEL_PT_MAX	78
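
/*
 * A rough sanity check of the value above (illustrative figures only, not
 * taken from this file): initarm() below sizes the table set as
 * (memsize / PAGE_SIZE) * sizeof(struct vm_page) >> L1_S_SHIFT, plus one,
 * plus three extra tables, rounded up to a multiple of 4. Assuming 4KB
 * pages, 1MB L1 sections (L1_S_SHIFT == 20) and sizeof(struct vm_page) in
 * the 70-80 byte range, a 4GB memsize yields about 1M vm_page entries,
 * i.e. roughly 70-80MB of vm_page_array, and hence on the order of 70-80
 * L2 tables, consistent with KERNEL_PT_MAX above.
 */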

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#define UND_STACK_SIZE	1

extern unsigned char kernbase[];
extern unsigned char _etext[];
extern unsigned char _edata[];
extern unsigned char __bss_start[];
extern unsigned char _end[];

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;

extern vm_offset_t pmap_bootstrap_lastaddr;
extern int *end;

struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;

/* Physical and virtual addresses for some global pages */

vm_paddr_t phys_avail[10];
vm_paddr_t dump_avail[4];
vm_offset_t physical_pages;
vm_offset_t pmap_bootstrap_lastaddr;

const struct pmap_devmap *pmap_devmap_bootstrap_table;
struct pv_addr systempage;
struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
struct pv_addr kernelstack;

static struct mem_region availmem_regions[FDT_MEM_REGIONS];
static int availmem_regions_sz;

static void print_kenv(void);
static void print_kernel_section_addr(void);

static void physmap_init(void);
static int platform_devmap_init(void);
static int platform_mpp_init(void);

static char *
kenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

static void
print_kenv(void)
{
	int len;
	char *cp;

	debugf("loader passed (static) kenv:\n");
	if (kern_envp == NULL) {
		debugf(" no env, null ptr\n");
		return;
	}
	debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);

	len = 0;
	for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
		debugf(" %x %s\n", (uint32_t)cp, cp);
}

static void
print_kernel_section_addr(void)
{

	debugf("kernel image addresses:\n");
	debugf(" kernbase       = 0x%08x\n", (uint32_t)kernbase);
	debugf(" _etext (sdata) = 0x%08x\n", (uint32_t)_etext);
	debugf(" _edata         = 0x%08x\n", (uint32_t)_edata);
	debugf(" __bss_start    = 0x%08x\n", (uint32_t)__bss_start);
	debugf(" _end           = 0x%08x\n", (uint32_t)_end);
}

static void
physmap_init(void)
{
	int i, j, cnt;
	vm_offset_t phys_kernelend, kernload;
	uint32_t s, e, sz;
	struct mem_region *mp, *mp1;

	phys_kernelend = KERNPHYSADDR + (virtual_avail - KERNVIRTADDR);
	kernload = KERNPHYSADDR;

	/*
	 * Remove the kernel's physical address range from the avail regions
	 * list. Page align all regions: non-page aligned memory isn't very
	 * interesting to us. Also, sort the entries for ascending addresses.
	 */
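	/*
	 * Illustrative example (addresses invented): given an avail region
	 * 0x00000000-0x10000000 and a kernel occupying
	 * 0x00100000-0x00400000, the first branch below splits the region
	 * in two, 0x00000000-0x00100000 (by pulling 'e' back to kernload)
	 * and 0x00400000-0x10000000 (appended at the end of the list). The
	 * following two branches handle regions that merely start or end
	 * inside the kernel by moving 's' or 'e' past it.
	 */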
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload) {
				goto empty;
			}
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			printf("skipping\n");
			bcopy(mp + 1, mp,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/* Fill in the phys_avail table, based on availmem_regions */
	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start + availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		/*
		 * We should not map the page at PA 0x00000000: the VM can't
		 * handle it, as pmap_extract() == 0 means failure.
		 */
		if (availmem_regions[i].mr_start > 0 ||
		    availmem_regions[i].mr_size > PAGE_SIZE) {
			phys_avail[j] = availmem_regions[i].mr_start;
			if (phys_avail[j] == 0)
				phys_avail[j] += PAGE_SIZE;
			phys_avail[j + 1] = availmem_regions[i].mr_start +
			    availmem_regions[i].mr_size;
		} else
			j -= 2;
	}
	phys_avail[j] = 0;
	phys_avail[j + 1] = 0;
}
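
/*
 * Note on the result (a sketch with made-up numbers): the VM consumes
 * phys_avail[] as (start, end) pairs terminated by a zero pair. For the
 * hypothetical 256MB bank and kernel placement used in the example above,
 * the table would end up as:
 *
 *	phys_avail[0] = 0x00001000;	phys_avail[1] = 0x00100000;
 *	phys_avail[2] = 0x00400000;	phys_avail[3] = 0x10000000;
 *	phys_avail[4] = 0;		phys_avail[5] = 0;
 *
 * (the first start is bumped by one page because PA 0x0 is never handed
 * to the VM, as noted above).
 */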

void *
initarm(struct arm_boot_params *abp)
{
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint32_t memsize, l2size;
	void *kmdp;
	void *mdp;
	u_int l1pagetable;
	int i = 0, j = 0, err_devmap = 0;

	mdp = (void *)abp->abp_r0;
	kmdp = NULL;
	lastaddr = 0;
	memsize = 0;
	dtbp = (vm_offset_t)NULL;

	set_cpufuncs();

	/*
	 * Mask the metadata pointer: it is supposed to be on a page
	 * boundary. If the first argument (mdp) doesn't point to a valid
	 * address, the bootloader must have passed us something other than
	 * the metadata ptr... In this case we want to fall back to some
	 * built-in settings.
	 */
	mdp = (void *)((uint32_t)mdp & ~PAGE_MASK);

	/* Parse metadata and fetch parameters */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
			lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND,
			    vm_offset_t);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}

		preload_addr_relocate = KERNVIRTADDR - KERNPHYSADDR;
	} else {
		/* Fall back to hardcoded metadata. */
		lastaddr = fake_preload_metadata();
	}

#if defined(FDT_DTB_STATIC)
	/*
	 * If the device tree blob was not retrieved (from metadata), try to
	 * use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		while (1);

	if (OF_init((void *)dtbp) != 0)
		while (1);

	/* Grab physical memory regions information from the device tree. */
	if (fdt_get_mem_regions(availmem_regions, &availmem_regions_sz,
	    &memsize) != 0)
		while (1);

	if (fdt_immr_addr(MV_BASE) != 0)
		while (1);

	/* Platform-specific initialisation */
	pmap_bootstrap_lastaddr = fdt_immr_va - ARM_NOCACHE_KVA_SIZE;

	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	/* Calculate the number of L2 tables needed to map vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for the end of the kernel map, one for the stacks,
	 * msgbuf and L1/L2 tables map, and one for the vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

#define KERNEL_TEXT_BASE (KERNBASE)
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)			\
	alloc_pages((var).pv_va, (np));		\
	(var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);

#define alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += (np * PAGE_SIZE);		\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
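
	/*
	 * Usage note (a sketch of the mechanics, nothing beyond what the
	 * macros already do): valloc_pages(var, np) carves 'np' zeroed
	 * pages out of freemempos and records both addresses of the
	 * allocation in the pv_addr, e.g. valloc_pages(irqstack,
	 * IRQ_STACK_SIZE) below. The pv_pa line relies on the constant
	 * VA->PA offset of the bootstrap mapping: while running on the
	 * initial tables, PA == VA + (KERNPHYSADDR - KERNVIRTADDR) for
	 * every page allocated this way.
	 */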

	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	for (i = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    KERNPHYSADDR;
		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate the dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);

	init_param1();

	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1, which means
	 * that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of the kernel text and data using
	 * 1MB section mappings, and for the rest of the initial kernel
	 * address space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping the remainder of the kernel (modulo
	 * 1MB) and kernel structures.
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;
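
	/*
	 * Layout sketch (hypothetical numbers, for orientation only): with
	 * l2size = 8 and lastaddr inside the 1MB section at l2_start, the
	 * loop above links tables 0..6 at l2_start, l2_start + 1MB, ...,
	 * giving pmap room to grow KVA up to pmap_curmaxkvaddr; table 7
	 * (index l2size - 1) is held back and linked at ARM_VECTORS_HIGH
	 * further down.
	 */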

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, KERNPHYSADDR,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map the allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map pmap_devmap[] entries */
	err_devmap = platform_devmap_init();
	pmap_devmap_bootstrap(l1pagetable, pmap_devmap_bootstrap_table);

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Only after the SOC registers block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	/*
	 * Re-initialise MPP. It is important to call this prior to using
	 * the console, as the physical connection can be routed via MPP.
	 */
	if (platform_mpp_init() != 0)
		while (1);

	cninit();

	physmem = memsize / PAGE_SIZE;

	debugf("initarm: console initialized\n");
	debugf(" arg1 mdp = 0x%08x\n", (uint32_t)mdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	printf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kernel_section_addr();
	print_kenv();

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	/*
	 * Re-initialise decode windows
	 */
	if (soc_decode_win() != 0)
		printf("WARNING: could not re-initialise decode windows! "
		    "Running with existing settings...\n");

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for the different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any dirty
	 * data in the cache. This will have happened in setttb(), but since
	 * we are bootstrapping the addresses used for the read may have just
	 * been remapped and thus the cache could be out of sync. A re-clean
	 * after the switch will cure this. After booting there are no gross
	 * relocations of the kernel, thus this problem will not occur after
	 * initarm().
	 */
	cpu_idcache_wbinv_all();

	/* Set the exception handler addresses */
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	dump_avail[0] = 0;
	dump_avail[1] = memsize;
	dump_avail[2] = 0;
	dump_avail[3] = 0;

	pmap_bootstrap(freemempos, pmap_bootstrap_lastaddr, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Prepare the map of physical memory regions available to the vm
	 * subsystem.
	 */
	physmap_init();

	/* Do basic tuning, hz etc */
	init_param2(physmem);
	kdb_init();
	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}

#define MPP_PIN_MAX		50
#define MPP_PIN_CELLS		2
#define MPP_PINS_PER_REG	8
#define MPP_SEL(pin,func)	(((func) & 0xf) <<		\
    (((pin) % MPP_PINS_PER_REG) * 4))
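
/*
 * Worked example of the selector math above (values invented): for pin 10,
 * function 5, MPP_SEL(10, 5) evaluates to (5 & 0xf) << ((10 % 8) * 4),
 * i.e. 0x5 << 8 == 0x00000500: the third 4-bit nibble of the second MPP
 * control register, which serves pins 8-15.
 */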
666 */ 667 if (OF_getprop(node, "pin-count", &pin_count, sizeof(pin_count)) <= 0) 668 return (ENXIO); 669 pin_count = fdt32_to_cpu(pin_count); 670 if (pin_count > MPP_PIN_MAX) 671 return (ERANGE); 672 673 if (OF_getprop(node, "#pin-cells", &pin_cells, sizeof(pin_cells)) <= 0) 674 pin_cells = MPP_PIN_CELLS; 675 pin_cells = fdt32_to_cpu(pin_cells); 676 if (pin_cells > MPP_PIN_CELLS) 677 return (ERANGE); 678 tuple_size = sizeof(pcell_t) * pin_cells; 679 680 bzero(pinmap, sizeof(pinmap)); 681 len = OF_getprop(node, "pin-map", pinmap, sizeof(pinmap)); 682 if (len <= 0) 683 return (ERANGE); 684 if (len % tuple_size) 685 return (ERANGE); 686 pins = len / tuple_size; 687 if (pins > pin_count) 688 return (ERANGE); 689 /* 690 * Fill out a "mpp[pin] => function" table. All pins unspecified in 691 * the 'pin-map' property are defaulted to 0 function i.e. GPIO. 692 */ 693 bzero(mpp, sizeof(mpp)); 694 pinmap_ptr = pinmap; 695 for (i = 0; i < pins; i++) { 696 mpp_pin = fdt32_to_cpu(*pinmap_ptr); 697 mpp_function = fdt32_to_cpu(*(pinmap_ptr + 1)); 698 mpp[mpp_pin] = mpp_function; 699 pinmap_ptr += pin_cells; 700 } 701 702 /* 703 * Prepare and program MPP control register values. 704 */ 705 ctrl_offset = 0; 706 for (i = 0; i < pin_count;) { 707 ctrl_val = 0; 708 709 for (j = 0; j < MPP_PINS_PER_REG; j++) { 710 if (i + j == pin_count - 1) 711 break; 712 ctrl_val |= MPP_SEL(i + j, mpp[i + j]); 713 } 714 i += MPP_PINS_PER_REG; 715 bus_space_write_4(fdtbus_bs_tag, start, ctrl_offset, 716 ctrl_val); 717 718#if defined(SOC_MV_ORION) 719 /* 720 * Third MPP reg on Orion SoC is placed 721 * non-linearly (with different offset). 722 */ 723 if (i == (2 * MPP_PINS_PER_REG)) 724 ctrl_offset = 0x50; 725 else 726#endif 727 ctrl_offset += 4; 728 } 729 730 return (0); 731} 732 733#define FDT_DEVMAP_MAX (MV_WIN_CPU_MAX + 1) 734static struct pmap_devmap fdt_devmap[FDT_DEVMAP_MAX] = { 735 { 0, 0, 0, 0, 0, } 736}; 737 738/* 739 * XXX: When device entry in devmap has pd_size smaller than section size, 740 * system will freeze during initialization 741 */ 742 743/* 744 * Construct pmap_devmap[] with DT-derived config data. 745 */ 746 747static int 748platform_devmap_init(void) 749{ 750 phandle_t root, child; 751 pcell_t bank_count; 752 u_long base, size; 753 int i, num_mapped; 754 755 i = 0; 756 pmap_devmap_bootstrap_table = &fdt_devmap[0]; 757 758 /* 759 * IMMR range. 760 */ 761 fdt_devmap[i].pd_va = fdt_immr_va; 762 fdt_devmap[i].pd_pa = fdt_immr_pa; 763 fdt_devmap[i].pd_size = fdt_immr_size; 764 fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE; 765 fdt_devmap[i].pd_cache = PTE_NOCACHE; 766 i++; 767 768 /* 769 * PCI range(s) and localbus. 770 */ 771 if ((root = OF_finddevice("/")) == -1) 772 return (ENXIO); 773 774 for (child = OF_child(root); child != 0; child = OF_peer(child)) { 775 if (fdt_is_type(child, "pci")) { 776 /* 777 * Check space: each PCI node will consume 2 devmap 778 * entries. 779 */ 780 if (i + 1 >= FDT_DEVMAP_MAX) { 781 return (ENOMEM); 782 } 783 784 /* 785 * XXX this should account for PCI and multiple ranges 786 * of a given kind. 
787 */ 788 if (fdt_pci_devmap(child, &fdt_devmap[i], 789 MV_PCIE_IO_BASE, MV_PCIE_MEM_BASE) != 0) 790 return (ENXIO); 791 i += 2; 792 } 793 794 if (fdt_is_compatible(child, "mrvl,lbc")) { 795 /* Check available space */ 796 if (OF_getprop(child, "bank-count", (void *)&bank_count, 797 sizeof(bank_count)) <= 0) 798 /* If no property, use default value */ 799 bank_count = 1; 800 else 801 bank_count = fdt32_to_cpu(bank_count); 802 803 if ((i + bank_count) >= FDT_DEVMAP_MAX) 804 return (ENOMEM); 805 806 /* Add all localbus ranges to device map */ 807 num_mapped = 0; 808 809 if (fdt_localbus_devmap(child, &fdt_devmap[i], 810 (int)bank_count, &num_mapped) != 0) 811 return (ENXIO); 812 813 i += num_mapped; 814 } 815 } 816 817 /* 818 * CESA SRAM range. 819 */ 820 if ((child = OF_finddevice("sram")) != -1) 821 if (fdt_is_compatible(child, "mrvl,cesa-sram")) 822 goto moveon; 823 824 if ((child = fdt_find_compatible(root, "mrvl,cesa-sram", 0)) == 0) 825 /* No CESA SRAM node. */ 826 return (0); 827moveon: 828 if (i >= FDT_DEVMAP_MAX) 829 return (ENOMEM); 830 831 if (fdt_regsize(child, &base, &size) != 0) 832 return (EINVAL); 833 834 fdt_devmap[i].pd_va = MV_CESA_SRAM_BASE; /* XXX */ 835 fdt_devmap[i].pd_pa = base; 836 fdt_devmap[i].pd_size = size; 837 fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE; 838 fdt_devmap[i].pd_cache = PTE_NOCACHE; 839 840 return (0); 841} 842 843struct arm32_dma_range * 844bus_dma_get_range(void) 845{ 846 847 return (NULL); 848} 849 850int 851bus_dma_get_range_nb(void) 852{ 853 854 return (0); 855} 856