/*
 * Initialization and support routines for self-booting compressed
 * image.
 *
 * Copyright (C) 2015, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $Id: min_osl.c 419467 2013-08-21 09:19:48Z $
 */

#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmdevs.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndcpu.h>
#include <sbchipc.h>
#include <hndchipc.h>

/* Global ASSERT type */
uint32 g_assert_type = 0;

#ifdef mips
/* Cache support */

/* Cache and line sizes, filled in by caches_on() from the CP0 Config1 probe */
uint __icache_size, __ic_lsize, __dcache_size, __dc_lsize;

/*
 * Switch the KSEG0 cacheability attribute in CP0 Config to 'cm'.
 * On BCM330x-class CPUs the Broadcom-specific CP0 register additionally
 * carries explicit I$/D$ enable bits, which are set here as well.
 * Must execute from an uncached address; see the KSEG1ADDR() indirection
 * through the 'change_cachability' pointer in caches_on().
 */
static void
_change_cachability(uint32 cm)
{
	uint32 prid, c0reg;

	c0reg = MFC0(C0_CONFIG, 0);
	c0reg &= ~CONF_CM_CMASK;
	c0reg |= (cm & CONF_CM_CMASK);
	MTC0(C0_CONFIG, 0, c0reg);
	prid = MFC0(C0_PRID, 0);
	if (BCM330X(prid)) {
		c0reg = MFC0(C0_BROADCOM, 0);
		/* Enable icache & dcache */
		c0reg |= BRCM_IC_ENABLE | BRCM_DC_ENABLE;
		MTC0(C0_BROADCOM, 0, c0reg);
	}
}
/* Uncached (KSEG1) alias of _change_cachability, set up in caches_on() */
static void (*change_cachability)(uint32);

/*
 * Probe the MIPS cache geometry and enable the caches.
 * If KSEG0 is already cacheable the caches are presumed initialized and
 * are only flushed/invalidated.  Otherwise every line of both caches is
 * initialized with index-store-tag ops (skipped on the HDL simulation
 * package, where the cache RAMs need no init), and KSEG0 is switched to
 * cacheable-noncoherent via the uncached trampoline.
 */
void
caches_on(void)
{
	uint32 config, config1, r2, tmp;
	uint start, end, size, lsize;

	config = MFC0(C0_CONFIG, 0);
	r2 = config & CONF_AR;		/* nonzero on mips32r2 and later */
	config1 = MFC0(C0_CONFIG, 1);

	icache_probe(config1, &size, &lsize);
	__icache_size = size;
	__ic_lsize = lsize;

	dcache_probe(config1, &size, &lsize);
	__dcache_size = size;
	__dc_lsize = lsize;

	/* If caches are not in the default state then
	 * presume that caches are already init'd
	 */
	if ((config & CONF_CM_CMASK) != CONF_CM_UNCACHED) {
		blast_dcache();
		blast_icache();
		return;
	}

	tmp = R_REG(NULL, (uint32 *)(OSL_UNCACHED(SI_ENUM_BASE + CC_CHIPID)));
	if (((tmp & CID_PKG_MASK) >> CID_PKG_SHIFT) != HDLSIM_PKG_ID) {
		/* init icache */
		start = KSEG0ADDR(caches_on) & 0xff800000;
		end = (start + __icache_size);
		MTC0(C0_TAGLO, 0, 0);
		MTC0(C0_TAGHI, 0, 0);
		while (start < end) {
			cache_op(start, Index_Store_Tag_I);
			start += __ic_lsize;
		}

		/* init dcache */
		start = KSEG0ADDR(caches_on) & 0xff800000;
		end = (start + __dcache_size);
		if (r2) {
			/* mips32r2 has the data tags in select 2 */
			MTC0(C0_TAGLO, 2, 0);
			MTC0(C0_TAGHI, 2, 0);
		} else {
			MTC0(C0_TAGLO, 0, 0);
			MTC0(C0_TAGHI, 0, 0);
		}
		while (start < end) {
			cache_op(start, Index_Store_Tag_D);
			start += __dc_lsize;
		}
	}

	/* Must be in KSEG1 to change cachability */
	change_cachability = (void (*)(uint32))KSEG1ADDR(_change_cachability);
	change_cachability(CONF_CM_CACHABLE_NONCOHERENT);
}


/* Write back and invalidate the entire D$ with index ops */
void
blast_dcache(void)
{
	uint32 start, end;

	start = KSEG0ADDR(blast_dcache) & 0xff800000;
	end = start + __dcache_size;

	while (start < end) {
		cache_op(start, Index_Writeback_Inv_D);
		start += __dc_lsize;
	}
}

/* Invalidate the entire I$ with index ops */
void
blast_icache(void)
{
	uint32 start, end;

	start = KSEG0ADDR(blast_icache) & 0xff800000;
	end = start + __icache_size;

	while (start < end) {
		cache_op(start, Index_Invalidate_I);
		start += __ic_lsize;
	}
}

#elif defined(__ARM_ARCH_7A__)

/* Backing store for the first-level MMU table built in caches_on().
 * Oversized so a properly aligned 16 KB table can be carved out of it
 * at a 64 KB boundary regardless of where the array lands.
 */
static uint8 loader_pagetable_array[128*1024+16384];

/* Snoop Control Unit register block (private peripheral space) */
typedef volatile struct scu_reg_struct_t {
	uint32 control;
	uint32 config;
	uint32 cpupwrstatus;
	uint32 invalidate;
	uint32 rsvd1[4];
	uint32 rsvd2[4];
	uint32 rsvd3[4];
	uint32 filtstart;
	uint32 filtend;
	uint32 rsvd4[2];
	uint32 sac;
	uint32 snsac;
} scu_reg_struct;

/* L2 cache controller register block; offsets noted in the comments */
typedef volatile struct l2cc_reg_struct_t {
	uint32 cache_id;
	uint32 cache_type;
	uint32 rsvd1[62];
	uint32 control;			/* 0x100 */
	uint32 aux_control;
	uint32 tag_ram_control;
	uint32 data_ram_control;
	uint32 rsvd2[60];
	uint32 ev_counter_ctrl;		/* 0x200 */
	uint32 ev_counter1_cfg;
	uint32 ev_counter0_cfg;
	uint32 ev_counter1;
	uint32 ev_counter0;
	uint32 int_mask;
	uint32 int_mask_status;
	uint32 int_raw_status;
	uint32 int_clear;
	uint32 rsvd3[55];
	uint32 rsvd4[64];		/* 0x300 */
	uint32 rsvd5[64];		/* 0x400 */
	uint32 rsvd6[64];		/* 0x500 */
	uint32 rsvd7[64];		/* 0x600 */
	uint32 rsvd8[12];		/* 0x700 - 0x72F */
	uint32 cache_sync;		/* 0x730 */
	uint32 rsvd9[15];
	uint32 inv_pa;			/* 0x770 */
	uint32 rsvd10[2];
	uint32 inv_way;			/* 0x77C */
	uint32 rsvd11[12];
	uint32 clean_pa;		/* 0x7B0 */
	uint32 rsvd12[1];
	uint32 clean_index;		/* 0x7B8 */
	uint32 clean_way;
	uint32 rsvd13[12];
	uint32 clean_inv_pa;		/* 0x7F0 */
	uint32 rsvd14[1];
	uint32 clean_inv_index;
	uint32 clean_inv_way;
	uint32 rsvd15[64];		/* 0x800 - 0x8FF */
	uint32 d_lockdown0;		/* 0x900 */
	uint32 i_lockdown0;
	uint32 d_lockdown1;
	uint32 i_lockdown1;
	uint32 d_lockdown2;
	uint32 i_lockdown2;
	uint32 d_lockdown3;
	uint32 i_lockdown3;
	uint32 d_lockdown4;
	uint32 i_lockdown4;
	uint32 d_lockdown5;
	uint32 i_lockdown5;
	uint32 d_lockdown6;
	uint32 i_lockdown6;
	uint32 d_lockdown7;
	uint32 i_lockdown7;
	uint32 rsvd16[4];		/* 0x940 */
	uint32 lock_line_en;		/* 0x950 */
	uint32 unlock_way;
	uint32 rsvd17[42];
	uint32 rsvd18[64];		/* 0xA00 */
	uint32 rsvd19[64];		/* 0xB00 */
	uint32 addr_filtering_start;	/* 0xC00 */
	uint32 addr_filtering_end;
	uint32 rsvd20[62];
	uint32 rsvd21[64];		/* 0xD00 */
	uint32 rsvd22[64];		/* 0xE00 */
	uint32 rsvd23[16];		/* 0xF00 - 0xF3F */
	uint32 debug_ctrl;		/* 0xF40 */
	uint32 rsvd24[7];
	uint32 prefetch_ctrl;		/* 0xF60 */
	uint32 rsvd25[7];
	uint32 power_ctrl;		/* 0xF80 */
} l2cc_reg_struct;

/* ARM9 Private memory region */
#define IPROC_PERIPH_BASE		(0x19020000)	/* (IHOST_A9MP_scu_CONTROL) */
#define IPROC_PERIPH_SCU_REG_BASE	(IPROC_PERIPH_BASE)
#define IPROC_L2CC_REG_BASE		(IPROC_PERIPH_BASE + 0x2000)	/* L2 Cache controller */

/* Structures and bit definitions */
/* SCU Control register */
#define IPROC_SCU_CTRL_SCU_EN		(0x00000001)
#define IPROC_SCU_CTRL_ADRFLT_EN	(0x00000002)
#define IPROC_SCU_CTRL_PARITY_EN	(0x00000004)
#define IPROC_SCU_CTRL_SPEC_LNFL_EN	(0x00000008)
#define IPROC_SCU_CTRL_FRC2P0_EN	(0x00000010)
#define IPROC_SCU_CTRL_SCU_STNDBY_EN	(0x00000020)
#define IPROC_SCU_CTRL_IC_STNDBY_EN	(0x00000040)

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable */
#define CR_A	(1 << 1)	/* Alignment abort enable */
#define CR_C	(1 << 2)	/* Dcache enable */
#define CR_W	(1 << 3)	/* Write buffer enable */
#define CR_P	(1 << 4)	/* 32-bit exception handler */
#define CR_D	(1 << 5)	/* 32-bit data address range */
#define CR_L	(1 << 6)	/* Implementation defined */
#define CR_B	(1 << 7)	/* Big endian */
#define CR_S	(1 << 8)	/* System MMU protection */
#define CR_R	(1 << 9)	/* ROM MMU protection */
#define CR_F	(1 << 10)	/* Implementation defined */
#define CR_Z	(1 << 11)	/* Implementation defined */
#define CR_I	(1 << 12)	/* Icache enable */
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000 */
#define CR_RR	(1 << 14)	/* Round Robin cache replacement */
#define CR_L4	(1 << 15)	/* LDR pc can set T bit */
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode) */
#define CR_U	(1 << 22)	/* Unaligned access operation */
#define CR_XP	(1 << 23)	/* Extended page tables */
#define CR_VE	(1 << 24)	/* Vectored interrupts */
#define CR_EE	(1 << 25)	/* Exception (Big) Endian */
#define CR_TRE	(1 << 28)	/* TEX remap enable */
#define CR_AFE	(1 << 29)	/* Access flag enable */
#define CR_TE	(1 << 30)	/* Thumb exception enable */

/* NOTE(review): this is only a compiler barrier, not an ARM ISB
 * instruction — confirm that is intentional/sufficient here.
 */
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t")

/* Provided by the loader's assembly support code */
extern void cpu_flush_cache_all(void);
extern void cpu_inv_cache_all(void);

/* Write back & invalidate all CPU caches; both arguments are ignored */
void flush_cache(unsigned long dummy1, unsigned long dummy2)
{
	cpu_flush_cache_all();
	return;
}

/* Program the L2 controller's aux control and RAM latency registers */
static void l2cc_init(void)
{
	uint32 regval;
	l2cc_reg_struct *l2cc = (l2cc_reg_struct *)IPROC_L2CC_REG_BASE;

	regval = l2cc->aux_control;
	regval &= ~(0x000F0000);	/* Clear the Way-size and associativity (8 way) */
	regval |= 0x0A130000;	/* Non-secure interrupt access, Way-size 16KB,
				   16 way and event monitoring
				 */
	l2cc->aux_control = regval;
	l2cc->tag_ram_control = 0;	/* Tag ram latency */
	l2cc->data_ram_control = 0;	/* Data ram latency */
}

/* Kick off an invalidate of all 16 ways; completion is polled by the
 * caller via inv_way reading back as zero.
 */
static void l2cc_invalidate(void)
{
	l2cc_reg_struct *l2cc = (l2cc_reg_struct *)IPROC_L2CC_REG_BASE;

	/* Invalidate the entire L2 cache */
	l2cc->inv_way = 0x0000FFFF;
}

/*
 * Initialize, invalidate and enable the L2 cache controller.
 * Returns 0 on success, -1 if the invalidate-by-way never completed
 * within the bounded poll loop.
 */
int l2cc_enable(void)
{
	int i;
	l2cc_reg_struct *l2cc = (l2cc_reg_struct *)IPROC_L2CC_REG_BASE;

	l2cc_init();
	l2cc_invalidate();

	/* Bounded wait for the way-invalidate to finish */
	i = 1000;
	while (l2cc->inv_way && i)
	{
		--i;
	};

	if (i == 0)
		return (-1);

	/* Clear any pending interrupts from this controller */
	l2cc->int_clear = 0x1FF;

	/* Enable the L2 */
	l2cc->control = 0x01;

	/* mem barrier to sync up things */
	i = 0;
	asm("mcr p15, 0, %0, c7, c10, 4": :"r"(i));

	return 0;
}

/* Short spin between coprocessor register read and write */
static void cp_delay(void)
{
	volatile int i;

	/* copro seems to need some delay between reading and writing */
	for (i = 0; i < 1000; i++)
		nop();
	asm volatile("" : : : "memory");
}

/*
 * Enable the ARM caches: invalidate everything, turn the I$ on, build a
 * flat identity-mapped section page table inside loader_pagetable_array,
 * install it, then enable the D$ and the MMU.
 */
void
caches_on(void)
{
	int i;
	uint32 val, *ptb, ptbaddr;

	cpu_inv_cache_all();

	/* Enable I$ */
	asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
	cp_delay();
	val |= CR_I;
	asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" : : "r" (val) : "cc");
	isb();

	/* prepare page table for MMU */
	ptbaddr = (uint32)loader_pagetable_array;
	/* Advance to the next 64 kB boundary inside the array (the array is
	 * oversized, so the 16 kB table always fits past that point)
	 */
	ptbaddr += 0x10000;
	ptbaddr &= ~(0x10000 - 1);
	ptb = (uint32 *)ptbaddr;

	/* Set up an identity-mapping for all 4GB, rw for everyone;
	 * one 1 MB section descriptor per entry
	 */
	for (i = 0; i < 128; i++) {
		/* DRAM area: TEX = 0x4, Ap = 3, Domain = 0, C =1, B = 0 */
		ptb[i] = i << 20 | 0x4c0e;
	}

	for (i = 128; i < 480; i++) {
		/* TEX = 0x2(device memory), Ap = 3, Domain = 0, C =0, B = 0 */
		ptb[i] = i << 20 | 0x0c02;
	}

	for (i = 480; i < 512; i++) {
		/* SPI region: TEX = 0x4, Ap = 3, Domain = 0, C =1, B = 0 */
		ptb[i] = i << 20 | 0x4c0a;
	}

	for (i = 512; i < 4096; i++) {
		/* TEX = 0x2(device memory), Ap = 3, Domain = 0, C =0, B = 0 */
		ptb[i] = i << 20 | 0x2c02;
	}

	/* Apply page table address to CP15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0" : : "r" (ptb) : "memory");
	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0" : : "r" (~0));

	/* Enable D$ and MMU (CR_C is the D-cache enable bit) */
	asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
	cp_delay();
	val |= (CR_C | CR_M);
	asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" : : "r" (val) : "cc");
	isb();
}
421void 422blast_dcache(void) 423{ 424#ifndef CFG_UNCACHED 425 uint32 val; 426 427 asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); 428 cp_delay(); 429 430 if ((val & CR_C) != CR_C) 431 return; /* D$ not enabled */ 432 433 flush_cache(0, ~0); 434 435#ifdef CFG_SHMOO 436 val &= ~CR_C; 437#else 438 val &= ~(CR_C | CR_M); 439#endif 440 asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" : : "r" (val) : "cc"); 441 isb(); 442#endif /* !CFG_UNCACHED */ 443} 444 445void 446blast_icache(void) 447{ 448#ifndef CFG_UNCACHED 449 uint32 val; 450 451 asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); 452 cp_delay(); 453 454 if ((val & CR_I) != CR_I) 455 return; /* I$ not enabled */ 456 457 val &= ~CR_I; 458 asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" : : "r" (val) : "cc"); 459 isb(); 460 461 /* invalidate I-cache */ 462 asm("mcr p15, 0, %0, c7, c5, 0": :"r" (0)); 463#endif 464} 465 466#endif /* mips */ 467 468/* uart output */ 469 470struct serial_struct { 471 unsigned char *reg_base; 472 unsigned short reg_shift; 473 int irq; 474 int baud_base; 475}; 476 477static struct serial_struct min_uart; 478 479#ifdef BCMDBG 480#define LOG_BUF_LEN (16 * 1024) 481#else 482#define LOG_BUF_LEN (1024) 483#endif 484#define LOG_BUF_MASK (LOG_BUF_LEN-1) 485static unsigned long log_idx; 486static char log_buf[LOG_BUF_LEN]; 487 488 489static inline int 490serial_in(struct serial_struct *info, int offset) 491{ 492 return ((int)R_REG(NULL, (uint8 *)(info->reg_base + (offset << info->reg_shift)))); 493} 494 495static inline void 496serial_out(struct serial_struct *info, int offset, int value) 497{ 498 W_REG(NULL, (uint8 *)(info->reg_base + (offset << info->reg_shift)), value); 499} 500 501void 502putc(int c) 503{ 504 uint32 idx; 505 506 /* CR before LF */ 507 if (c == '\n') 508 putc('\r'); 509 510 /* Store in log buffer */ 511 idx = *((uint32 *)OSL_UNCACHED((uintptr)&log_idx)); 512 *((char *)OSL_UNCACHED(&log_buf[idx])) = (char)c; 513 *((uint32 
*)OSL_UNCACHED((uintptr)&log_idx)) = (idx + 1) & LOG_BUF_MASK; 514 515 /* No UART */ 516 if (!min_uart.reg_base) 517 return; 518 519 while (!(serial_in(&min_uart, UART_LSR) & UART_LSR_THRE)); 520 serial_out(&min_uart, UART_TX, c); 521} 522 523/* assert & debugging */ 524 525 526/* general purpose memory allocation */ 527 528extern char text_start[], text_end[]; 529extern char data_start[], data_end[]; 530extern char bss_start[], bss_end[]; 531 532static ulong free_mem_ptr = 0; 533static ulong free_mem_ptr_end = 0; 534 535#define MIN_ALIGN 4 /* Alignment at 4 bytes */ 536#define MAX_ALIGN 4096 /* Max alignment at 4k */ 537 538void * 539malloc(uint size) 540{ 541 return malloc_align(size, MIN_ALIGN); 542} 543 544void * 545malloc_align(uint size, uint align_bits) 546{ 547 void *p; 548 uint align_mask; 549 550 /* Sanity check */ 551 if (size < 0) 552 printf("Malloc error\n"); 553 if (free_mem_ptr == 0) 554 printf("Memory error\n"); 555 556 /* Align */ 557 align_mask = 1 << align_bits; 558 if (align_mask < MIN_ALIGN) 559 align_mask = MIN_ALIGN; 560 if (align_mask > MAX_ALIGN) 561 align_mask = MAX_ALIGN; 562 align_mask--; 563 free_mem_ptr = (free_mem_ptr + align_mask) & ~align_mask; 564 565 p = (void *) free_mem_ptr; 566 free_mem_ptr += size; 567 568 if (free_mem_ptr >= free_mem_ptr_end) 569 printf("Out of memory\n"); 570 571 return p; 572} 573 574int 575free(void *where) 576{ 577 return 0; 578} 579 580/* get processor cycle count */ 581 582#if defined(mips) 583#define get_cycle_count get_c0_count 584#elif defined(__arm__) || defined(__thumb__) || defined(__thumb2__) 585#define get_cycle_count get_arm_cyclecount 586#ifdef __ARM_ARCH_7A__ 587extern long _getticks(void); 588#define get_arm_cyclecount (uint32)_getticks 589#endif 590#endif /* mips */ 591 592uint32 593osl_getcycles(void) 594{ 595 return get_cycle_count(); 596} 597 598/* microsecond delay */ 599 600/* Default to 125 MHz */ 601static uint32 cpu_clock = 125000000; 602static uint32 c0counts_per_us = 125000000 / 
2000000; 603static uint32 c0counts_per_ms = 125000000 / 2000; 604 605void 606udelay(uint32 us) 607{ 608 uint32 curr, lim; 609 610 curr = get_cycle_count(); 611 lim = curr + (us * c0counts_per_us); 612 613 if (lim < curr) 614 while (get_cycle_count() > curr) 615 ; 616 617 while (get_cycle_count() < lim) 618 ; 619} 620 621#ifndef MIN_DO_TRAP 622 623/* No trap handling in self-decompressing boots */ 624extern void trap_init(void); 625 626void 627trap_init(void) 628{ 629} 630 631#endif /* !MIN_DO_TRAP */ 632 633static void 634serial_add(void *regs, uint irq, uint baud_base, uint reg_shift) 635{ 636 int quot; 637 638 if (min_uart.reg_base) 639 return; 640 641 min_uart.reg_base = regs; 642 min_uart.irq = irq; 643 min_uart.baud_base = baud_base / 16; 644 min_uart.reg_shift = reg_shift; 645 646 /* Set baud and 8N1 */ 647#if defined(CFG_SIM) && defined(__ARM_ARCH_7A__) 648 quot = (min_uart.baud_base + 300) / 600; 649#else 650 quot = (min_uart.baud_base + 57600) / 115200; 651#endif 652 serial_out(&min_uart, UART_LCR, UART_LCR_DLAB); 653 serial_out(&min_uart, UART_DLL, quot & 0xff); 654 serial_out(&min_uart, UART_DLM, quot >> 8); 655 serial_out(&min_uart, UART_LCR, UART_LCR_WLEN8); 656 657 /* According to the Synopsys website: "the serial clock 658 * modules must have time to see new register values 659 * and reset their respective state machines. This 660 * total time is guaranteed to be no more than 661 * (2 * baud divisor * 16) clock cycles of the slower 662 * of the two system clocks. No data should be transmitted 663 * or received before this maximum time expires." 
664 */ 665 udelay(1000); 666} 667 668 669void * 670osl_init() 671{ 672 uint32 c0counts_per_cycle; 673 si_t *sih; 674 675 /* Scan backplane */ 676 sih = si_kattach(SI_OSH); 677 678 if (sih == NULL) 679 return NULL; 680 681#if defined(mips) 682 si_mips_init(sih, 0); 683 c0counts_per_cycle = 2; 684#elif defined(__arm__) || defined(__thumb__) || defined(__thumb2__) 685 si_arm_init(sih); 686 c0counts_per_cycle = 1; 687#else 688#error "Unknow CPU" 689#endif 690 691 cpu_clock = si_cpu_clock(sih); 692 c0counts_per_us = cpu_clock / (1000000 * c0counts_per_cycle); 693 c0counts_per_ms = si_cpu_clock(sih) / (1000 * c0counts_per_cycle); 694 695 /* Don't really need to talk to the uart in simulation */ 696 if ((sih->chippkg != HDLSIM_PKG_ID) && (sih->chippkg != HWSIM_PKG_ID)) 697 si_serial_init(sih, serial_add); 698 699 /* Init malloc */ 700#if defined(CFG_SHMOO) 701 { 702 extern int _memsize; 703 if (_memsize) { 704 free_mem_ptr = _memsize >> 1; 705 free_mem_ptr_end = _memsize - (_memsize >> 2); 706 } 707 } 708#else 709 free_mem_ptr = (ulong) bss_end; 710 free_mem_ptr_end = ((ulong)&sih) - 8192; /* Enough stack? */ 711#endif /* CFG_SHMOO */ 712 return ((void *)sih); 713} 714 715/* translate bcmerros */ 716int 717osl_error(int bcmerror) 718{ 719 if (bcmerror) 720 return -1; 721 else 722 return 0; 723} 724