/*
 * Initialization and support routines for self-booting compressed
 * image.
 *
 * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $Id: min_osl.c 401759 2013-05-13 16:08:08Z $
 */

#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmdevs.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndcpu.h>
#include <sbchipc.h>
#include <hndchipc.h>

/* Global ASSERT type */
uint32 g_assert_type = 0;
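
/*
 * Rough call sequence assumed for the self-decompressing boot stub
 * (illustrative only; the actual caller lives outside this file):
 *
 *	caches_on();		// bring up I$/D$ (and MMU/L2 on ARM)
 *	sih = osl_init();	// scan backplane, set up timing and UART
 *	...decompress image...
 *	blast_dcache();		// write back and disable D$ before jumping
 *	blast_icache();
 */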

#ifdef	mips
/* Cache support */

/* Cache and line sizes */
uint __icache_size, __ic_lsize, __dcache_size, __dc_lsize;

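/*
 * Switch the kseg0 cacheability mode in the CP0 Config register to 'cm'.
 * On BCM330x-class CPUs the Broadcom-specific CP0 register must also have
 * its I$/D$ enable bits set. This must run from an uncached (KSEG1) address,
 * which is why callers go through the change_cachability function pointer.
 */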
static void
_change_cachability(uint32 cm)
{
	uint32 prid, c0reg;

	c0reg = MFC0(C0_CONFIG, 0);
	c0reg &= ~CONF_CM_CMASK;
	c0reg |= (cm & CONF_CM_CMASK);
	MTC0(C0_CONFIG, 0, c0reg);
	prid = MFC0(C0_PRID, 0);
	if (BCM330X(prid)) {
		c0reg = MFC0(C0_BROADCOM, 0);
		/* Enable icache & dcache */
		c0reg |= BRCM_IC_ENABLE | BRCM_DC_ENABLE;
		MTC0(C0_BROADCOM, 0, c0reg);
	}
}
static void (*change_cachability)(uint32);

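/*
 * Probe the I$/D$ geometry, initialize the tag RAMs if the caches are still
 * in the uncached reset state (skipped under HDL simulation), then switch
 * kseg0 to cacheable non-coherent mode. If the caches were already enabled,
 * just flush them and return.
 */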
void
caches_on(void)
{
	uint32 config, config1, r2, tmp;
	uint start, end, size, lsize;

	config = MFC0(C0_CONFIG, 0);
	r2 = config & CONF_AR;
	config1 = MFC0(C0_CONFIG, 1);

	icache_probe(config1, &size, &lsize);
	__icache_size = size;
	__ic_lsize = lsize;

	dcache_probe(config1, &size, &lsize);
	__dcache_size = size;
	__dc_lsize = lsize;

	/* If caches are not in the default state then
	 * presume that caches are already init'd
	 */
	if ((config & CONF_CM_CMASK) != CONF_CM_UNCACHED) {
		blast_dcache();
		blast_icache();
		return;
	}

	tmp = R_REG(NULL, (uint32 *)(OSL_UNCACHED(SI_ENUM_BASE + CC_CHIPID)));
	if (((tmp & CID_PKG_MASK) >> CID_PKG_SHIFT) != HDLSIM_PKG_ID) {
		/* init icache */
		start = KSEG0ADDR(caches_on) & 0xff800000;
		end = (start + __icache_size);
		MTC0(C0_TAGLO, 0, 0);
		MTC0(C0_TAGHI, 0, 0);
		while (start < end) {
			cache_op(start, Index_Store_Tag_I);
			start += __ic_lsize;
		}

		/* init dcache */
		start = KSEG0ADDR(caches_on) & 0xff800000;
		end = (start + __dcache_size);
		if (r2) {
			/* mips32r2 has the data tags in select 2 */
			MTC0(C0_TAGLO, 2, 0);
			MTC0(C0_TAGHI, 2, 0);
		} else {
			MTC0(C0_TAGLO, 0, 0);
			MTC0(C0_TAGHI, 0, 0);
		}
		while (start < end) {
			cache_op(start, Index_Store_Tag_D);
			start += __dc_lsize;
		}
	}

	/* Must be in KSEG1 to change cachability */
	change_cachability = (void (*)(uint32))KSEG1ADDR(_change_cachability);
	change_cachability(CONF_CM_CACHABLE_NONCOHERENT);
}

void
blast_dcache(void)
{
	uint32 start, end;

	start = KSEG0ADDR(blast_dcache) & 0xff800000;
	end = start + __dcache_size;

	while (start < end) {
		cache_op(start, Index_Writeback_Inv_D);
		start += __dc_lsize;
	}
}

void
blast_icache(void)
{
	uint32 start, end;

	start = KSEG0ADDR(blast_icache) & 0xff800000;
	end = start + __icache_size;

	while (start < end) {
		cache_op(start, Index_Invalidate_I);
		start += __ic_lsize;
	}
}

#elif defined(__ARM_ARCH_7A__)

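/*
 * Backing store for the first-level MMU translation table (4096 section
 * entries = 16 KB). caches_on() rounds the address up to a 64 KB boundary
 * inside this array, which comfortably satisfies the 16 KB alignment the
 * architecture requires for TTBR0.
 */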
static uint8 loader_pagetable_array[128*1024+16384];

typedef volatile struct scu_reg_struct_t {
	uint32 control;
	uint32 config;
	uint32 cpupwrstatus;
	uint32 invalidate;
	uint32 rsvd1[4];
	uint32 rsvd2[4];
	uint32 rsvd3[4];
	uint32 filtstart;
	uint32 filtend;
	uint32 rsvd4[2];
	uint32 sac;
	uint32 snsac;
} scu_reg_struct;

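/*
 * Register layout of the outer L2 cache controller. This matches the
 * ARM CoreLink L2C-310 (PL310) register map, which is what the iProc
 * parts this loader targets appear to use.
 */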
typedef volatile struct l2cc_reg_struct_t {
	uint32 cache_id;
	uint32 cache_type;
	uint32 rsvd1[62];
	uint32 control;	/* 0x100 */
	uint32 aux_control;
	uint32 tag_ram_control;
	uint32 data_ram_control;
	uint32 rsvd2[60];
	uint32 ev_counter_ctrl;	/* 0x200 */
	uint32 ev_counter1_cfg;
	uint32 ev_counter0_cfg;
	uint32 ev_counter1;
	uint32 ev_counter0;
	uint32 int_mask;
	uint32 int_mask_status;
	uint32 int_raw_status;
	uint32 int_clear;
	uint32 rsvd3[55];
	uint32 rsvd4[64]; /* 0x300 */
	uint32 rsvd5[64]; /* 0x400 */
	uint32 rsvd6[64]; /* 0x500 */
	uint32 rsvd7[64]; /* 0x600 */
	uint32 rsvd8[12]; /* 0x700 - 0x72F */
	uint32 cache_sync; /* 0x730 */
	uint32 rsvd9[15];
	uint32 inv_pa; /* 0x770 */
	uint32 rsvd10[2];
	uint32 inv_way; /* 0x77C */
	uint32 rsvd11[12];
	uint32 clean_pa; /* 0x7B0 */
	uint32 rsvd12[1];
	uint32 clean_index; /* 0x7B8 */
	uint32 clean_way;
	uint32 rsvd13[12];
	uint32 clean_inv_pa; /* 0x7F0 */
	uint32 rsvd14[1];
	uint32 clean_inv_index;
	uint32 clean_inv_way;
	uint32 rsvd15[64]; /* 0x800 - 0x8FF */
	uint32 d_lockdown0; /* 0x900 */
	uint32 i_lockdown0;
	uint32 d_lockdown1;
	uint32 i_lockdown1;
	uint32 d_lockdown2;
	uint32 i_lockdown2;
	uint32 d_lockdown3;
	uint32 i_lockdown3;
	uint32 d_lockdown4;
	uint32 i_lockdown4;
	uint32 d_lockdown5;
	uint32 i_lockdown5;
	uint32 d_lockdown6;
	uint32 i_lockdown6;
	uint32 d_lockdown7;
	uint32 i_lockdown7;
	uint32 rsvd16[4]; /* 0x940 */
	uint32 lock_line_en; /* 0x950 */
	uint32 unlock_way;
	uint32 rsvd17[42];
	uint32 rsvd18[64]; /* 0xA00 */
	uint32 rsvd19[64]; /* 0xB00 */
	uint32 addr_filtering_start; /* 0xC00 */
	uint32 addr_filtering_end;
	uint32 rsvd20[62];
	uint32 rsvd21[64]; /* 0xD00 */
	uint32 rsvd22[64]; /* 0xE00 */
	uint32 rsvd23[16]; /* 0xF00 - 0xF3F */
	uint32 debug_ctrl; /* 0xF40 */
	uint32 rsvd24[7];
	uint32 prefetch_ctrl; /* 0xF60 */
	uint32 rsvd25[7];
	uint32 power_ctrl; /* 0xF80 */
} l2cc_reg_struct;

/* Cortex-A9 MP core private memory region */
#define IPROC_PERIPH_BASE		(0x19020000)	/* (IHOST_A9MP_scu_CONTROL) */
#define IPROC_PERIPH_SCU_REG_BASE	(IPROC_PERIPH_BASE)
#define IPROC_L2CC_REG_BASE		(IPROC_PERIPH_BASE + 0x2000) /* L2 Cache controller */

/* Structures and bit definitions */
/* SCU Control register */
#define IPROC_SCU_CTRL_SCU_EN		(0x00000001)
#define IPROC_SCU_CTRL_ADRFLT_EN	(0x00000002)
#define IPROC_SCU_CTRL_PARITY_EN	(0x00000004)
#define IPROC_SCU_CTRL_SPEC_LNFL_EN	(0x00000008)
#define IPROC_SCU_CTRL_FRC2P0_EN	(0x00000010)
#define IPROC_SCU_CTRL_SCU_STNDBY_EN	(0x00000020)
#define IPROC_SCU_CTRL_IC_STNDBY_EN	(0x00000040)

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
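
/*
 * Note: these are compiler-level only. isb() emits no ISB instruction,
 * just a memory clobber to stop the compiler from reordering across it,
 * and nop() is a plain "mov r0, r0".
 */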
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t")

extern void cpu_flush_cache_all(void);
extern void cpu_inv_cache_all(void);

void flush_cache(unsigned long dummy1, unsigned long dummy2)
{
	cpu_flush_cache_all();
	return;
}

static void l2cc_init(void)
{
	uint32 regval;
	l2cc_reg_struct *l2cc = (l2cc_reg_struct *)IPROC_L2CC_REG_BASE;

	regval = l2cc->aux_control;
	regval &= ~(0x000F0000); /* Clear the Way-size and associativity (8 way) */
	regval |= 0x0A130000;    /* Non-secure interrupt access, Way-size 16KB,
				    16 way and event monitoring
				  */
	l2cc->aux_control = regval;
	l2cc->tag_ram_control = 0; /* Tag ram latency */
	l2cc->data_ram_control = 0; /* Data ram latency */
}

static void l2cc_invalidate(void)
{
	l2cc_reg_struct *l2cc = (l2cc_reg_struct *)IPROC_L2CC_REG_BASE;

	/* Invalidate the entire L2 cache */
	l2cc->inv_way = 0x0000FFFF;
}

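/*
 * Bring up the L2: configure aux control and RAM latencies while the
 * controller is still disabled, invalidate all ways, wait for the
 * invalidate to complete, clear any latched interrupts, and only then
 * set the enable bit. Returns -1 if the by-way invalidate never drains.
 */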
int l2cc_enable(void)
{
	int i;
	l2cc_reg_struct *l2cc = (l2cc_reg_struct *)IPROC_L2CC_REG_BASE;

	l2cc_init();
	l2cc_invalidate();

	i = 1000;
	while (l2cc->inv_way && i)
	{
		--i;
	}

	if (i == 0)
		return (-1);

	/* Clear any pending interrupts from this controller */
	l2cc->int_clear = 0x1FF;

	/* Enable the L2 */
	l2cc->control = 0x01;

	/* Data synchronization barrier (CP15 c7, c10, 4) so the enable has
	 * taken effect before we continue.
	 */
	i = 0;
	asm("mcr p15, 0, %0, c7, c10, 4": :"r"(i));

	return 0;
}

static void cp_delay(void)
{
	volatile int i;

	/* copro seems to need some delay between reading and writing */
	for (i = 0; i < 1000; i++)
		nop();
	asm volatile("" : : : "memory");
}

void
caches_on(void)
{
	int i;
	uint32 val, *ptb, ptbaddr;

	cpu_inv_cache_all();

	/* Enable I$ */
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	cp_delay();
	val |= CR_I;
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR" : : "r" (val) : "cc");
	isb();

	/* prepare page table for MMU */
	ptbaddr = (uint32)loader_pagetable_array;
	/* Round up to the next 64 KB boundary */
	ptbaddr += 0x10000;
	ptbaddr &= ~(0x10000 - 1);
	ptb = (uint32 *)ptbaddr;
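
	/*
	 * Each entry below is an ARMv7 short-descriptor first-level "section"
	 * descriptor: bits [31:20] hold the 1 MB section base (identity
	 * mapped here), bits [14:12] TEX, [11:10] AP, [8:5] domain, [3] C,
	 * [2] B, and [1:0] = 0b10 to mark the entry as a section.
	 */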
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < 128; i++) {
		/* DRAM area: TEX = 0x4, AP = 3, Domain = 0, C = 1, B = 1 */
		ptb[i] = i << 20 | 0x4c0e;
	}

	for (i = 128; i < 480; i++) {
		/* TEX = 0x0 (strongly-ordered), AP = 3, Domain = 0, C = 0, B = 0 */
		ptb[i] = i << 20 | 0x0c02;
	}

	for (i = 480; i < 512; i++) {
		/* SPI region: TEX = 0x4, AP = 3, Domain = 0, C = 1, B = 0 */
		ptb[i] = i << 20 | 0x4c0a;
	}

	for (i = 512; i < 4096; i++) {
		/* TEX = 0x2 (device memory), AP = 3, Domain = 0, C = 0, B = 0 */
		ptb[i] = i << 20 | 0x2c02;
	}

	/* Apply page table address to CP15 (TTBR0) */
	asm volatile("mcr p15, 0, %0, c2, c0, 0" : : "r" (ptb) : "memory");
	/* Set all domains to manager access (no permission checks) */
	asm volatile("mcr p15, 0, %0, c3, c0, 0" : : "r" (~0));

	/* Enable D$ and MMU */
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	cp_delay();
	val |= (CR_C | CR_M);
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR" : : "r" (val) : "cc");
	isb();
}

void
blast_dcache(void)
{
#ifndef CFG_UNCACHED
	uint32 val;

	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	cp_delay();

	if ((val & CR_C) != CR_C)
		return; /* D$ not enabled */

	flush_cache(0, ~0);

#ifdef CFG_SHMOO
	val &= ~CR_C;
#else
	val &= ~(CR_C | CR_M);
#endif
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR" : : "r" (val) : "cc");
	isb();
#endif /* !CFG_UNCACHED */
}

void
blast_icache(void)
{
#ifndef CFG_UNCACHED
	uint32 val;

	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	cp_delay();

	if ((val & CR_I) != CR_I)
		return; /* I$ not enabled */

	val &= ~CR_I;
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR" : : "r" (val) : "cc");
	isb();

	/* invalidate I-cache */
	asm("mcr p15, 0, %0, c7, c5, 0": :"r" (0));
#endif /* !CFG_UNCACHED */
}

#endif	/* mips */

/* uart output */

struct serial_struct {
	unsigned char	*reg_base;
	unsigned short	reg_shift;
	int	irq;
	int	baud_base;
};

static struct serial_struct min_uart;

#define LOG_BUF_LEN	(1024)
#define LOG_BUF_MASK	(LOG_BUF_LEN-1)
static unsigned long log_idx;
static char log_buf[LOG_BUF_LEN];
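
/*
 * Console output is mirrored into this small ring buffer through uncached
 * aliases (see putc()), so the most recent output can still be inspected
 * in memory even before a UART has been configured.
 */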

static inline int
serial_in(struct serial_struct *info, int offset)
{
	return ((int)R_REG(NULL, (uint8 *)(info->reg_base + (offset << info->reg_shift))));
}

static inline void
serial_out(struct serial_struct *info, int offset, int value)
{
	W_REG(NULL, (uint8 *)(info->reg_base + (offset << info->reg_shift)), value);
}

void
putc(int c)
{
	uint32 idx;

	/* CR before LF */
	if (c == '\n')
		putc('\r');

	/* Store in log buffer */
	idx = *((uint32 *)OSL_UNCACHED((uintptr)&log_idx));
	*((char *)OSL_UNCACHED(&log_buf[idx])) = (char)c;
	*((uint32 *)OSL_UNCACHED((uintptr)&log_idx)) = (idx + 1) & LOG_BUF_MASK;

	/* No UART */
	if (!min_uart.reg_base)
		return;

	while (!(serial_in(&min_uart, UART_LSR) & UART_LSR_THRE));
	serial_out(&min_uart, UART_TX, c);
}

/* assert & debugging */

#ifdef BCMDBG_ASSERT
void
assfail(char *exp, char *file, int line)
{
	printf("ASSERT %s file %s line %d\n", exp, file, line);
}
#endif /* BCMDBG_ASSERT */

/* general purpose memory allocation */

extern char text_start[], text_end[];
extern char data_start[], data_end[];
extern char bss_start[], bss_end[];

static ulong free_mem_ptr = 0;
static ulong free_mem_ptr_end = 0;
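
/*
 * Trivial bump allocator: malloc()/malloc_align() hand out space from
 * [free_mem_ptr, free_mem_ptr_end), which osl_init() points at memory
 * above the image (or at a scratch area under CFG_SHMOO). free() is a
 * no-op, so allocations are never reclaimed.
 */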

#define	MIN_ALIGN	4	/* Alignment at 4 bytes */
#define	MAX_ALIGN	4096	/* Max alignment at 4k */

void *
malloc(uint size)
{
	return malloc_align(size, MIN_ALIGN);
}

void *
malloc_align(uint size, uint align_bits)
{
	void *p;
	uint align_mask;

	/* Sanity check (size is unsigned, so catch negative values passed in) */
	if ((int)size < 0)
		printf("Malloc error\n");
	if (free_mem_ptr == 0)
		printf("Memory error\n");

	/* Align */
	align_mask = 1 << align_bits;
	if (align_mask < MIN_ALIGN)
		align_mask = MIN_ALIGN;
	if (align_mask > MAX_ALIGN)
		align_mask = MAX_ALIGN;
	align_mask--;
	free_mem_ptr = (free_mem_ptr + align_mask) & ~align_mask;

	p = (void *) free_mem_ptr;
	free_mem_ptr += size;

	if (free_mem_ptr >= free_mem_ptr_end)
		printf("Out of memory\n");

	return p;
}

int
free(void *where)
{
	return 0;
}

/* get processor cycle count */

#if defined(mips)
#define	get_cycle_count	get_c0_count
#elif defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
#define	get_cycle_count	get_arm_cyclecount
#ifdef __ARM_ARCH_7A__
extern long _getticks(void);
#define get_arm_cyclecount	(uint32)_getticks
#endif
#endif /* mips */

uint32
osl_getcycles(void)
{
	return get_cycle_count();
}

/* microsecond delay */

/* Default to 125 MHz */
static uint32 cpu_clock = 125000000;
static uint32 c0counts_per_us = 125000000 / 2000000;
static uint32 c0counts_per_ms = 125000000 / 2000;
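
/*
 * The defaults above assume a 125 MHz CPU whose cycle counter ticks at
 * half the CPU clock (the MIPS CP0 count behaviour); osl_init() replaces
 * them with values derived from the actual core clock.
 */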

void
udelay(uint32 us)
{
	uint32 curr, lim;

	curr = get_cycle_count();
	lim = curr + (us * c0counts_per_us);

	/* If the limit wrapped, first wait for the counter to wrap as well */
	if (lim < curr)
		while (get_cycle_count() >= curr)
			;

	while (get_cycle_count() < lim)
		;
}

#ifndef	MIN_DO_TRAP

/* No trap handling in self-decompressing boots */
extern void trap_init(void);

void
trap_init(void)
{
}

#endif	/* !MIN_DO_TRAP */

static void
serial_add(void *regs, uint irq, uint baud_base, uint reg_shift)
{
	int quot;

	if (min_uart.reg_base)
		return;

	min_uart.reg_base = regs;
	min_uart.irq = irq;
	min_uart.baud_base = baud_base / 16;
	min_uart.reg_shift = reg_shift;
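
	/* baud_base was pre-divided by 16 above, so the value computed below
	 * is the 16550 divisor latch (DLL/DLM), rounded to the nearest
	 * integer for 115200 baud (600 baud in ARM simulation builds).
	 */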
	/* Set baud and 8N1 */
#if defined(CFG_SIM) && defined(__ARM_ARCH_7A__)
	quot = (min_uart.baud_base + 300) / 600;
#else
	quot = (min_uart.baud_base + 57600) / 115200;
#endif
	serial_out(&min_uart, UART_LCR, UART_LCR_DLAB);
	serial_out(&min_uart, UART_DLL, quot & 0xff);
	serial_out(&min_uart, UART_DLM, quot >> 8);
	serial_out(&min_uart, UART_LCR, UART_LCR_WLEN8);

	/* According to the Synopsys website: "the serial clock
	 * modules must have time to see new register values
	 * and reset their respective state machines. This
	 * total time is guaranteed to be no more than
	 * (2 * baud divisor * 16) clock cycles of the slower
	 * of the two system clocks. No data should be transmitted
	 * or received before this maximum time expires."
	 */
	udelay(1000);
}

void *
osl_init(void)
{
	uint32 c0counts_per_cycle;
	si_t *sih;

	/* Scan backplane */
	sih = si_kattach(SI_OSH);

	if (sih == NULL)
		return NULL;

#if defined(mips)
	si_mips_init(sih, 0);
	c0counts_per_cycle = 2;
#elif defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
	si_arm_init(sih);
	c0counts_per_cycle = 1;
#else
#error "Unknown CPU"
#endif

	cpu_clock = si_cpu_clock(sih);
	c0counts_per_us = cpu_clock / (1000000 * c0counts_per_cycle);
	c0counts_per_ms = si_cpu_clock(sih) / (1000 * c0counts_per_cycle);

	/* Don't really need to talk to the uart in simulation */
	if ((sih->chippkg != HDLSIM_PKG_ID) && (sih->chippkg != HWSIM_PKG_ID))
		si_serial_init(sih, serial_add);

	/* Init malloc */
#if defined(CFG_SHMOO)
	{
	extern int _memsize;
	if (_memsize) {
		free_mem_ptr = _memsize >> 1;
		free_mem_ptr_end = _memsize - (_memsize >> 2);
	}
	}
#else
	free_mem_ptr = (ulong) bss_end;
	free_mem_ptr_end = ((ulong)&sih) - 8192;	/* Enough stack? */
#endif /* CFG_SHMOO */
	return ((void *)sih);
}

/* translate bcmerrors */
int
osl_error(int bcmerror)
{
	if (bcmerror)
		return -1;
	else
		return 0;
}