/* $Id: head.S,v 1.1.1.1 2007-08-03 18:52:18 $
 * head.S: Initial boot code for the Sparc64 port of Linux.
 *
 * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
 */

#include <linux/version.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/processor.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/head.h>
#include <asm/ttable.h>
#include <asm/mmu.h>
#include <asm/cpudata.h>

/* This section from _start to sparc64_boot_end should fit into
 * 0x0000000000404000 to 0x0000000000408000.
 */
	.text
	.globl	start, _start, stext, _stext
_start:
start:
_stext:
stext:
! 0x0000000000404000
	b	sparc64_boot
	 flushw					/* Flush register file.      */

/* This stuff has to be in sync with SILO and other potential boot loaders.
 * Fields should be kept upward compatible and, whenever any change is made,
 * the HdrS version should be incremented.
 */
        .global root_flags, ram_flags, root_dev
        .global sparc_ramdisk_image, sparc_ramdisk_size
	.global sparc_ramdisk_image64

        .ascii  "HdrS"
        .word   LINUX_VERSION_CODE

	/* History:
	 *
	 * 0x0300 : Supports being located at an address other than 0x4000
	 * 0x0202 : Supports kernel params string
	 * 0x0201 : Supports reboot_command
	 */
	.half   0x0301          /* HdrS version */

root_flags:
        .half   1
root_dev:
        .half   0
ram_flags:
        .half   0
sparc_ramdisk_image:
        .word   0
sparc_ramdisk_size:
        .word   0
        .xword  reboot_command
	.xword	bootstr_info
sparc_ramdisk_image64:
	.xword	0
	.word	_end

	/* PROM cif handler code address is in %o4.  */
sparc64_boot:
	mov	%o4, %l7

	/* We need to remap the kernel.  Use position independent
	 * code to remap us to KERNBASE.
	 *
	 * SILO can invoke us with 32-bit address masking enabled,
	 * so make sure that's clear.
	 */
	rdpr	%pstate, %g1
	andn	%g1, PSTATE_AM, %g1
	wrpr	%g1, 0x0, %pstate
	ba,a,pt	%xcc, 1f

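	/* The PROM service-name strings and scratch words below live
	 * right here in the text section so that, before we are
	 * remapped to KERNBASE, they can be reached with the simple
	 * PC-relative offset arithmetic done at label 1 below.
	 */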
	.globl	prom_finddev_name, prom_chosen_path, prom_root_node
	.globl	prom_getprop_name, prom_mmu_name, prom_peer_name
	.globl	prom_callmethod_name, prom_translate_name, prom_root_compatible
	.globl	prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
	.globl	prom_boot_mapped_pc, prom_boot_mapping_mode
	.globl	prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
	.globl	is_sun4v
prom_peer_name:
	.asciz	"peer"
prom_compatible_name:
	.asciz	"compatible"
prom_finddev_name:
	.asciz	"finddevice"
prom_chosen_path:
	.asciz	"/chosen"
prom_getprop_name:
	.asciz	"getprop"
prom_mmu_name:
	.asciz	"mmu"
prom_callmethod_name:
	.asciz	"call-method"
prom_translate_name:
	.asciz	"translate"
prom_map_name:
	.asciz	"map"
prom_unmap_name:
	.asciz	"unmap"
prom_sun4v_name:
	.asciz	"sun4v"
	.align	4
prom_root_compatible:
	.skip	64
prom_root_node:
	.word	0
prom_mmu_ihandle_cache:
	.word	0
prom_boot_mapped_pc:
	.word	0
prom_boot_mapping_mode:
	.word	0
	.align	8
prom_boot_mapping_phys_high:
	.xword	0
prom_boot_mapping_phys_low:
	.xword	0
is_sun4v:
	.word	0
1:
	rd	%pc, %l0

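	/* %l0 now holds our run-time PC at label 1.  Each "1b - symbol"
	 * expression below is the assembly-time distance from a symbol
	 * back to that label, so "%l0 - (1b - symbol)" yields the
	 * symbol's run-time address regardless of where we were loaded.
	 */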
	mov	(1b - prom_peer_name), %l1
	sub	%l0, %l1, %l1
	mov	0, %l2

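	/* Every firmware service below is invoked through the client
	 * interface handler in %l7, whose single argument points to a
	 * cell array that looks roughly like this (illustrative C
	 * sketch only, not a structure used by this code):
	 *
	 *	struct cif_args {
	 *		char *service;     -- service name, e.g. "peer"
	 *		long nargs, nrets; -- argument and return counts
	 *		long cells[];      -- nargs args, then nrets slots
	 *	};
	 *
	 * The array is built in the stack's scratch area: %sp + 2047
	 * undoes the 64-bit ABI stack bias, and the extra 128 bytes
	 * skip over the register window save area.
	 */
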
	/* prom_root_node = prom_peer(0) */
	stx	%l1, [%sp + 2047 + 128 + 0x00]	! service, "peer"
	mov	1, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 1
	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
	stx	%l2, [%sp + 2047 + 128 + 0x18]	! arg1, 0
	stx	%g0, [%sp + 2047 + 128 + 0x20]	! ret1
	call	%l7
	 add	%sp, (2047 + 128), %o0		! argument array

	ldx	[%sp + 2047 + 128 + 0x20], %l4	! prom root node
	mov	(1b - prom_root_node), %l1
	sub	%l0, %l1, %l1
	stw	%l4, [%l1]

	mov	(1b - prom_getprop_name), %l1
	mov	(1b - prom_compatible_name), %l2
	mov	(1b - prom_root_compatible), %l5
	sub	%l0, %l1, %l1
	sub	%l0, %l2, %l2
	sub	%l0, %l5, %l5

	/* prom_getproperty(prom_root_node, "compatible",
	 *                  &prom_root_compatible, 64)
	 */
	stx	%l1, [%sp + 2047 + 128 + 0x00]	! service, "getprop"
	mov	4, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 4
	mov	1, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
	stx	%l4, [%sp + 2047 + 128 + 0x18]	! arg1, prom_root_node
	stx	%l2, [%sp + 2047 + 128 + 0x20]	! arg2, "compatible"
	stx	%l5, [%sp + 2047 + 128 + 0x28]	! arg3, &prom_root_compatible
	mov	64, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4, size
	stx	%g0, [%sp + 2047 + 128 + 0x38]	! ret1
	call	%l7
	 add	%sp, (2047 + 128), %o0		! argument array

	mov	(1b - prom_finddev_name), %l1
	mov	(1b - prom_chosen_path), %l2
	mov	(1b - prom_boot_mapped_pc), %l3
	sub	%l0, %l1, %l1
	sub	%l0, %l2, %l2
	sub	%l0, %l3, %l3
	stw	%l0, [%l3]
	sub	%sp, (192 + 128), %sp
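	/* Carve out extra scratch stack space for the larger
	 * call-method argument lists built below; the matching
	 * "add %sp, (192 + 128), %sp" after the map call releases it.
	 */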

	/* chosen_node = prom_finddevice("/chosen") */
	stx	%l1, [%sp + 2047 + 128 + 0x00]	! service, "finddevice"
	mov	1, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 1
	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
	stx	%l2, [%sp + 2047 + 128 + 0x18]	! arg1, "/chosen"
	stx	%g0, [%sp + 2047 + 128 + 0x20]	! ret1
	call	%l7
	 add	%sp, (2047 + 128), %o0		! argument array

	ldx	[%sp + 2047 + 128 + 0x20], %l4	! chosen device node

	mov	(1b - prom_getprop_name), %l1
	mov	(1b - prom_mmu_name), %l2
	mov	(1b - prom_mmu_ihandle_cache), %l5
	sub	%l0, %l1, %l1
	sub	%l0, %l2, %l2
	sub	%l0, %l5, %l5

	/* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */
	stx	%l1, [%sp + 2047 + 128 + 0x00]	! service, "getprop"
	mov	4, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 4
	mov	1, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
	stx	%l4, [%sp + 2047 + 128 + 0x18]	! arg1, chosen_node
	stx	%l2, [%sp + 2047 + 128 + 0x20]	! arg2, "mmu"
	stx	%l5, [%sp + 2047 + 128 + 0x28]	! arg3, &prom_mmu_ihandle_cache
	mov	4, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4, sizeof(arg3)
	stx	%g0, [%sp + 2047 + 128 + 0x38]	! ret1
	call	%l7
	 add	%sp, (2047 + 128), %o0		! argument array

	mov	(1b - prom_callmethod_name), %l1
	mov	(1b - prom_translate_name), %l2
	sub	%l0, %l1, %l1
	sub	%l0, %l2, %l2
	lduw	[%l5], %l5			! prom_mmu_ihandle_cache

	stx	%l1, [%sp + 2047 + 128 + 0x00]	! service, "call-method"
	mov	3, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 3
	mov	5, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 5
	stx	%l2, [%sp + 2047 + 128 + 0x18]	! arg1: "translate"
	stx	%l5, [%sp + 2047 + 128 + 0x20]	! arg2: prom_mmu_ihandle_cache
	/* PAGE align */
	srlx	%l0, 13, %l3
	sllx	%l3, 13, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x28]	! arg3: vaddr, our PC
	stx	%g0, [%sp + 2047 + 128 + 0x30]	! res1
	stx	%g0, [%sp + 2047 + 128 + 0x38]	! res2
	stx	%g0, [%sp + 2047 + 128 + 0x40]	! res3
	stx	%g0, [%sp + 2047 + 128 + 0x48]	! res4
	stx	%g0, [%sp + 2047 + 128 + 0x50]	! res5
	call	%l7
	 add	%sp, (2047 + 128), %o0		! argument array

	ldx	[%sp + 2047 + 128 + 0x40], %l1	! translation mode
	mov	(1b - prom_boot_mapping_mode), %l4
	sub	%l0, %l4, %l4
	stw	%l1, [%l4]
	mov	(1b - prom_boot_mapping_phys_high), %l4
	sub	%l0, %l4, %l4
	ldx	[%sp + 2047 + 128 + 0x48], %l2	! physaddr high
	stx	%l2, [%l4 + 0x0]
	ldx	[%sp + 2047 + 128 + 0x50], %l3	! physaddr low
	/* 4MB align */
	srlx	%l3, 22, %l3
	sllx	%l3, 22, %l3
	stx	%l3, [%l4 + 0x8]
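	/* The low physical address is rounded down to a 4MB boundary
	 * above, presumably to match the 4MB page size used to map
	 * the kernel image.
	 */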

	/* Leave service as-is, "call-method" */
	mov	7, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x08]	! num_args, 7
	mov	1, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x10]	! num_rets, 1
	mov	(1b - prom_map_name), %l3
	sub	%l0, %l3, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x18]	! arg1: "map"
	/* Leave arg2 as-is, prom_mmu_ihandle_cache */
	mov	-1, %l3
	stx	%l3, [%sp + 2047 + 128 + 0x28]	! arg3: mode (-1 default)
	sethi	%hi(8 * 1024 * 1024), %l3
	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4: size (8MB)
	sethi	%hi(KERNBASE), %l3
	stx	%l3, [%sp + 2047 + 128 + 0x38]	! arg5: vaddr (KERNBASE)
	stx	%g0, [%sp + 2047 + 128 + 0x40]	! arg6: empty
	mov	(1b - prom_boot_mapping_phys_low), %l3
	sub	%l0, %l3, %l3
	ldx	[%l3], %l3
	stx	%l3, [%sp + 2047 + 128 + 0x48]	! arg7: phys addr
	call	%l7
	 add	%sp, (2047 + 128), %o0		! argument array
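	/* The firmware "map" call above establishes an 8MB mapping of
	 * the kernel image at KERNBASE.  We keep executing at the
	 * boot-time mapping for now; the absolute "jmpl" through
	 * sun4u_init later on lands in the kernel's linked addresses.
	 */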

	add	%sp, (192 + 128), %sp

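	/* Check whether the root node's "compatible" property begins
	 * with "sun4v"; if the first five characters match, record
	 * that in is_sun4v for later run-time checks.
	 */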
	sethi	%hi(prom_root_compatible), %g1
	or	%g1, %lo(prom_root_compatible), %g1
	sethi	%hi(prom_sun4v_name), %g7
	or	%g7, %lo(prom_sun4v_name), %g7
	mov	5, %g3
1:	ldub	[%g7], %g2
	ldub	[%g1], %g4
	cmp	%g2, %g4
	bne,pn	%icc, 2f
	 add	%g7, 1, %g7
	subcc	%g3, 1, %g3
	bne,pt	%xcc, 1b
	 add	%g1, 1, %g1

	sethi	%hi(is_sun4v), %g1
	or	%g1, %lo(is_sun4v), %g1
	mov	1, %g7
	stw	%g7, [%g1]

2:
	BRANCH_IF_SUN4V(g1, jump_to_sun4u_init)
	BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
	ba,pt	%xcc, spitfire_boot
	 nop

cheetah_plus_boot:
	/* Preserve OBP chosen DCU and DCR register settings.  */
	ba,pt	%xcc, cheetah_generic_boot
	 nop

cheetah_boot:
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
	or	%g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
	sllx	%g7, 32, %g7
	or	%g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7
	stxa	%g7, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync

cheetah_generic_boot:
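	/* Clear the primary, secondary and nucleus TSB extension
	 * registers in the MMUs so we do not inherit stale values
	 * from the firmware.
	 */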
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	ba,a,pt	%xcc, jump_to_sun4u_init

spitfire_boot:
	/* Typically PROM has already enabled both MMUs and both on-chip
	 * caches, but we do it here anyway just to be paranoid.
	 */
	mov	(LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
	stxa	%g1, [%g0] ASI_LSU_CONTROL
	membar	#Sync

jump_to_sun4u_init:
	/*
	 * Make sure we are in privileged mode, have address masking
	 * disabled, are using the ordinary globals, and have floating
	 * point enabled.
	 *
	 * Again, typically PROM has left %pil at 13 or similar, and
	 * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
	 */
	wrpr	%g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
	wr	%g0, 0, %fprs

	set	sun4u_init, %g2
	jmpl	%g2 + %g0, %g0
	 nop

sun4u_init:
	BRANCH_IF_SUN4V(g1, sun4v_init)

	/* Set ctx 0 */
	mov		PRIMARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_DMMU
	membar		#Sync

	mov		SECONDARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_DMMU
	membar		#Sync

	ba,pt		%xcc, sun4u_continue
	 nop

sun4v_init:
	/* Set ctx 0 */
	mov		PRIMARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_MMU
	membar		#Sync

	mov		SECONDARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_MMU
	membar		#Sync
	ba,pt		%xcc, niagara_tlb_fixup
	 nop

sun4u_continue:
	BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)

	ba,pt	%xcc, spitfire_tlb_fixup
	 nop

niagara_tlb_fixup:
	mov	3, %g2		/* Set TLB type to hypervisor. */
	sethi	%hi(tlb_type), %g1
	stw	%g2, [%g1 + %lo(tlb_type)]

	/* Patch copy/clear ops.  */
	call	niagara_patch_copyops
	 nop
	call	niagara_patch_bzero
	 nop
	call	niagara_patch_pageops
	 nop

	/* Patch TLB/cache ops.  */
	call	hypervisor_patch_cachetlbops
	 nop

	ba,pt	%xcc, tlb_fixup_done
	 nop

cheetah_tlb_fixup:
	mov	2, %g2		/* Set TLB type to cheetah+. */
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)

	mov	1, %g2		/* Set TLB type to cheetah. */

1:	sethi	%hi(tlb_type), %g1
	stw	%g2, [%g1 + %lo(tlb_type)]

	/* Patch copy/page operations to cheetah optimized versions. */
	call	cheetah_patch_copyops
	 nop
	call	cheetah_patch_copy_page
	 nop
	call	cheetah_patch_cachetlbops
	 nop

	ba,pt	%xcc, tlb_fixup_done
	 nop

spitfire_tlb_fixup:
	/* Set TLB type to spitfire. */
	mov	0, %g2
	sethi	%hi(tlb_type), %g1
	stw	%g2, [%g1 + %lo(tlb_type)]

tlb_fixup_done:
	sethi	%hi(init_thread_union), %g6
	or	%g6, %lo(init_thread_union), %g6
	ldx	[%g6 + TI_TASK], %g4
	mov	%sp, %l6
	mov	%o4, %l7

	wr	%g0, ASI_P, %asi
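	/* Switch onto the init_thread_union kernel stack: %sp points
	 * (1 << THREAD_SHIFT) bytes above %g6, minus one stack frame
	 * and the 64-bit ABI stack bias.  A zero %fp marks the end of
	 * the frame chain.
	 */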
	mov	1, %g1
	sllx	%g1, THREAD_SHIFT, %g1
	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
	add	%g6, %g1, %sp
	mov	0, %fp

	/* Set the per-cpu pointer initially to zero; this makes
	 * the boot cpu use the in-kernel-image per-cpu areas
	 * before setup_per_cpu_area() is invoked.
	 */
	clr	%g5

	wrpr	%g0, 0, %wstate
	wrpr	%g0, 0x0, %tl

	/* Clear the bss */
	sethi	%hi(__bss_start), %o0
	or	%o0, %lo(__bss_start), %o0
	sethi	%hi(_end), %o1
	or	%o1, %lo(_end), %o1
	call	__bzero
	 sub	%o1, %o0, %o1

#ifdef CONFIG_LOCKDEP
	/* We have to call this super early, as even prom_init can grab
	 * spinlocks and thus call into the lockdep code.
	 */
	call	lockdep_init
	 nop
#endif

	mov	%l6, %o1			! OpenPROM stack
	call	prom_init
	 mov	%l7, %o0			! OpenPROM cif handler

	/* Initialize current_thread_info()->cpu as early as possible.
	 * In order to do that accurately we have to patch up the get_cpuid()
	 * assembler sequences.  And that, in turn, requires that we know
	 * if we are on a Starfire box or not.  While we're here, patch up
	 * the sun4v sequences as well.
	 */
	call	check_if_starfire
	 nop
	call	per_cpu_patch
	 nop
	call	sun4v_patch
	 nop

#ifdef CONFIG_SMP
	call	hard_smp_processor_id
	 nop
	cmp	%o0, NR_CPUS
	blu,pt	%xcc, 1f
	 nop
	call	boot_cpu_id_too_large
	 nop
	/* Not reached... */

1:
#else
	mov	0, %o0
#endif
	sth	%o0, [%g6 + TI_CPU]

	/* Off we go.... */
	call	start_kernel
	 nop
	/* Not reached... */

	/* This is meant to allow the sharing of this code between
	 * boot processor invocation (via setup_tba() below) and
	 * secondary processor startup (via trampoline.S).  The
	 * former does use this code, the latter does not yet due
	 * to some complexities.  That should be fixed up at some
	 * point.
	 *
	 * There used to be enormous complexity wrt. transferring
	 * over from the firmware's trap table to the Linux kernel's.
	 * For example, there was a chicken & egg problem wrt. building
	 * the OBP page tables, yet needing to be on the Linux kernel
	 * trap table (to translate PAGE_OFFSET addresses) in order to
	 * do that.
	 *
	 * We now handle OBP tlb misses differently, via linear lookups
	 * into the prom_trans[] array.  So that specific problem no
	 * longer exists.  Yet, unfortunately there are still some issues
	 * preventing trampoline.S from using this code... ho hum.
	 */
	.globl	setup_trap_table
setup_trap_table:
	save	%sp, -192, %sp

	/* Force interrupts to be disabled. */
	rdpr	%pstate, %l0
	andn	%l0, PSTATE_IE, %o1
	wrpr	%o1, 0x0, %pstate
	rdpr	%pil, %l1
	wrpr	%g0, 15, %pil

	/* Make the firmware call to jump over to the Linux trap table.  */
	sethi	%hi(is_sun4v), %o0
	lduw	[%o0 + %lo(is_sun4v)], %o0
	brz,pt	%o0, 1f
	 nop

	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
	add	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
	stxa	%g2, [%g0] ASI_SCRATCHPAD
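	/* The scratchpad register now holds the virtual address of
	 * this cpu's MMU fault status area, where the sun4v TLB miss
	 * handlers expect to find it.
	 */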

	/* Compute physical address:
	 *
	 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
	 */
	sethi	%hi(KERNBASE), %g3
	sub	%g2, %g3, %g2
	sethi	%hi(kern_base), %g3
	ldx	[%g3 + %lo(kern_base)], %g3
	add	%g2, %g3, %o1

	call	prom_set_trap_table_sun4v
	 sethi	%hi(sparc64_ttable_tl0), %o0

	ba,pt	%xcc, 2f
	 nop

1:	call	prom_set_trap_table
	 sethi	%hi(sparc64_ttable_tl0), %o0

	/* Start using proper page size encodings in ctx register.  */
2:	sethi	%hi(sparc64_kern_pri_context), %g3
	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2

	mov		PRIMARY_CONTEXT, %g1

661:	stxa		%g2, [%g1] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa		%g2, [%g1] ASI_MMU
	.previous
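	/* The .sun4v_1insn_patch entry above records the address of
	 * the stxa at 661: together with its sun4v replacement;
	 * sun4v_patch(), called earlier during boot, rewrites the
	 * instruction in place when we are running on a hypervisor.
	 */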

	membar	#Sync

	/* Kill PROM timer */
	sethi	%hi(0x80000000), %o2
	sllx	%o2, 32, %o2
	wr	%o2, 0, %tick_cmpr
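	/* Bit 63 of %tick_cmpr is the interrupt-disable bit, so the
	 * write above shuts off any tick-compare interrupt the
	 * firmware may have left armed.
	 */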

	BRANCH_IF_SUN4V(o2, 1f)
	BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)

	ba,pt	%xcc, 2f
	 nop

	/* Disable STICK_INT interrupts. */
1:
	sethi	%hi(0x80000000), %o2
	sllx	%o2, 32, %o2
	wr	%o2, %asr25

2:
	wrpr	%g0, %g0, %wstate

	call	init_irqwork_curcpu
	 nop

	/* Now we can restore interrupt state. */
	wrpr	%l0, 0, %pstate
	wrpr	%l1, 0x0, %pil

	ret
	 restore

	.globl	setup_tba
setup_tba:
	save	%sp, -192, %sp

	/* The boot processor is the only cpu which invokes this
	 * routine; the other cpus set things up via trampoline.S.
	 * So save the OBP trap table address here.
	 */
	rdpr	%tba, %g7
	sethi	%hi(prom_tba), %o1
	or	%o1, %lo(prom_tba), %o1
	stx	%g7, [%o1]

	call	setup_trap_table
	 nop

	ret
	 restore
sparc64_boot_end:

#include "etrap.S"
#include "rtrap.S"
#include "winfixup.S"
#include "entry.S"
#include "sun4v_tlb_miss.S"
#include "sun4v_ivec.S"
#include "ktlb.S"
#include "tsb.S"

/*
 * The following skip makes sure the trap table in ttable.S is aligned
 * on a 32K boundary, as required by the V9 specs for the TBA register.
 *
 * We align to a 32K boundary, then we have the 32K kernel TSB,
 * the 64K kernel 4MB TSB, and then the 32K aligned trap table.
 */
1:
	.skip	0x4000 + _start - 1b

! 0x0000000000408000

	.globl	swapper_tsb
swapper_tsb:
	.skip	(32 * 1024)

	.globl	swapper_4m_tsb
swapper_4m_tsb:
	.skip	(64 * 1024)

! 0x0000000000420000

	/* Some care needs to be exercised if you try to move the
	 * location of the trap table relative to other things.  For
	 * one thing there are br* instructions in some of the
	 * trap table entries which branch back to code in ktlb.S.
	 * Those instructions can only handle a signed 16-bit
	 * displacement.
	 *
	 * There is a binutils bug (bugzilla #4558) which causes
	 * the relocation overflow checks for such instructions to
	 * not be done correctly.  So binutils will not notice the
	 * error and will instead write junk into the relocation and
	 * you'll have an unbootable kernel.
	 */
#include "ttable.S"

! 0x0000000000428000

#include "systbls.S"

	.data
	.align	8
	.globl	prom_tba, tlb_type
prom_tba:	.xword	0
tlb_type:	.word	0	/* Must NOT end up in BSS */
	.section	".fixup",#alloc,#execinstr

	.globl	__ret_efault, __retl_efault
__ret_efault:
	ret
	 restore %g0, -EFAULT, %o0
__retl_efault:
	retl
	 mov	-EFAULT, %o0
