/*-
 * Copyright (c) 2002 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: head/sys/sparc64/sparc64/mp_locore.S 181701 2008-08-13 20:30:28Z marius $");

#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include <machine/smp.h>
#include <machine/upa.h>

#include "assym.s"

	.register	%g2, #ignore
	.register	%g3, #ignore

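/*
 * Startup trampoline for the application processors.  The code is
 * position independent; the boot processor is expected to copy it into
 * a buffer of its own, store the number of kernel TLB slots at 4: and
 * the address of the function to enter (normally mp_startup) at 5:,
 * and append the corresponding kernel TTEs after 6:.  The offsets of
 * these fields are exported via the DATA symbols below.  Each
 * application processor enters the listed 4MB mappings into its ITLB
 * and DTLB and then jumps to the given function.
 */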
	.text
	_ALIGN_TEXT
1:	rd	%pc, %l0
	ldx	[%l0 + (4f-1b)], %l1
	add	%l0, (6f-1b), %l2
	clr	%l3
2:	cmp	%l3, %l1
	be	%xcc, 3f
	 nop
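	/*
	 * Load the virtual page number and data of the next TTE and
	 * enter the 4MB mapping into both the data and instruction TLBs.
	 */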
	ldx	[%l2 + TTE_VPN], %l4
	ldx	[%l2 + TTE_DATA], %l5
	srlx	%l4, TV_SIZE_BITS, %l4
	sllx	%l4, PAGE_SHIFT_4M, %l4
	wr	%g0, ASI_DMMU, %asi
	stxa	%l4, [%g0 + AA_DMMU_TAR] %asi
	stxa	%l5, [%g0] ASI_DTLB_DATA_IN_REG
	wr	%g0, ASI_IMMU, %asi
	stxa	%l4, [%g0 + AA_IMMU_TAR] %asi
	stxa	%l5, [%g0] ASI_ITLB_DATA_IN_REG
	membar	#Sync
	flush	%l4
	add	%l2, 1 << TTE_SHIFT, %l2
	add	%l3, 1, %l3
	ba	%xcc, 2b
	 nop
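	/*
	 * All mappings are in place; fetch the entry point stored at 5:
	 * and jump to it.
	 */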
3:	ldx	[%l0 + (5f-1b)], %l1
	jmpl	%l1, %g0
	 nop
	_ALIGN_DATA
4:	.xword	0x0
5:	.xword	0x0
6:

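/*
 * The address and length of the trampoline, plus the offsets of the
 * TLB slot count and entry point words within it, for use by the boot
 * processor when it copies and patches the code above.
 */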
DATA(mp_tramp_code)
	.xword	1b
DATA(mp_tramp_code_len)
	.xword	6b-1b
DATA(mp_tramp_tlb_slots)
	.xword	4b-1b
DATA(mp_tramp_func)
	.xword	5b-1b

/*
 * void mp_startup(void)
 *
 * Entry point for the application processors once the trampoline above has
 * set up their TLBs: synchronize %tick with the boot processor, wait for
 * our start signal, map the per-CPU pages and call cpu_mp_bootstrap().
 */
ENTRY(mp_startup)
	wrpr	%g0, PSTATE_NORMAL, %pstate
	wrpr	%g0, 0, %cleanwin
	wrpr	%g0, 0, %pil
	wr	%g0, 0, %fprs

	SET(cpu_start_args, %l1, %l0)

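	/*
	 * Announce that we are ready for clock synchronization, then spin
	 * until the boot processor hands us a %tick value.
	 */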
	mov	CPU_CLKSYNC, %l1
	membar	#StoreLoad
	stw	%l1, [%l0 + CSA_STATE]

1:	ldx	[%l0 + CSA_TICK], %l1
	brz	%l1, 1b
	 nop
	wrpr	%l1, 0, %tick

	UPA_GET_MID(%o0)

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP, "mp_start: CPU %d entered kernel"
	    , %g1, %g2, %g3, 7, 8, 9)
	stx	%o0, [%g1 + KTR_PARM1]
9:
#endif

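	/*
	 * Report our version register back to the boot processor via the
	 * start args.
	 */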
	rdpr	%ver, %l1
	stx	%l1, [%l0 + CSA_VER]

	/*
	 * Inform the boot processor that we have initialized.
	 */
	mov	CPU_INIT, %l1
	membar	#LoadStore
	stw	%l1, [%l0 + CSA_STATE]

	/*
	 * Wait until it's our turn to bootstrap.
	 */
2:	lduw	[%l0 + CSA_MID], %l1
	cmp	%l1, %o0
	bne	%xcc, 2b
	 nop

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP, "_mp_start: CPU %d got start signal"
	    , %g1, %g2, %g3, 7, 8, 9)
	stx	%o0, [%g1 + KTR_PARM1]
9:
#endif

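	/*
	 * %l1 points at the per-CPU page TTEs in the start args, %l2 is
	 * the loop index.
	 */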
	add	%l0, CSA_TTES, %l1
	clr	%l2

	/*
	 * Map the per-CPU pages, entering each 8KB TTE from the start
	 * args into the DTLB.
	 */
3:	sllx	%l2, TTE_SHIFT, %l3
	add	%l1, %l3, %l3

	ldx	[%l3 + TTE_VPN], %l4
	ldx	[%l3 + TTE_DATA], %l5

	wr	%g0, ASI_DMMU, %asi
	srlx	%l4, TV_SIZE_BITS, %l4
	sllx	%l4, PAGE_SHIFT_8K, %l4
	stxa	%l4, [%g0 + AA_DMMU_TAR] %asi
	stxa	%l5, [%g0] ASI_DTLB_DATA_IN_REG
	membar	#Sync

	add	%l2, 1, %l2
	cmp	%l2, PCPU_PAGES
	bne	%xcc, 3b
	 nop

	/*
	 * Get onto our per-CPU panic stack, which precedes the struct pcpu
	 * in the per-CPU page.
	 */
	ldx	[%l0 + CSA_PCPU], %l1
	set	PCPU_PAGES * PAGE_SIZE - PC_SIZEOF, %l2
	add	%l1, %l2, %l1
	sub	%l1, SPOFF + CCFSZ, %sp

	/*
	 * Enable interrupts.
	 */
	wrpr	%g0, PSTATE_KERNEL, %pstate

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP,
	    "_mp_start: bootstrap cpuid=%d mid=%d pcpu=%#lx data=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	lduw	[%l1 + PC_CPUID], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	lduw	[%l1 + PC_MID], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%l1, [%g1 + KTR_PARM3]
	stx	%sp, [%g1 + KTR_PARM5]
9:
#endif

	/*
	 * And away we go.  This doesn't return.
	 */
	call	cpu_mp_bootstrap
	 mov	%l1, %o0
	sir
	! NOTREACHED
END(mp_startup)