/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>
#include <asm/pgtable.h>

#include <linux/linkage.h>

	.level	LEVEL

26	.data
27ENTRY(boot_args)
28	.word 0 /* arg0 */
29	.word 0 /* arg1 */
30	.word 0 /* arg2 */
31	.word 0 /* arg3 */
32END(boot_args)

	.text
	.align	4
	.import init_thread_union,data
	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/
	.export _stext,data		/* Kernel wants it this way! */
43_stext:
44ENTRY(stext)
45	.proc
46	.callinfo
47
48	/* Make sure sr4-sr7 are set to zero for the kernel address space */
49	mtsp	%r0,%sr4
50	mtsp	%r0,%sr5
51	mtsp	%r0,%sr6
52	mtsp	%r0,%sr7
53
54	/* Clear BSS (shouldn't the boot loader do this?) */
55
56	.import __bss_start,data
57	.import __bss_stop,data
58
59	load32		PA(__bss_start),%r3
60	load32		PA(__bss_stop),%r4
61$bss_loop:
62	cmpb,<<,n       %r3,%r4,$bss_loop
63	stw,ma          %r0,4(%r3)
64
65	/* Save away the arguments the boot loader passed in (32 bit args) */
66	load32		PA(boot_args),%r1
67	stw,ma          %arg0,4(%r1)
68	stw,ma          %arg1,4(%r1)
69	stw,ma          %arg2,4(%r1)
70	stw,ma          %arg3,4(%r1)
71
72	/* Initialize startup VM. Just map first 8/16 MB of memory */
73	load32		PA(swapper_pg_dir),%r4
74	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
75	mtctl		%r4,%cr25	/* Initialize user root pointer */
76
77#if PT_NLEVELS == 3
78	/* Set pmd in pgd */
79	load32		PA(pmd0),%r5
80	shrd            %r5,PxD_VALUE_SHIFT,%r3
81	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
82	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
83	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
84#else
85	/* 2-level page table, so pmd == pgd */
86	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
87#endif
88
89	/* Fill in pmd with enough pte directories */
90	load32		PA(pg0),%r1
91	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
92	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
93
94	ldi		ASM_PT_INITIAL,%r1
95
961:
97	stw		%r3,0(%r4)
98	ldo		(ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
99	addib,>		-1,%r1,1b
100#if PT_NLEVELS == 3
101	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
102#else
103	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
104#endif
105
106
107	/* Now initialize the PTEs themselves */
108	ldo		0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
109	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
110	load32		PA(pg0),%r1
111
112$pgt_fill_loop:
113	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
114	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
115	addib,>		-1,%r11,$pgt_fill_loop
116	nop
117
118	/* Load the return address...er...crash 'n burn */
119	copy		%r0,%r2
120
121	/* And the RFI Target address too */
122	load32		start_kernel,%r11
123
124	/* And the initial task pointer */
125	load32		init_thread_union,%r6
126	mtctl           %r6,%cr30
127
128	/* And the stack pointer too */
129	ldo             THREAD_SZ_ALGN(%r6),%sp
130
131	/* And the interrupt stack */
132	load32		interrupt_stack,%r6
133	mtctl           %r6,%cr31
134
135#ifdef CONFIG_SMP
136	/* Set the smp rendevous address into page zero.
137	** It would be safer to do this in init_smp_config() but
138	** it's just way easier to deal with here because
139	** of 64-bit function ptrs and the address is local to this file.
140	*/
141	load32		PA(smp_slave_stext),%r10
142	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
143	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */
144
145	/* FALLTHROUGH */
146	.procend
147
148	/*
149	** Code Common to both Monarch and Slave processors.
150	** Entry:
151	**
152	**  1.1:
153	**    %r11 must contain RFI target address.
154	**    %r25/%r26 args to pass to target function
155	**    %r2  in case rfi target decides it didn't like something
156	**
157	**  2.0w:
158	**    %r3  PDCE_PROC address
159	**    %r11 RFI target address
160	**
161	** Caller must init: SR4-7, %sp, %r10, %cr24/25,
162	*/
163common_stext:
164	.proc
165	.callinfo
166#else
167	/* Clear PDC entry point - we won't use it */
168	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
169	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
170#endif /*CONFIG_SMP*/
171
172#ifdef CONFIG_64BIT
173	tophys_r1	%sp
174
175	/* Save the rfi target address */
176	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
177	tophys_r1       %r10
178	std             %r11,  TASK_PT_GR11(%r10)
179	/* Switch to wide mode Superdome doesn't support narrow PDC
180	** calls.
181	*/
1821:	mfia            %rp             /* clear upper part of pcoq */
183	ldo             2f-1b(%rp),%rp
184	depdi           0,31,32,%rp
185	bv              (%rp)
186	ssm             PSW_SM_W,%r0
187
188        /* Set Wide mode as the "Default" (eg for traps)
189        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
190        ** Someday, palo might not do this for the Monarch either.
191        */
1922:
193#define MEM_PDC_LO 0x388
194#define MEM_PDC_HI 0x35C
195	ldw             MEM_PDC_LO(%r0),%r3
196	ldw             MEM_PDC_HI(%r0),%r6
197	depd            %r6, 31, 32, %r3        /* move to upper word */
198
199	ldo             PDC_PSW(%r0),%arg0              /* 21 */
200	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
201	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
202	load32          PA(stext_pdc_ret), %rp
203	bv              (%r3)
204	copy            %r0,%arg3
205
206stext_pdc_ret:
207	/* restore rfi target address*/
208	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
209	tophys_r1       %r10
210	ldd             TASK_PT_GR11(%r10), %r11
211	tovirt_r1       %sp
212#endif
213
214	/* PARANOID: clear user scratch/user space SR's */
215	mtsp	%r0,%sr0
216	mtsp	%r0,%sr1
217	mtsp	%r0,%sr2
218	mtsp	%r0,%sr3
219
220	/* Initialize Protection Registers */
221	mtctl	%r0,%cr8
222	mtctl	%r0,%cr9
223	mtctl	%r0,%cr12
224	mtctl	%r0,%cr13
225
226	/* Initialize the global data pointer */
227	loadgp
228
229	/* Set up our interrupt table.  HPMCs might not work after this!
230	 *
231	 * We need to install the correct iva for PA1.1 or PA2.0. The
232	 * following short sequence of instructions can determine this
233	 * (without being illegal on a PA1.1 machine).
234	 */
235#ifndef CONFIG_64BIT
236	ldi		32,%r10
237	mtctl		%r10,%cr11
238	.level 2.0
239	mfctl,w		%cr11,%r10
240	.level 1.1
241	comib,<>,n	0,%r10,$is_pa20
242	ldil		L%PA(fault_vector_11),%r10
243	b		$install_iva
244	ldo		R%PA(fault_vector_11)(%r10),%r10
245
246$is_pa20:
247	.level		LEVEL /* restore 1.1 || 2.0w */
248#endif /*!CONFIG_64BIT*/
249	load32		PA(fault_vector_20),%r10
250
251$install_iva:
252	mtctl		%r10,%cr14
253
254	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
255	nop
256
257	.align 128
258aligned_rfi:
259	pcxt_ssm_bug
260
261	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
262	/* Don't need NOPs, have 8 compliant insn before rfi */
263
264	mtctl		%r0,%cr17	/* Clear IIASQ tail */
265	mtctl		%r0,%cr17	/* Clear IIASQ head */
266
267	/* Load RFI target into PC queue */
268	mtctl		%r11,%cr18	/* IIAOQ head */
269	ldo		4(%r11),%r11
270	mtctl		%r11,%cr18	/* IIAOQ tail */
271
272	load32		KERNEL_PSW,%r10
273	mtctl		%r10,%ipsw
274
275	/* Jump through hyperspace to Virt Mode */
276	rfi
277	nop
278
279	.procend

#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

286#ifndef CONFIG_64BIT
287smp_callin_rtn:
288        .proc
289	.callinfo
290	break	1,1		/*  Break if returned from start_secondary */
291	nop
292	nop
293        .procend
294#endif /*!CONFIG_64BIT*/
295
296/***************************************************************************
297* smp_slave_stext is executed by all non-monarch Processors when the Monarch
298* pokes the slave CPUs in smp.c:smp_boot_cpus().
299*
300* Once here, registers values are initialized in order to branch to virtual
301* mode. Once all available/eligible CPUs are in virtual mode, all are
302* released and start out by executing their own idle task.
303*****************************************************************************/
304smp_slave_stext:
305        .proc
306	.callinfo
307
308	/*
309	** Initialize Space registers
310	*/
311	mtsp	   %r0,%sr4
312	mtsp	   %r0,%sr5
313	mtsp	   %r0,%sr6
314	mtsp	   %r0,%sr7
315
316	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
317	load32		PA(smp_init_current_idle_task),%sp
318	LDREG		0(%sp),%sp	/* load task address */
319	tophys_r1	%sp
320	LDREG		TASK_THREAD_INFO(%sp),%sp
321	mtctl           %sp,%cr30       /* store in cr30 */
322	ldo             THREAD_SZ_ALGN(%sp),%sp
323
324	/* point CPU to kernel page tables */
325	load32		PA(swapper_pg_dir),%r4
326	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
327	mtctl		%r4,%cr25	/* Initialize user root pointer */
328
329#ifdef CONFIG_64BIT
330	/* Setup PDCE_PROC entry */
331	copy            %arg0,%r3
332#else
333	/* Load RFI *return* address in case smp_callin bails */
334	load32		smp_callin_rtn,%r2
335#endif
336
337	/* Load RFI target address.  */
338	load32		smp_callin,%r11
339
340	/* ok...common code can handle the rest */
341	b		common_stext
342	nop
343
344	.procend
345#endif /* CONFIG_SMP */

ENDPROC(stext)

349#ifndef CONFIG_64BIT
350	.data
351
352	.align	4
353	.export	$global$,data
354
355	.type	$global$,@object
356	.size	$global$,4
357$global$:
358	.word 0
359#endif /*!CONFIG_64BIT*/