/* $NetBSD: locore.s,v 1.120 2010/07/07 01:17:49 chs Exp $ */

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

.stabs	__FILE__,100,0,0,kernel_text

#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_netbsd.h"

#include <machine/asm.h>

__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.120 2010/07/07 01:17:49 chs Exp $");

#include "assym.h"

.stabs	__FILE__,132,0,0,kernel_text

/*
 * Perform actions necessary to switch to a new context.  The
 * hwpcb should be in a0.  Clobbers v0, t0, t8..t11, a0.
 */
#define	SWITCH_CONTEXT							\
	/* Make a note of the context we're running on. */		\
	GET_CURPCB						;	\
	stq	a0, 0(v0)					;	\
									\
	/* Swap in the new context. */					\
	call_pal PAL_OSF1_swpctx


	/* don't reorder instructions; paranoia. */
	.set noreorder
	.text

	.macro	bfalse	reg, dst
	beq	\reg, \dst
	.endm

	.macro	btrue	reg, dst
	bne	\reg, \dst
	.endm

/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	EXPORT(kernel_text)
.loc	1 __LINE__
kernel_text:

/*
 * bootstack: a temporary stack, for booting.
 *
 * Extends from 'start' down.
 */
bootstack:

/*
 * locorestart: Kernel start. This is no longer the actual entry
 * point, although jumping to here (the first kernel address) will
 * in fact work just fine.
 *
 * Arguments:
 *	a0 is the first free page frame number (PFN)
 *	a1 is the page table base register (PTBR)
 *	a2 is the bootinfo magic number
 *	a3 is the pointer to the bootinfo structure
 *
 * All arguments are passed to alpha_init().
 */
NESTED_NOPROFILE(locorestart,1,0,ra,0,0)
	br	pv,1f
1:	LDGP(pv)

	/* Switch to the boot stack. */
	lda	sp,bootstack

	/* Load KGP with current GP. */
	mov	a0, s0			/* save pfn */
	mov	gp, a0
	call_pal PAL_OSF1_wrkgp		/* clobbers a0, t0, t8-t11 */
	mov	s0, a0			/* restore pfn */

	/*
	 * Call alpha_init() to do pre-main initialization.
	 * alpha_init() gets the arguments we were called with,
	 * which are already in a0, a1, a2, a3, and a4.
	 */
	CALL(alpha_init)

	/* Set up the virtual page table pointer. */
	ldiq	a0, VPTBASE
	call_pal PAL_OSF1_wrvptptr	/* clobbers a0, t0, t8-t11 */

	/*
	 * Switch to lwp0's PCB.
	 */
	lda	a0, lwp0
	ldq	a0, L_MD_PCBPADDR(a0)		/* phys addr of PCB */
	SWITCH_CONTEXT

	/*
	 * We've switched to a new page table base, so invalidate the TLB
	 * and I-stream.  This happens automatically everywhere but here.
	 */
	ldiq	a0, -2				/* TBIA */
	call_pal PAL_OSF1_tbi
	call_pal PAL_imb

	/*
	 * All ready to go!  Call main()!
	 */
	CALL(main)

	/* This should never happen. */
	PANIC("main() returned",Lmain_returned_pmsg)
	END(locorestart)

/**************************************************************************/

/*
 * Pull in the PROM interface routines; these are needed for
 * prom printf (while bootstrapping), and for determining the
 * boot device, etc.
 */
#include <alpha/alpha/prom_disp.s>

/**************************************************************************/

/*
 * Pull in the PALcode function stubs.
 */
#include <alpha/alpha/pal.s>

/**************************************************************************/

/**************************************************************************/

#if defined(MULTIPROCESSOR)
/*
 * Pull in the multiprocessor glue.
 */
#include <alpha/alpha/multiproc.s>
#endif /* MULTIPROCESSOR */

/**************************************************************************/

/**************************************************************************/

#if defined(DDB) || defined(KGDB)
/*
 * Pull in debugger glue.
 */
#include <alpha/alpha/debug.s>
#endif /* DDB || KGDB */

/**************************************************************************/

/**************************************************************************/

	.text
.stabs	__FILE__,132,0,0,backtolocore1	/* done with includes */
.loc	1 __LINE__
backtolocore1:
/**************************************************************************/

#ifdef COMPAT_16
/*
 * Signal "trampoline" code.
 *
 * The kernel arranges for the handler to be invoked directly.  This
 * trampoline is used only to return from the signal.
 *
 * The stack pointer points to the saved sigcontext.
 */

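/*
 * Loosely, the trampoline below behaves like this illustrative C sketch
 * (sigreturn() normally does not return; exit() is reached only if it
 * fails; "scp" stands for the sigcontext pointer found in sp):
 *
 *	sketch(struct sigcontext *scp)
 *	{
 *		int error = sigreturn(scp);	// compat_16___sigreturn14
 *		exit(error);			// only on failure
 *	}
 */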
NESTED_NOPROFILE(sigcode,0,0,ra,0,0)
	mov	sp, a0			/* get pointer to sigcontext */
	CALLSYS_NOERROR(compat_16___sigreturn14)	/* and call sigreturn() with it. */
	mov	v0, a0			/* if that failed, get error code */
	CALLSYS_NOERROR(exit)		/* and call exit() with it. */
XNESTED(esigcode,0)
	END(sigcode)
#endif /* COMPAT_16 */

/**************************************************************************/

/*
 * exception_return: return from trap, exception, or syscall
 */

IMPORT(ssir, 8)
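/*
 * Loose C-style sketch of the flow below (illustrative only; names
 * follow the assembly):
 *
 *	if ((saved_PS & ALPHA_PSL_IPL_MASK) == 0) {
 *		while (ssir)			// soft interrupt pending
 *			{ raise IPL to SOFT; softintr_dispatch(); restore IPL; }
 *		if (saved_PS & ALPHA_PSL_USERMODE) {
 *			while (curlwp->l_md.md_astpending)
 *				{ clear it; drop IPL to 0; ast(frame); restore IPL; }
 *			wrfen(FP-active bit of curlwp->l_md.md_flags);
 *		}
 *	}
 *	restore registers; rti;
 */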

LEAF(exception_return, 1)			/* XXX should be NESTED */
	br	pv, 1f
1:	LDGP(pv)

	ldq	s1, (FRAME_PS * 8)(sp)		/* get the saved PS */
	and	s1, ALPHA_PSL_IPL_MASK, t0	/* look at the saved IPL */
	bne	t0, 5f				/* != 0: can't do AST or SIR */

	/* see if we can do an SIR */
2:	ldq	t1, ssir			/* SIR pending? */
	bne	t1, 6f				/* yes */
	/* no */

	and	s1, ALPHA_PSL_USERMODE, t0	/* are we returning to user? */
	beq	t0, 5f				/* no: just return */
	/* yes */

	/* GET_CPUINFO clobbers v0, t0, t8...t11. */
3:	GET_CPUINFO

	/* check for AST */
	ldq	t1, CPU_INFO_CURLWP(v0)
	ldl	t3, L_MD_ASTPENDING(t1)		/* AST pending? */
	bne	t3, 7f				/* yes */
	/* no: headed back to user space */

	/* Enable the FPU based on whether MDLWP_FPACTIVE is set. */
4:	ldq	t2, L_MD_FLAGS(t1)
	cmplt	t2, zero, a0
	call_pal PAL_OSF1_wrfen

	/* restore the registers, and return */
5:	bsr	ra, exception_restore_regs	/* jmp/CALL trashes pv/t12 */
	ldq	ra,(FRAME_RA*8)(sp)
	.set noat
	ldq	at_reg,(FRAME_AT*8)(sp)

	lda	sp,(FRAME_SW_SIZE*8)(sp)
	call_pal PAL_OSF1_rti
	.set at
	/* NOTREACHED */

	/* We've got a SIR */
6:	ldiq	a0, ALPHA_PSL_IPL_SOFT
	call_pal PAL_OSF1_swpipl
	mov	v0, s2				/* remember old IPL */
	CALL(softintr_dispatch)

	/* SIR handled; restore IPL and check again */
	mov	s2, a0
	call_pal PAL_OSF1_swpipl
	br	2b

	/* We've got an AST */
7:	stl	zero, L_MD_ASTPENDING(t1)	/* no AST pending */

	ldiq	a0, ALPHA_PSL_IPL_0		/* drop IPL to zero */
	call_pal PAL_OSF1_swpipl
	mov	v0, s2				/* remember old IPL */

	mov	sp, a0				/* only arg is frame */
	CALL(ast)

	/* AST handled; restore IPL and check again */
	mov	s2, a0
	call_pal PAL_OSF1_swpipl
	br	3b

	END(exception_return)

LEAF(exception_save_regs, 0)
	stq	v0,(FRAME_V0*8)(sp)
	stq	a3,(FRAME_A3*8)(sp)
	stq	a4,(FRAME_A4*8)(sp)
	stq	a5,(FRAME_A5*8)(sp)
	stq	s0,(FRAME_S0*8)(sp)
	stq	s1,(FRAME_S1*8)(sp)
	stq	s2,(FRAME_S2*8)(sp)
	stq	s3,(FRAME_S3*8)(sp)
	stq	s4,(FRAME_S4*8)(sp)
	stq	s5,(FRAME_S5*8)(sp)
	stq	s6,(FRAME_S6*8)(sp)
	stq	t0,(FRAME_T0*8)(sp)
	stq	t1,(FRAME_T1*8)(sp)
	stq	t2,(FRAME_T2*8)(sp)
	stq	t3,(FRAME_T3*8)(sp)
	stq	t4,(FRAME_T4*8)(sp)
	stq	t5,(FRAME_T5*8)(sp)
	stq	t6,(FRAME_T6*8)(sp)
	stq	t7,(FRAME_T7*8)(sp)
	stq	t8,(FRAME_T8*8)(sp)
	stq	t9,(FRAME_T9*8)(sp)
	stq	t10,(FRAME_T10*8)(sp)
	stq	t11,(FRAME_T11*8)(sp)
	stq	t12,(FRAME_T12*8)(sp)
	RET
	END(exception_save_regs)

LEAF(exception_restore_regs, 0)
	ldq	v0,(FRAME_V0*8)(sp)
	ldq	a3,(FRAME_A3*8)(sp)
	ldq	a4,(FRAME_A4*8)(sp)
	ldq	a5,(FRAME_A5*8)(sp)
	ldq	s0,(FRAME_S0*8)(sp)
	ldq	s1,(FRAME_S1*8)(sp)
	ldq	s2,(FRAME_S2*8)(sp)
	ldq	s3,(FRAME_S3*8)(sp)
	ldq	s4,(FRAME_S4*8)(sp)
	ldq	s5,(FRAME_S5*8)(sp)
	ldq	s6,(FRAME_S6*8)(sp)
	ldq	t0,(FRAME_T0*8)(sp)
	ldq	t1,(FRAME_T1*8)(sp)
	ldq	t2,(FRAME_T2*8)(sp)
	ldq	t3,(FRAME_T3*8)(sp)
	ldq	t4,(FRAME_T4*8)(sp)
	ldq	t5,(FRAME_T5*8)(sp)
	ldq	t6,(FRAME_T6*8)(sp)
	ldq	t7,(FRAME_T7*8)(sp)
	ldq	t8,(FRAME_T8*8)(sp)
	ldq	t9,(FRAME_T9*8)(sp)
	ldq	t10,(FRAME_T10*8)(sp)
	ldq	t11,(FRAME_T11*8)(sp)
	ldq	t12,(FRAME_T12*8)(sp)
	RET
	END(exception_restore_regs)

/**************************************************************************/

/*
 * XentArith:
 * System arithmetic trap entry point.
 */

	PALVECT(XentArith)		/* setup frame, save registers */

	/* a0, a1, & a2 already set up */
	ldiq	a3, ALPHA_KENTRY_ARITH
	mov	sp, a4			; .loc 1 __LINE__
	CALL(trap)

	jmp	zero, exception_return
	END(XentArith)

/**************************************************************************/

/*
 * XentIF:
 * System instruction fault trap entry point.
 */

	PALVECT(XentIF)			/* setup frame, save registers */

	/* a0, a1, & a2 already set up */
	ldiq	a3, ALPHA_KENTRY_IF
	mov	sp, a4			; .loc 1 __LINE__
	CALL(trap)
	jmp	zero, exception_return
	END(XentIF)

/**************************************************************************/

/*
 * XentInt:
 * System interrupt entry point.
 */

	PALVECT(XentInt)		/* setup frame, save registers */

	/* a0, a1, & a2 already set up */
	mov	sp, a3			; .loc 1 __LINE__
	CALL(interrupt)
	jmp	zero, exception_return
	END(XentInt)

/**************************************************************************/

/*
 * XentMM:
 * System memory management fault entry point.
 */

	PALVECT(XentMM)			/* setup frame, save registers */

	/* a0, a1, & a2 already set up */
	ldiq	a3, ALPHA_KENTRY_MM
	mov	sp, a4			; .loc 1 __LINE__
	CALL(trap)

	jmp	zero, exception_return
	END(XentMM)

/**************************************************************************/

/*
 * XentSys:
 * System call entry point.
 */

	ESETUP(XentSys)			; .loc 1 __LINE__

	stq	v0,(FRAME_V0*8)(sp)		/* in case we need to restart */
	stq	s0,(FRAME_S0*8)(sp)
	stq	s1,(FRAME_S1*8)(sp)
	stq	s2,(FRAME_S2*8)(sp)
	stq	s3,(FRAME_S3*8)(sp)
	stq	s4,(FRAME_S4*8)(sp)
	stq	s5,(FRAME_S5*8)(sp)
	stq	s6,(FRAME_S6*8)(sp)
	stq	a0,(FRAME_A0*8)(sp)
	stq	a1,(FRAME_A1*8)(sp)
	stq	a2,(FRAME_A2*8)(sp)
	stq	a3,(FRAME_A3*8)(sp)
	stq	a4,(FRAME_A4*8)(sp)
	stq	a5,(FRAME_A5*8)(sp)
	stq	ra,(FRAME_RA*8)(sp)

	/* curlwp is first arg; syscall number, passed in v0, is second; frame pointer third */
	mov	v0,a1
	GET_CURLWP
	ldq	a0,0(v0)
	mov	sp,a2			; .loc 1 __LINE__
	ldq	t11,L_PROC(a0)
	ldq	t12,P_MD_SYSCALL(t11)
	CALL((t12))

	jmp	zero, exception_return
	END(XentSys)

/**************************************************************************/

/*
 * XentUna:
 * System unaligned access entry point.
 */

LEAF(XentUna, 3)				/* XXX should be NESTED */
	.set noat
	lda	sp,-(FRAME_SW_SIZE*8)(sp)
	stq	at_reg,(FRAME_AT*8)(sp)
	.set at
	stq	ra,(FRAME_RA*8)(sp)
	bsr	ra, exception_save_regs		/* jmp/CALL trashes pv/t12 */

	/* a0, a1, & a2 already set up */
	ldiq	a3, ALPHA_KENTRY_UNA
	mov	sp, a4			; .loc 1 __LINE__
	CALL(trap)

	jmp	zero, exception_return
	END(XentUna)

/**************************************************************************/

/*
 * savefpstate: Save a process's floating point state.
 *
 * Arguments:
 *	a0	'struct fpstate *' to save into
 */

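/*
 * Loosely, in C (illustrative sketch only; field names are assumed
 * from the FPREG_FPR_REGS/FPREG_FPR_CR offsets used below):
 *
 *	for (i = 0; i <= 30; i++)
 *		fs->fpr_regs[i] = FP register $f[i];
 *	fs->fpr_cr = FPCR;		// read via mf_fpcr
 */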
LEAF(savefpstate, 1)
	LDGP(pv)
	/* save all of the FP registers */
	lda	t1, FPREG_FPR_REGS(a0)	/* get address of FP reg. save area */
	stt	$f0,   (0 * 8)(t1)	/* save first register, using hw name */
	stt	$f1,   (1 * 8)(t1)	/* etc. */
	stt	$f2,   (2 * 8)(t1)
	stt	$f3,   (3 * 8)(t1)
	stt	$f4,   (4 * 8)(t1)
	stt	$f5,   (5 * 8)(t1)
	stt	$f6,   (6 * 8)(t1)
	stt	$f7,   (7 * 8)(t1)
	stt	$f8,   (8 * 8)(t1)
	stt	$f9,   (9 * 8)(t1)
	stt	$f10, (10 * 8)(t1)
	stt	$f11, (11 * 8)(t1)
	stt	$f12, (12 * 8)(t1)
	stt	$f13, (13 * 8)(t1)
	stt	$f14, (14 * 8)(t1)
	stt	$f15, (15 * 8)(t1)
	stt	$f16, (16 * 8)(t1)
	stt	$f17, (17 * 8)(t1)
	stt	$f18, (18 * 8)(t1)
	stt	$f19, (19 * 8)(t1)
	stt	$f20, (20 * 8)(t1)
	stt	$f21, (21 * 8)(t1)
	stt	$f22, (22 * 8)(t1)
	stt	$f23, (23 * 8)(t1)
	stt	$f24, (24 * 8)(t1)
	stt	$f25, (25 * 8)(t1)
	stt	$f26, (26 * 8)(t1)
	stt	$f27, (27 * 8)(t1)
	.set noat
	stt	$f28, (28 * 8)(t1)
	.set at
	stt	$f29, (29 * 8)(t1)
	stt	$f30, (30 * 8)(t1)

	/*
	 * Then save the FPCR; note that the necessary 'trapb's are taken
	 * care of on kernel entry and exit.
	 */
	mf_fpcr	ft0
	stt	ft0, FPREG_FPR_CR(a0)	/* store to FPCR save area */

	RET
	END(savefpstate)

/**************************************************************************/

/*
 * restorefpstate: Restore a process's floating point state.
 *
 * Arguments:
 *	a0	'struct fpstate *' to restore from
 */

LEAF(restorefpstate, 1)
	LDGP(pv)
	/*
	 * Restore the FPCR; note that the necessary 'trapb's are taken care of
	 * on kernel entry and exit.
	 */
	ldt	ft0, FPREG_FPR_CR(a0)	/* load from FPCR save area */
	mt_fpcr	ft0

	/* Restore all of the FP registers. */
	lda	t1, FPREG_FPR_REGS(a0)	/* get address of FP reg. save area */
	ldt	$f0,   (0 * 8)(t1)	/* restore first reg., using hw name */
	ldt	$f1,   (1 * 8)(t1)	/* etc. */
	ldt	$f2,   (2 * 8)(t1)
	ldt	$f3,   (3 * 8)(t1)
	ldt	$f4,   (4 * 8)(t1)
	ldt	$f5,   (5 * 8)(t1)
	ldt	$f6,   (6 * 8)(t1)
	ldt	$f7,   (7 * 8)(t1)
	ldt	$f8,   (8 * 8)(t1)
	ldt	$f9,   (9 * 8)(t1)
	ldt	$f10, (10 * 8)(t1)
	ldt	$f11, (11 * 8)(t1)
	ldt	$f12, (12 * 8)(t1)
	ldt	$f13, (13 * 8)(t1)
	ldt	$f14, (14 * 8)(t1)
	ldt	$f15, (15 * 8)(t1)
	ldt	$f16, (16 * 8)(t1)
	ldt	$f17, (17 * 8)(t1)
	ldt	$f18, (18 * 8)(t1)
	ldt	$f19, (19 * 8)(t1)
	ldt	$f20, (20 * 8)(t1)
	ldt	$f21, (21 * 8)(t1)
	ldt	$f22, (22 * 8)(t1)
	ldt	$f23, (23 * 8)(t1)
	ldt	$f24, (24 * 8)(t1)
	ldt	$f25, (25 * 8)(t1)
	ldt	$f26, (26 * 8)(t1)
	ldt	$f27, (27 * 8)(t1)
	ldt	$f28, (28 * 8)(t1)
	ldt	$f29, (29 * 8)(t1)
	ldt	$f30, (30 * 8)(t1)

	RET
	END(restorefpstate)

/**************************************************************************/

/*
 * savectx: save process context, i.e. callee-saved registers
 *
 * Note that savectx() only works for processes other than curlwp,
 * since cpu_switchto will copy over the info saved here.  (It _can_
 * sanely be used for curlwp iff cpu_switchto won't be called again, e.g.
 * if called from boot().)
 *
 * Arguments:
 *	a0	'struct pcb *' of the process that needs its context saved
 *
 * Return:
 *	v0	0.  (note that for child processes, it seems
 *		like savectx() returns 1, because the return address
 *		in the PCB is set to the return address from savectx().)
 */

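/*
 * Illustrative sketch of what is stored (field names are approximate;
 * the real offsets come from assym.h):
 *
 *	pcb->hwpcb.ksp     = sp;
 *	pcb->pcb_context[0..6] = s0..s6;
 *	pcb->pcb_context[7] = ra;
 *	pcb->pcb_context[8] = rdps();	// saved PS, for the IPL
 *	return 0;
 */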
LEAF(savectx, 1)
	br	pv, 1f
1:	LDGP(pv)
	stq	sp, PCB_HWPCB_KSP(a0)		/* store sp */
	stq	s0, PCB_CONTEXT+(0 * 8)(a0)	/* store s0 - s6 */
	stq	s1, PCB_CONTEXT+(1 * 8)(a0)
	stq	s2, PCB_CONTEXT+(2 * 8)(a0)
	stq	s3, PCB_CONTEXT+(3 * 8)(a0)
	stq	s4, PCB_CONTEXT+(4 * 8)(a0)
	stq	s5, PCB_CONTEXT+(5 * 8)(a0)
	stq	s6, PCB_CONTEXT+(6 * 8)(a0)
	stq	ra, PCB_CONTEXT+(7 * 8)(a0)	/* store ra */
	call_pal PAL_OSF1_rdps			/* NOTE: doesn't kill a0 */
	stq	v0, PCB_CONTEXT+(8 * 8)(a0)	/* store ps, for ipl */

	mov	zero, v0
	RET
	END(savectx)

/**************************************************************************/


/*
 * struct lwp *cpu_switchto(struct lwp *current, struct lwp *next)
 * Switch to the specified next LWP
 * Arguments:
 *	a0	'struct lwp *' of the LWP to switch from
 *	a1	'struct lwp *' of the LWP to switch to
 */
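/*
 * Rough outline of the switch, as a hedged C-style sketch:
 *
 *	if (current != NULL)
 *		save s0..s6 and ra into current's pcb_context[];
 *	swpctx(next->l_md.md_pcbpaddr);	   // SWITCH_CONTEXT; also swaps ksp
 *	curcpu()->ci_curlwp = next;
 *	if (next's proc has RAS entries)
 *		possibly rewrite the trapframe PC via ras_lookup();
 *	restore s0..s6 and ra from next's pcb_context[];
 *	return current;			   // the old lwp
 */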
LEAF(cpu_switchto, 0)
	LDGP(pv)

	beq	a0, 1f

	/*
	 * do an inline savectx(), to save old context
	 */
	ldq	a2, L_PCB(a0)
	/* NOTE: ksp is stored by the swpctx */
	stq	s0, PCB_CONTEXT+(0 * 8)(a2)	/* store s0 - s6 */
	stq	s1, PCB_CONTEXT+(1 * 8)(a2)
	stq	s2, PCB_CONTEXT+(2 * 8)(a2)
	stq	s3, PCB_CONTEXT+(3 * 8)(a2)
	stq	s4, PCB_CONTEXT+(4 * 8)(a2)
	stq	s5, PCB_CONTEXT+(5 * 8)(a2)
	stq	s6, PCB_CONTEXT+(6 * 8)(a2)
	stq	ra, PCB_CONTEXT+(7 * 8)(a2)	/* store ra */

1:
	mov	a0, s4				/* save old curlwp */
	mov	a1, s2				/* save new lwp */
	ldq	a0, L_MD_PCBPADDR(s2)		/* save new pcbpaddr */

	SWITCH_CONTEXT				/* swap the context */

	GET_CPUINFO
	stq	s2, CPU_INFO_CURLWP(v0)		/* curlwp = l */

	/*
	 * Now running on the new PCB.
	 */
	ldq	s0, L_PCB(s2)

	/*
	 * Check for restartable atomic sequences (RAS).
	 */
	ldq	a0, L_PROC(s2)			/* first ras_lookup() arg */
	ldq	t0, P_RASLIST(a0)		/* any RAS entries? */
	beq	t0, 1f				/* no, skip */
	ldq	s1, L_MD_TF(s2)			/* s1 = l->l_md.md_tf */
	ldq	a1, (FRAME_PC*8)(s1)		/* second ras_lookup() arg */
	CALL(ras_lookup)			/* ras_lookup(p, PC) */
	addq	v0, 1, t0			/* -1 means "not in ras" */
	beq	t0, 1f
	stq	v0, (FRAME_PC*8)(s1)

1:
	mov	s4, v0				/* return the old lwp */
	/*
	 * Restore registers and return.
	 * NOTE: ksp is restored by the swpctx.
	 */
	ldq	s1, PCB_CONTEXT+(1 * 8)(s0)		/* restore s1-s6 */
	ldq	s2, PCB_CONTEXT+(2 * 8)(s0)
	ldq	s3, PCB_CONTEXT+(3 * 8)(s0)
	ldq	s4, PCB_CONTEXT+(4 * 8)(s0)
	ldq	s5, PCB_CONTEXT+(5 * 8)(s0)
	ldq	s6, PCB_CONTEXT+(6 * 8)(s0)
	ldq	ra, PCB_CONTEXT+(7 * 8)(s0)		/* restore ra */
	ldq	s0, PCB_CONTEXT+(0 * 8)(s0)		/* restore s0 */

	RET
	END(cpu_switchto)

/*
 * lwp_trampoline()
 *
 * Arrange for a function to be invoked neatly, after a cpu_lwp_fork().
 *
 * Invokes the function specified by the s0 register with the return
 * address specified by the s1 register and with one argument specified
 * by the s2 register.
 */
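/*
 * Loosely (illustrative only), the new lwp's first run amounts to:
 *
 *	lwp_startup(prev, new);	// prev arrives in v0 from cpu_switchto;
 *				// new is presumed stashed in s3 by cpu_lwp_fork()
 *	(*func)(arg);		// func in s0, fake return address in s1, arg in s2
 */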
LEAF_NOPROFILE(lwp_trampoline, 0)
	mov	v0, a0
	mov	s3, a1
	CALL(lwp_startup)
	mov	s0, pv
	mov	s1, ra
	mov	s2, a0
	jmp	zero, (pv)
	END(lwp_trampoline)

/*
 * Simplified version of above: don't call lwp_startup()
 */
LEAF_NOPROFILE(setfunc_trampoline, 0)
	mov	s0, pv
	mov	s1, ra
	mov	s2, a0
	jmp	zero, (pv)
	END(setfunc_trampoline)

/**************************************************************************/

/*
 * Copy a null-terminated string within the kernel's address space.
 * If lenp is not NULL, store the number of chars copied in *lenp
 *
 * int copystr(char *from, char *to, size_t len, size_t *lenp);
 */
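/*
 * Illustrative C equivalent of the loop below (sketch only; note that
 * the count includes the terminating NUL when one is copied):
 *
 *	size_t i = len;
 *	int c = 1;
 *	while (len != 0) {
 *		c = *from;
 *		*to = c;
 *		len--;
 *		if (c == '\0')
 *			break;
 *		to++; from++;
 *	}
 *	if (lenp != NULL)
 *		*lenp = i - len;
 *	return (c == '\0') ? 0 : ENAMETOOLONG;
 */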
LEAF(copystr, 4)
	LDGP(pv)

	mov	a2, t0			/* t0 = i = len */
	bne	a2, 1f			/* if (len != 0), proceed */
	ldiq	t1, 1			/* else bail */
	br	zero, 2f

1:	ldq_u	t1, 0(a0)		/* t1 = *from */
	extbl	t1, a0, t1
	ldq_u	t3, 0(a1)		/* set up t2 with quad around *to */
	insbl	t1, a1, t2
	mskbl	t3, a1, t3
	or	t3, t2, t3		/* add *from to quad around *to */
	stq_u	t3, 0(a1)		/* write out that quad */

	subl	a2, 1, a2		/* len-- */
	beq	t1, 2f			/* if (*from == 0), bail out */
	addq	a1, 1, a1		/* to++ */
	addq	a0, 1, a0		/* from++ */
	bne	a2, 1b			/* if (len != 0) copy more */

2:	beq	a3, 3f			/* if (lenp != NULL) */
	subl	t0, a2, t0		/* *lenp = (i - len) */
	stq	t0, 0(a3)
3:	beq	t1, 4f			/* *from == '\0'; leave quietly */

	ldiq	v0, ENAMETOOLONG	/* *from != '\0'; error. */
	RET

4:	mov	zero, v0		/* return 0. */
	RET
	END(copystr)

NESTED(copyinstr, 4, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
	cmpult	a0, t0, t1			/* is in user space.	     */
	beq	t1, copyerr_efault		/* if it's not, error out.   */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	mov	v0, s0
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, 0(s0)
	ldq	at_reg, L_PCB(at_reg)
	stq	v0, PCB_ONFAULT(at_reg)
	.set at
	CALL(copystr)				/* do the copy.		     */
	.set noat
	ldq	at_reg, 0(s0)			/* kill the fault handler.   */
	ldq	at_reg, L_PCB(at_reg)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	RET					/* v0 left over from copystr */
	END(copyinstr)

NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr  */
	cmpult	a1, t0, t1			/* is in user space.	     */
	beq	t1, copyerr_efault		/* if it's not, error out.   */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	mov	v0, s0
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, 0(s0)
	ldq	at_reg, L_PCB(at_reg)
	stq	v0, PCB_ONFAULT(at_reg)
	.set at
	CALL(copystr)				/* do the copy.		     */
	.set noat
	ldq	at_reg, 0(s0)			/* kill the fault handler.   */
	ldq	at_reg, L_PCB(at_reg)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	RET					/* v0 left over from copystr */
	END(copyoutstr)

/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
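/*
 * The onfault protocol used here (and, without the save/restore of the
 * previous handler, in copyin/copyout below), as a hedged C-style sketch:
 *
 *	saved = curlwp's pcb->pcb_onfault;
 *	pcb->pcb_onfault = kcopyerr;	// a fatal fault resumes at kcopyerr
 *	memcpy(dst, src, len);
 *	pcb->pcb_onfault = saved;
 *	return 0;			// kcopyerr unwinds and returns an error
 */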
NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0)
	LDGP(pv)
	lda	sp, -32(sp)			/* set up stack frame	     */
	stq	ra, (32-8)(sp)			/* save ra		     */
	stq	s0, (32-16)(sp)			/* save s0		     */
	stq	s1, (32-24)(sp)			/* save s1		     */
	/* Swap a0, a1, for call to memcpy(). */
	mov	a1, v0
	mov	a0, a1
	mov	v0, a0
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	s1, 0(v0)			/* s1 = curlwp		     */
	lda	v0, kcopyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, L_PCB(s1)
	ldq	s0, PCB_ONFAULT(at_reg)	/* save old handler.	     */
	stq	v0, PCB_ONFAULT(at_reg)
	.set at
	CALL(memcpy)				/* do the copy.		     */
	.set noat
	ldq	at_reg, L_PCB(s1)		/* restore the old handler.  */
	stq	s0, PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (32-8)(sp)			/* restore ra.		     */
	ldq	s0, (32-16)(sp)			/* restore s0.		     */
	ldq	s1, (32-24)(sp)			/* restore s1.		     */
	lda	sp, 32(sp)			/* kill stack frame.	     */
	mov	zero, v0			/* return 0. */
	RET
	END(kcopy)

LEAF(kcopyerr, 0)
	LDGP(pv)
	.set noat
	ldq	at_reg, L_PCB(s1)		/* restore the old handler.  */
	stq	s0, PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (32-8)(sp)			/* restore ra.		     */
	ldq	s0, (32-16)(sp)			/* restore s0.		     */
	ldq	s1, (32-24)(sp)			/* restore s1.		     */
	lda	sp, 32(sp)			/* kill stack frame.	     */
	RET
END(kcopyerr)

NESTED(copyin, 3, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
	cmpult	a0, t0, t1			/* is in user space.	     */
	beq	t1, copyerr_efault		/* if it's not, error out.   */
	/* Swap a0, a1, for call to memcpy(). */
	mov	a1, v0
	mov	a0, a1
	mov	v0, a0
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	s0, 0(v0)			/* s0 = curlwp		     */
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, L_PCB(s0)
	stq	v0, PCB_ONFAULT(at_reg)
	.set at
	CALL(memcpy)				/* do the copy.		     */
	.set noat
	ldq	at_reg, L_PCB(s0)		/* kill the fault handler.   */
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	mov	zero, v0			/* return 0. */
	RET
	END(copyin)

NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr  */
	cmpult	a1, t0, t1			/* is in user space.	     */
	beq	t1, copyerr_efault		/* if it's not, error out.   */
	/* Swap a0, a1, for call to memcpy(). */
	mov	a1, v0
	mov	a0, a1
	mov	v0, a0
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	s0, 0(v0)			/* s0 = curlwp		     */
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, L_PCB(s0)
	stq	v0, PCB_ONFAULT(at_reg)
	.set at
	CALL(memcpy)				/* do the copy.		     */
	.set noat
	ldq	at_reg, L_PCB(s0)		/* kill the fault handler.   */
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	mov	zero, v0			/* return 0. */
	RET
	END(copyout)

LEAF(copyerr_efault, 0)
	ldiq	v0, EFAULT			/* return EFAULT.	     */
XLEAF(copyerr, 0)
	LDGP(pv)
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	RET
END(copyerr)

/**************************************************************************/

/*
 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
 * user text space.
 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
 * user data space.
 */
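/*
 * Loose C sketch of the pattern these fetch/store routines follow
 * (illustrative only):
 *
 *	if (addr >= VM_MAX_ADDRESS)
 *		return -1;			// via fswberr
 *	curlwp's pcb->pcb_onfault = fswberr;	// a fault yields -1
 *	access the user memory;
 *	pcb->pcb_onfault = 0;
 *	return value;				// or 0 for the store routines
 */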
LEAF(fuword, 1)
XLEAF(fuiword, 1)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	.set at
	ldq	v0, 0(a0)
	zap	v0, 0xf0, v0
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	RET
	END(fuword)

LEAF(fusword, 1)
XLEAF(fuisword, 1)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	.set at
	/* XXX FETCH IT */
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	RET
	END(fusword)

LEAF(fubyte, 1)
XLEAF(fuibyte, 1)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	.set at
	/* XXX FETCH IT */
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	RET
	END(fubyte)

LEAF(suword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	.set at
	stq	a1, 0(a0)			/* do the store. */
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	mov	zero, v0
	RET
	END(suword)

#ifdef notdef
LEAF(suiword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	call_pal PAL_OSF1_imb			/* sync instruction stream */
	mov	zero, v0
	RET
	END(suiword)

LEAF(susword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	mov	zero, v0
	RET
	END(susword)

LEAF(suisword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	call_pal PAL_OSF1_imb			/* sync instruction stream */
	mov	zero, v0
	RET
	END(suisword)
#endif /* notdef */

LEAF(subyte, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	.set at
	zap	a1, 0xfe, a1			/* kill arg's high bytes */
	insbl	a1, a0, a1			/* move it to the right byte */
	ldq_u	t0, 0(a0)			/* load quad around byte */
	mskbl	t0, a0, t0			/* kill the target byte */
	or	t0, a1, a1			/* put the result together */
	stq_u	a1, 0(a0)			/* and store it. */
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	mov	zero, v0
	RET
	END(subyte)

LEAF(suibyte, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	.set at
	zap	a1, 0xfe, a1			/* kill arg's high bytes */
	insbl	a1, a0, a1			/* move it to the right byte */
	ldq_u	t0, 0(a0)			/* load quad around byte */
	mskbl	t0, a0, t0			/* kill the target byte */
	or	t0, a1, a1			/* put the result together */
	stq_u	a1, 0(a0)			/* and store it. */
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	call_pal PAL_OSF1_imb			/* sync instruction stream */
	mov	zero, v0
	RET
	END(suibyte)

LEAF(fswberr, 0)
	LDGP(pv)
	ldiq	v0, -1
	RET
	END(fswberr)

/**************************************************************************/

#ifdef notdef
/*
 * fuswintr and suswintr are just like fusword and susword except that if
 * the page is not in memory or would cause a trap, then we return an error.
 * The important thing is to prevent sleep() and switch().
 */

LEAF(fuswintr, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswintrberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswintrberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	stq	a0, PCB_ACCESSADDR(at_reg)
	.set at
	/* XXX FETCH IT */
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	RET
	END(fuswintr)

LEAF(suswintr, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswintrberr			/* if it's not, error out. */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	t1, 0(v0)
	lda	t0, fswintrberr
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	t0, PCB_ONFAULT(at_reg)
	stq	a0, PCB_ACCESSADDR(at_reg)
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, L_PCB(t1)
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	mov	zero, v0
	RET
	END(suswintr)
#endif

LEAF(fswintrberr, 0)
XLEAF(fuswintr, 2)				/* XXX what is a 'word'? */
XLEAF(suswintr, 2)				/* XXX what is a 'word'? */
	LDGP(pv)
	ldiq	v0, -1
	RET
	END(fswintrberr)

/*
 * int ucas_32(volatile int32_t *uptr, int32_t old, int32_t new, int32_t *ret);
 */

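/*
 * Semantics, as a hedged C-style sketch (the real routine uses an
 * LDL_L/STL_C loop with the copyerr onfault handler):
 *
 *	do {
 *		cur = load-locked *uptr;
 *		if (cur != old)
 *			break;			// no store attempted
 *	} while (store-conditional of new to *uptr failed);
 *	*ret = cur;
 *	return 0;		// EFAULT on a bad or misaligned address
 */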
NESTED(ucas_32, 4, 16, ra, IM_S0 | IM_RA, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
	cmpult	a0, t0, t1			/* is in user space.	     */
	beq	t1, copyerr_efault		/* if it's not, error out.   */
	and	a0, 3, t1			/* check if addr is aligned. */
	bne	t1, copyerr_efault		/* if it's not, error out.   */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	s0, 0(v0)			/* s0 = curlwp		     */
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, L_PCB(s0)
	stq	v0, PCB_ONFAULT(at_reg)
	.set at

3:
	ldl_l	t0, 0(a0)			/* t0 = *uptr */
	cmpeq	t0, a1, t1			/* does t0 = old? */
	beq	t1, 1f				/* if not, skip */
	mov	a2, t1
	stl_c	t1, 0(a0)			/* *uptr ~= new */
	beq	t1, 2f				/* did it work? */
1:
	stl	t0, 0(a3)			/* *ret = t0 */
	mov	zero, v0

	.set noat
	ldq	at_reg, L_PCB(s0)		/* kill the fault handler.   */
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	RET					/* v0 already holds the result */

2:
	br	3b
END(ucas_32)

STRONG_ALIAS(ucas_int,ucas_32)

/*
 * int ucas_64(volatile int64_t *uptr, int64_t old, int64_t new, int64_t *ret);
 */

NESTED(ucas_64, 4, 16, ra, IM_S0 | IM_RA, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
	cmpult	a0, t0, t1			/* is in user space.	     */
	beq	t1, copyerr_efault		/* if it's not, error out.   */
	and	a0, 3, t1			/* check if addr is aligned. */
	bne	t1, copyerr_efault		/* if it's not, error out.   */
	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
	GET_CURLWP
	ldq	s0, 0(v0)			/* s0 = curlwp		     */
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, L_PCB(s0)
	stq	v0, PCB_ONFAULT(at_reg)
	.set at

3:
	ldq_l	t0, 0(a0)			/* t0 = *uptr */
	cmpeq	t0, a1, t1			/* does t0 = old? */
	beq	t1, 1f				/* if not, skip */
	mov	a2, t1
	stq_c	t1, 0(a0)			/* *uptr ~= new */
	beq	t1, 2f				/* did it work? */
1:
	stq	t0, 0(a3)			/* *ret = t0 */
	mov	zero, v0

	.set noat
	ldq	at_reg, L_PCB(s0)		/* kill the fault handler.   */
	stq	zero, PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	RET					/* v0 already holds the result */

2:
	br	3b
END(ucas_64)

STRONG_ALIAS(ucas_ptr,ucas_64)

/**************************************************************************/

/*
 * console 'restart' routine to be placed in HWRPB.
 */
LEAF(XentRestart, 1)			/* XXX should be NESTED */
	.set noat
	lda	sp,-(FRAME_SIZE*8)(sp)
	stq	at_reg,(FRAME_AT*8)(sp)
	.set at
	stq	v0,(FRAME_V0*8)(sp)
	stq	a0,(FRAME_A0*8)(sp)
	stq	a1,(FRAME_A1*8)(sp)
	stq	a2,(FRAME_A2*8)(sp)
	stq	a3,(FRAME_A3*8)(sp)
	stq	a4,(FRAME_A4*8)(sp)
	stq	a5,(FRAME_A5*8)(sp)
	stq	s0,(FRAME_S0*8)(sp)
	stq	s1,(FRAME_S1*8)(sp)
	stq	s2,(FRAME_S2*8)(sp)
	stq	s3,(FRAME_S3*8)(sp)
	stq	s4,(FRAME_S4*8)(sp)
	stq	s5,(FRAME_S5*8)(sp)
	stq	s6,(FRAME_S6*8)(sp)
	stq	t0,(FRAME_T0*8)(sp)
	stq	t1,(FRAME_T1*8)(sp)
	stq	t2,(FRAME_T2*8)(sp)
	stq	t3,(FRAME_T3*8)(sp)
	stq	t4,(FRAME_T4*8)(sp)
	stq	t5,(FRAME_T5*8)(sp)
	stq	t6,(FRAME_T6*8)(sp)
	stq	t7,(FRAME_T7*8)(sp)
	stq	t8,(FRAME_T8*8)(sp)
	stq	t9,(FRAME_T9*8)(sp)
	stq	t10,(FRAME_T10*8)(sp)
	stq	t11,(FRAME_T11*8)(sp)
	stq	t12,(FRAME_T12*8)(sp)
	stq	ra,(FRAME_RA*8)(sp)

	br	pv,1f
1:	LDGP(pv)

	mov	sp,a0
	CALL(console_restart)

	call_pal PAL_halt
	END(XentRestart)

/**************************************************************************/

/*
 * Kernel setjmp and longjmp.  Rather minimalist.
 *
 *	longjmp(label_t *a)
 * will generate a "return (1)" from the last call to
 *	setjmp(label_t *a)
 * by restoring registers from the stack.
 */

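/*
 * Typical in-kernel usage (illustrative sketch only):
 *
 *	label_t jb;
 *	if (setjmp(&jb) == 0) {
 *		// normal path; something may later call longjmp(&jb)
 *	} else {
 *		// resumed here, with setjmp() appearing to return 1
 *	}
 */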
	.set	noreorder

LEAF(setjmp, 1)
	LDGP(pv)

	stq	ra, (0 * 8)(a0)			/* return address */
	stq	s0, (1 * 8)(a0)			/* callee-saved registers */
	stq	s1, (2 * 8)(a0)
	stq	s2, (3 * 8)(a0)
	stq	s3, (4 * 8)(a0)
	stq	s4, (5 * 8)(a0)
	stq	s5, (6 * 8)(a0)
	stq	s6, (7 * 8)(a0)
	stq	sp, (8 * 8)(a0)

	ldiq	t0, 0xbeeffedadeadbabe		/* set magic number */
	stq	t0, (9 * 8)(a0)

	mov	zero, v0			/* return zero */
	RET
END(setjmp)

LEAF(longjmp, 1)
	LDGP(pv)

	ldiq	t0, 0xbeeffedadeadbabe		/* check magic number */
	ldq	t1, (9 * 8)(a0)
	cmpeq	t0, t1, t0
	beq	t0, longjmp_botch		/* if bad, punt */

	ldq	ra, (0 * 8)(a0)			/* return address */
	ldq	s0, (1 * 8)(a0)			/* callee-saved registers */
	ldq	s1, (2 * 8)(a0)
	ldq	s2, (3 * 8)(a0)
	ldq	s3, (4 * 8)(a0)
	ldq	s4, (5 * 8)(a0)
	ldq	s5, (6 * 8)(a0)
	ldq	s6, (7 * 8)(a0)
	ldq	sp, (8 * 8)(a0)

	ldiq	v0, 1
	RET

longjmp_botch:
	lda	a0, longjmp_botchmsg
	mov	ra, a1
	CALL(panic)
	call_pal PAL_bugchk

	.data
longjmp_botchmsg:
	.asciz	"longjmp botch from %p"
	.text
END(longjmp)

/*
 * void alpha_sts(int rn, u_int32_t *rval);
 * void alpha_stt(int rn, u_int64_t *rval);
 * void alpha_lds(int rn, u_int32_t *rval);
 * void alpha_ldt(int rn, u_int64_t *rval);
 */

.macro	make_freg_util name, op
	LEAF(alpha_\name, 2)
	and	a0, 0x1f, a0
	s8addq	a0, pv, pv
	addq	pv, 1f - alpha_\name, pv
	jmp	(pv)
1:
	rn = 0
	.rept	32
	\op	$f0 + rn, 0(a1)
	RET
	rn = rn + 1
	.endr
	END(alpha_\name)
.endm
/*
LEAF(alpha_sts, 2)
LEAF(alpha_stt, 2)
LEAF(alpha_lds, 2)
LEAF(alpha_ldt, 2)
 */
	make_freg_util sts, sts
	make_freg_util stt, stt
	make_freg_util lds, lds
	make_freg_util ldt, ldt

LEAF(alpha_read_fpcr, 0); f30save = 0; rettmp = 8; framesz = 16
	lda	sp, -framesz(sp)
	stt	$f30, f30save(sp)
	mf_fpcr	$f30
	stt	$f30, rettmp(sp)
	ldt	$f30, f30save(sp)
	ldq	v0, rettmp(sp)
	lda	sp, framesz(sp)
	RET
END(alpha_read_fpcr)

LEAF(alpha_write_fpcr, 1); f30save = 0; fpcrtmp = 8; framesz = 16
	lda	sp, -framesz(sp)
	stq	a0, fpcrtmp(sp)
	stt	$f30, f30save(sp)
	ldt	$f30, fpcrtmp(sp)
	mt_fpcr	$f30
	ldt	$f30, f30save(sp)
	lda	sp, framesz(sp)
	RET
END(alpha_write_fpcr)