/*	$OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $	*/
/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Digital Equipment Corporation and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (C) 1989 Digital Equipment Corporation.
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies.
 * Digital Equipment Corporation makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
 *	v 1.1 89/07/11 17:55:04 nelson Exp  SPRITE (DECWRL)
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
 *	v 9.2 90/01/29 18:00:39 shirriff Exp  SPRITE (DECWRL)
 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
 *	v 1.1 89/07/10 14:27:41 nelson Exp  SPRITE (DECWRL)
 *
 *	from: @(#)locore.s	8.5 (Berkeley) 1/4/94
 *	JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish
 * $FreeBSD$
 */

/*
 * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Jonathan R. Stone for
 *      the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 *	Contains assembly language support routines.
 */

#include "opt_ddb.h"
#include <sys/errno.h>
#include <machine/asm.h>
#include <machine/cpu.h>
#include <machine/regnum.h>
#include <machine/cpuregs.h>
#include <machine/pcb.h>

#include "assym.s"

	.set	noreorder		# Noreorder is default style!

/*
 * Primitives
 */

	.text

/*
 * See if access to addr with a len type instruction causes a machine check.
 * len is length of access (1=byte, 2=short, 4=int)
 *
 * badaddr(addr, len)
 *	char *addr;
 *	int len;
 */
LEAF(badaddr)
	PTR_LA	v0, baderr
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	bne	a1, 1, 2f
	PTR_S	v0, U_PCB_ONFAULT(v1)
	b	5f
	lbu	v0, (a0)
2:
	bne	a1, 2, 4f
	nop
	b	5f
	lhu	v0, (a0)
4:
	lw	v0, (a0)
5:
	PTR_S	zero, U_PCB_ONFAULT(v1)
	j	ra
	move	v0, zero		# made it w/o errors
baderr:
	j	ra
	li	v0, 1			# trap sends us here
END(badaddr)
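
/*
 * Illustrative C equivalent of the pcb_onfault protocol used above.
 * This is a sketch only: the redirection to baderr actually happens in
 * the trap handler when pcb_onfault is set, and the curpcb/pcb_onfault
 * names follow <machine/pcb.h>.
 *
 *	int
 *	badaddr(char *addr, int len)
 *	{
 *		struct pcb *pcb = curpcb;
 *
 *		pcb->pcb_onfault = (void *)baderr;	// trap handler jumps there
 *		switch (len) {
 *		case 1:  (void)*(volatile uint8_t *)addr;  break;
 *		case 2:  (void)*(volatile uint16_t *)addr; break;
 *		default: (void)*(volatile uint32_t *)addr; break;
 *		}
 *		pcb->pcb_onfault = NULL;
 *		return (0);		// the baderr path returns 1 instead
 *	}
 */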

/*
 * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
 * Copy a NIL-terminated string, at most maxlen characters long.  Return the
 * number of characters copied (including the NIL) in *lencopied.  If the
 * string is too long, return ENAMETOOLONG; else return 0.
 */
LEAF(copystr)
	move		t0, a2
	beq		a2, zero, 4f
1:
	lbu		v0, 0(a0)
	PTR_SUBU	a2, a2, 1
	beq		v0, zero, 2f
	sb		v0, 0(a1)		# each byte until NIL
	PTR_ADDU	a0, a0, 1
	bne		a2, zero, 1b		# less than maxlen
	PTR_ADDU	a1, a1, 1
4:
	li		v0, ENAMETOOLONG	# run out of space
2:
	beq		a3, zero, 3f		# return num. of copied bytes
	PTR_SUBU	a2, t0, a2		# if the 4th arg was non-NULL
	PTR_S		a2, 0(a3)
3:
	j		ra			# v0 is 0 or ENAMETOOLONG
	nop
END(copystr)
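
/*
 * For reference, a C sketch of the loop above (kernel-to-kernel copy;
 * this is illustrative, not a drop-in replacement):
 *
 *	int
 *	copystr(const void *kfaddr, void *kdaddr, size_t maxlen,
 *	    size_t *lencopied)
 *	{
 *		const char *src = kfaddr;
 *		char *dst = kdaddr;
 *		size_t len;
 *
 *		for (len = 0; len < maxlen; len++) {
 *			if ((*dst++ = *src++) == '\0') {
 *				if (lencopied != NULL)
 *					*lencopied = len + 1;	// count the NIL
 *				return (0);
 *			}
 *		}
 *		if (lencopied != NULL)
 *			*lencopied = maxlen;
 *		return (ENAMETOOLONG);
 *	}
 */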


/*
 * Copy a null terminated string from the user address space into
 * the kernel address space.
 *
 *	copyinstr(fromaddr, toaddr, maxlength, &lencopied)
 *		caddr_t fromaddr;
 *		caddr_t toaddr;
 *		u_int maxlength;
 *		u_int *lencopied;
 */
NON_LEAF(copyinstr, CALLFRAME_SIZ, ra)
	PTR_SUBU	sp, sp, CALLFRAME_SIZ
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
	PTR_LA	v0, copyerr
	blt	a0, zero, _C_LABEL(copyerr)  # make sure address is in user space
	REG_S	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	jal	_C_LABEL(copystr)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	REG_L	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	zero, U_PCB_ONFAULT(v1)
	j	ra
	PTR_ADDU	sp, sp, CALLFRAME_SIZ
END(copyinstr)

/*
 * Copy a null terminated string from the kernel address space into
 * the user address space.
 *
 *	copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
 *		caddr_t fromaddr;
 *		caddr_t toaddr;
 *		u_int maxlength;
 *		u_int *lencopied;
 */
NON_LEAF(copyoutstr, CALLFRAME_SIZ, ra)
	PTR_SUBU	sp, sp, CALLFRAME_SIZ
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
	PTR_LA	v0, copyerr
	blt	a1, zero, _C_LABEL(copyerr)  # make sure address is in user space
	REG_S	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	jal	_C_LABEL(copystr)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	REG_L	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	zero, U_PCB_ONFAULT(v1)
	j	ra
	PTR_ADDU	sp, sp, CALLFRAME_SIZ
END(copyoutstr)

/*
 * Copy specified amount of data from user space into the kernel
 *	copyin(from, to, len)
 *		caddr_t *from;	(user source address)
 *		caddr_t *to;	(kernel destination address)
 *		unsigned len;
 */
NON_LEAF(copyin, CALLFRAME_SIZ, ra)
	PTR_SUBU	sp, sp, CALLFRAME_SIZ
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
	PTR_LA	v0, copyerr
	blt	a0, zero, _C_LABEL(copyerr)  # make sure address is in user space
	REG_S	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	jal	_C_LABEL(bcopy)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	REG_L	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)	 	# bcopy modified v1, so reload
	PTR_S	zero, U_PCB_ONFAULT(v1)
	PTR_ADDU	sp, sp, CALLFRAME_SIZ
	j	ra
	move	v0, zero
END(copyin)

/*
 * Copy specified amount of data from kernel to the user space
 *	copyout(from, to, len)
 *		caddr_t *from;	(kernel source address)
 *		caddr_t *to;	(user destination address)
 *		unsigned len;
 */
NON_LEAF(copyout, CALLFRAME_SIZ, ra)
	PTR_SUBU	sp, sp, CALLFRAME_SIZ
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
	PTR_LA	v0, copyerr
	blt	a1, zero, _C_LABEL(copyerr) # make sure address is in user space
	REG_S	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	jal	_C_LABEL(bcopy)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	REG_L	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)	 	# bcopy modified v1, so reload
	PTR_S	zero, U_PCB_ONFAULT(v1)
	PTR_ADDU	sp, sp, CALLFRAME_SIZ
	j	ra
	move	v0, zero
END(copyout)

LEAF(copyerr)
	REG_L	ra, CALLFRAME_RA(sp)
	PTR_ADDU	sp, sp, CALLFRAME_SIZ
	j	ra
	li	v0, EFAULT			# return error
END(copyerr)

/*
 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
 * user text space.
 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
 * user data space.
 */
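/*
 * All of the fu* and su* routines below share one pattern, sketched here
 * in C (hand-waving the per-CPU access; curpcb and pcb_onfault are the
 * real fields, fswberr is the shared error label):
 *
 *	long
 *	fuword32(volatile const void *addr)
 *	{
 *		if ((intptr_t)addr < 0)		// kernel address: reject
 *			return (-1);
 *		curpcb->pcb_onfault = (void *)fswberr;
 *		uint32_t v = *(volatile const uint32_t *)addr;
 *		curpcb->pcb_onfault = NULL;
 *		return (v);			// fswberr returns -1
 *	}
 */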
#ifdef __mips_n64
LEAF(fuword64)
ALEAF(fuword)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	ld	v0, 0(a0)		# fetch word
	j	ra
	PTR_S	zero, U_PCB_ONFAULT(v1)
END(fuword64)
#endif

LEAF(fuword32)
#ifndef __mips_n64
ALEAF(fuword)
#endif
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	lw	v0, 0(a0)		# fetch word
	j	ra
	PTR_S	zero, U_PCB_ONFAULT(v1)
END(fuword32)

LEAF(fusword)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	lhu	v0, 0(a0)		# fetch short
	j	ra
	PTR_S	zero, U_PCB_ONFAULT(v1)
END(fusword)

LEAF(fubyte)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	lbu	v0, 0(a0)		# fetch byte
	j	ra
	PTR_S	zero, U_PCB_ONFAULT(v1)
END(fubyte)

LEAF(suword32)
#ifndef __mips_n64
XLEAF(suword)
#endif
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	sw	a1, 0(a0)		# store word
	PTR_S	zero, U_PCB_ONFAULT(v1)
	j	ra
	move	v0, zero
END(suword32)

#ifdef __mips_n64
LEAF(suword64)
XLEAF(suword)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	sd	a1, 0(a0)		# store double word
	PTR_S	zero, U_PCB_ONFAULT(v1)
	j	ra
	move	v0, zero
END(suword64)
#endif

/*
 * casuword(9)
 * <v0>u_long casuword(<a0>u_long *p, <a1>u_long oldval, <a2>u_long newval)
 */
/*
 * casuword32(9)
 * <v0>uint32_t casuword32(<a0>uint32_t *p, <a1>uint32_t oldval,
 *							<a2>uint32_t newval)
 */
LEAF(casuword32)
#ifndef __mips_n64
XLEAF(casuword)
#endif
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
1:
	move	t0, a2
	ll	v0, 0(a0)
	bne	a1, v0, 2f
	nop
	sc	t0, 0(a0)		# store word
	beqz	t0, 1b
	nop
	j	3f
	nop
2:
	li	v0, -1
3:
	PTR_S	zero, U_PCB_ONFAULT(v1)
	jr	ra
	nop
END(casuword32)

#ifdef __mips_n64
LEAF(casuword64)
XLEAF(casuword)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
1:
	move	t0, a2
	lld	v0, 0(a0)
	bne	a1, v0, 2f
	nop
	scd	t0, 0(a0)		# store double word
	beqz	t0, 1b
	nop
	j	3f
	nop
2:
	li	v0, -1
3:
	PTR_S	zero, U_PCB_ONFAULT(v1)
	jr	ra
	nop
END(casuword64)
#endif
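
/*
 * The ll/sc retry loop above implements compare-and-swap.  Roughly, in C
 * (a sketch only, ignoring the user-address check and onfault setup;
 * load_linked()/store_conditional() are stand-ins for the ll/sc or
 * lld/scd instruction pair, not real functions):
 *
 *	u_long
 *	casuword(volatile u_long *p, u_long oldval, u_long newval)
 *	{
 *		u_long cur;
 *
 *		do {
 *			cur = load_linked(p);		// ll / lld
 *			if (cur != oldval)
 *				return (-1);		// mismatch: fail
 *		} while (!store_conditional(p, newval));	// sc / scd retries
 *		return (cur);			// old value on success
 *	}
 */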

/*
 * Will have to flush the instruction cache if byte merging is done in hardware.
 */
LEAF(susword)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	sh	a1, 0(a0)		# store short
	PTR_S	zero, U_PCB_ONFAULT(v1)
	j	ra
	move	v0, zero
END(susword)

LEAF(subyte)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	sb	a1, 0(a0)		# store byte
	PTR_S	zero, U_PCB_ONFAULT(v1)
	j	ra
	move	v0, zero
END(subyte)

LEAF(fswberr)
	j	ra
	li	v0, -1
END(fswberr)

/*
 * fuswintr and suswintr are just like fusword and susword except that if
 * the page is not in memory or would cause a trap, then we return an error.
 * The important thing is to prevent sleep() and switch().
 */
LEAF(fuswintr)
	PTR_LA	v0, fswintrberr
	blt	a0, zero, fswintrberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	lhu	v0, 0(a0)		# fetch short
	j	ra
	PTR_S	zero, U_PCB_ONFAULT(v1)
END(fuswintr)

LEAF(suswintr)
	PTR_LA	v0, fswintrberr
	blt	a0, zero, fswintrberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)
	sh	a1, 0(a0)		# store short
	PTR_S	zero, U_PCB_ONFAULT(v1)
	j	ra
	move	v0, zero
END(suswintr)

LEAF(fswintrberr)
	j	ra
	li	v0, -1
END(fswintrberr)

/*
 * memset(void *s1, int c, int len)
 * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp
 */
LEAF(memset)
	.set noreorder
	blt	a2, 12, memsetsmallclr	# small amount to clear?
	move	v0, a0			# save s1 for result

	sll	t1, a1, 8		# compute c << 8 in t1
	or	t1, t1, a1		# compute c << 8 | c in t1
	sll	t2, t1, 16		# shift that left 16
	or	t1, t2, t1		# or together

	PTR_SUBU	t0, zero, a0		# compute # bytes to word align address
	and	t0, t0, 3
	beq	t0, zero, 1f		# skip if word aligned
	PTR_SUBU	a2, a2, t0		# subtract from remaining count
	SWHI	t1, 0(a0)		# store 1, 2, or 3 bytes to align
	PTR_ADDU	a0, a0, t0
1:
	and	v1, a2, 3		# compute number of leftover bytes
	PTR_SUBU	t0, a2, v1
	PTR_SUBU	a2, a2, t0
	PTR_ADDU	t0, t0, a0		# compute ending address
2:
	PTR_ADDU	a0, a0, 4		# clear words
	bne	a0, t0, 2b		#  unrolling loop does not help
	sw	t1, -4(a0)		#  since we are limited by memory speed

memsetsmallclr:
	ble	a2, zero, 2f
	PTR_ADDU	t0, a2, a0		# compute ending address
1:
	PTR_ADDU	a0, a0, 1		# clear bytes
	bne	a0, t0, 1b
	sb	a1, -1(a0)
2:
	j	ra
	nop
	.set reorder
END(memset)
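
/*
 * The word-fill value above is built by byte replication.  The same trick
 * in C (a sketch for 32-bit words, as used here; the function name is
 * illustrative):
 *
 *	uint32_t
 *	replicate_byte(uint8_t c)
 *	{
 *		uint32_t v = c;
 *
 *		v |= v << 8;	// c << 8 | c
 *		v |= v << 16;	// four copies of c, one per byte lane
 *		return (v);
 *	}
 */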

/*
 * bzero(s1, n)
 */
LEAF(bzero)
ALEAF(blkclr)
	.set	noreorder
	blt	a1, 12, smallclr	# small amount to clear?
	PTR_SUBU	a3, zero, a0		# compute # bytes to word align address
	and	a3, a3, 3
	beq	a3, zero, 1f		# skip if word aligned
	PTR_SUBU	a1, a1, a3		# subtract from remaining count
	SWHI	zero, 0(a0)		# clear 1, 2, or 3 bytes to align
	PTR_ADDU	a0, a0, a3
1:
	and	v0, a1, 3		# compute number of leftover bytes
	PTR_SUBU	a3, a1, v0
	move	a1, v0
	PTR_ADDU	a3, a3, a0		# compute ending address
2:
	PTR_ADDU	a0, a0, 4		# clear words
	bne	a0, a3, 2b		#  unrolling loop does not help
	sw	zero, -4(a0)		#  since we are limited by memory speed
smallclr:
	ble	a1, zero, 2f
	PTR_ADDU	a3, a1, a0		# compute ending address
1:
	PTR_ADDU	a0, a0, 1		# clear bytes
	bne	a0, a3, 1b
	sb	zero, -1(a0)
2:
	j	ra
	nop
END(bzero)


/*
 * bcmp(s1, s2, n)
 */
LEAF(bcmp)
	.set	noreorder
	blt	a2, 16, smallcmp	# is it worth any trouble?
	xor	v0, a0, a1		# compare low two bits of addresses
	and	v0, v0, 3
	PTR_SUBU	a3, zero, a1		# compute # bytes to word align address
	bne	v0, zero, unalignedcmp	# not possible to align addresses
	and	a3, a3, 3

	beq	a3, zero, 1f
	PTR_SUBU	a2, a2, a3		# subtract from remaining count
	move	v0, v1			# init v0,v1 so unmodified bytes match
	LWHI	v0, 0(a0)		# read 1, 2, or 3 bytes
	LWHI	v1, 0(a1)
	PTR_ADDU	a1, a1, a3
	bne	v0, v1, nomatch
	PTR_ADDU	a0, a0, a3
1:
	and	a3, a2, ~3		# compute number of whole words left
	PTR_SUBU	a2, a2, a3		#   which has to be >= (16-3) & ~3
	PTR_ADDU	a3, a3, a0		# compute ending address
2:
	lw	v0, 0(a0)		# compare words
	lw	v1, 0(a1)
	PTR_ADDU	a0, a0, 4
	bne	v0, v1, nomatch
	PTR_ADDU	a1, a1, 4
	bne	a0, a3, 2b
	nop
	b	smallcmp		# finish remainder
	nop
unalignedcmp:
	beq	a3, zero, 2f
	PTR_SUBU	a2, a2, a3		# subtract from remaining count
	PTR_ADDU	a3, a3, a0		# compute ending address
1:
	lbu	v0, 0(a0)		# compare bytes until a1 word aligned
	lbu	v1, 0(a1)
	PTR_ADDU	a0, a0, 1
	bne	v0, v1, nomatch
	PTR_ADDU	a1, a1, 1
	bne	a0, a3, 1b
	nop
2:
	and	a3, a2, ~3		# compute number of whole words left
	PTR_SUBU	a2, a2, a3		#   which has to be >= (16-3) & ~3
	PTR_ADDU	a3, a3, a0		# compute ending address
3:
	LWHI	v0, 0(a0)		# compare words a0 unaligned, a1 aligned
	LWLO	v0, 3(a0)
	lw	v1, 0(a1)
	PTR_ADDU	a0, a0, 4
	bne	v0, v1, nomatch
	PTR_ADDU	a1, a1, 4
	bne	a0, a3, 3b
	nop
smallcmp:
	ble	a2, zero, match
	PTR_ADDU	a3, a2, a0		# compute ending address
1:
	lbu	v0, 0(a0)
	lbu	v1, 0(a1)
	PTR_ADDU	a0, a0, 1
	bne	v0, v1, nomatch
	PTR_ADDU	a1, a1, 1
	bne	a0, a3, 1b
	nop
match:
	j	ra
	 move	v0, zero
nomatch:
	j	ra
	li	v0, 1
END(bcmp)


/*
 * bit = ffs(value)
 */
LEAF(ffs)
	.set	noreorder
	beq	a0, zero, 2f
	move	v0, zero
1:
	and	v1, a0, 1		# bit set?
	addu	v0, v0, 1
	beq	v1, zero, 1b		# no, continue
	srl	a0, a0, 1
2:
	j	ra
	nop
END(ffs)
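
/*
 * C equivalent of the loop above (returns the 1-based index of the least
 * significant set bit, or 0 if no bits are set; a sketch only):
 *
 *	int
 *	ffs(int value)
 *	{
 *		int bit;
 *
 *		if (value == 0)
 *			return (0);
 *		for (bit = 1; (value & 1) == 0; bit++)
 *			value = (int)((unsigned)value >> 1);	// srl
 *		return (bit);
 *	}
 */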

/**
 * void
 * atomic_set_16(u_int16_t *a, u_int16_t b)
 * {
 *	*a |= b;
 * }
 */
LEAF(atomic_set_16)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
	andi	a1, a1, 0xffff
1:
	ll	t0, 0(a0)
	or	t0, t0, a1
	sc	t0, 0(a0)
	beq	t0, zero, 1b
	nop
	j	ra
	nop
END(atomic_set_16)
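
/*
 * The 16-bit (and 8-bit) atomics here have no native ll/sc width, so they
 * operate on the enclosing aligned 32-bit word.  In C-like pseudo-form
 * (a sketch; load_linked()/store_conditional() stand in for the ll/sc
 * pair, and, mirroring the assembly, the value is merged into the low
 * half of the enclosing word):
 *
 *	void
 *	atomic_set_16(uint16_t *a, uint16_t b)
 *	{
 *		uint32_t *w = (uint32_t *)((uintptr_t)a & ~(uintptr_t)3);
 *		uint32_t t;
 *
 *		do {
 *			t = load_linked(w);		// ll
 *		} while (!store_conditional(w, t | (b & 0xffff)));  // sc
 *	}
 */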

/**
 * void
 * atomic_clear_16(u_int16_t *a, u_int16_t b)
 * {
 *	*a &= ~b;
 * }
 */
LEAF(atomic_clear_16)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
	nor	a1, zero, a1
1:
	ll	t0, 0(a0)
	move	t1, t0
	andi	t1, t1, 0xffff	# t1 has the original lower 16 bits
	and	t1, t1, a1	# t1 has the new lower 16 bits
	srl	t0, t0, 16	# preserve original top 16 bits
	sll	t0, t0, 16
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b
	nop
	j	ra
	nop
END(atomic_clear_16)


/**
 * void
 * atomic_subtract_16(uint16_t *a, uint16_t b)
 * {
 *	*a -= b;
 * }
 */
LEAF(atomic_subtract_16)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)
	move	t1, t0
	andi	t1, t1, 0xffff	# t1 has the original lower 16 bits
	subu	t1, t1, a1
	andi	t1, t1, 0xffff	# t1 has the new lower 16 bits
	srl	t0, t0, 16	# preserve original top 16 bits
	sll	t0, t0, 16
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b
	nop
	j	ra
	nop
END(atomic_subtract_16)

/**
 * void
 * atomic_add_16(uint16_t *a, uint16_t b)
 * {
 *	*a += b;
 * }
 */
LEAF(atomic_add_16)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)
	move	t1, t0
	andi	t1, t1, 0xffff	# t1 has the original lower 16 bits
	addu	t1, t1, a1
	andi	t1, t1, 0xffff	# t1 has the new lower 16 bits
	srl	t0, t0, 16	# preserve original top 16 bits
	sll	t0, t0, 16
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b
	nop
	j	ra
	nop
END(atomic_add_16)

/**
 * void
 * atomic_add_8(uint8_t *a, uint8_t b)
 * {
 *	*a += b;
 * }
 */
LEAF(atomic_add_8)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)
	move	t1, t0
	andi	t1, t1, 0xff	# t1 has the original lower 8 bits
	addu	t1, t1, a1
	andi	t1, t1, 0xff	# t1 has the new lower 8 bits
	srl	t0, t0, 8	# preserve original top 24 bits
	sll	t0, t0, 8
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b
	nop
	j	ra
	nop
END(atomic_add_8)


/**
 * void
 * atomic_subtract_8(uint8_t *a, uint8_t b)
 * {
 *	*a -= b;
 * }
 */
LEAF(atomic_subtract_8)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)
	move	t1, t0
	andi	t1, t1, 0xff	# t1 has the original lower 8 bits
	subu	t1, t1, a1
	andi	t1, t1, 0xff	# t1 has the new lower 8 bits
	srl	t0, t0, 8	# preserve original top 24 bits
	sll	t0, t0, 8
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b
	nop
	j	ra
	nop
END(atomic_subtract_8)

/*
 *	atomic 64-bit register read/write assembly language support routines.
 */

	.set	noreorder		# Noreorder is default style!

#if !defined(__mips_n64) && !defined(__mips_n32)
	/*
	 * I don't know if these routines have the right number of
	 * NOPs in them for all processors.  XXX
	 *
	 * Maybe it would be better to just leave this undefined in that case.
	 *
	 * XXX These routines are not safe in the case of a TLB miss on a1 or
	 *     a0 unless the trapframe is 64-bit, which it just isn't with O32.
	 *     If we take any exception, not just an interrupt, the upper
	 *     32 bits will be clobbered.  Use only N32 and N64 kernels if you
	 *     want to use 64-bit registers while interrupts are enabled or
	 *     with memory operations.  Since this isn't even using load-linked
	 *     and store-conditional, perhaps it should just use two registers
	 *     instead, as is right and good with the O32 ABI.
	 */
LEAF(atomic_store_64)
	mfc0	t1, MIPS_COP_0_STATUS
	and	t2, t1, ~MIPS_SR_INT_IE
	mtc0	t2, MIPS_COP_0_STATUS
	nop
	nop
	nop
	nop
	ld	t0, (a1)
	nop
	nop
	sd	t0, (a0)
	nop
	nop
	mtc0	t1, MIPS_COP_0_STATUS
	nop
	nop
	nop
	nop
	j	ra
	nop
END(atomic_store_64)

LEAF(atomic_load_64)
	mfc0	t1, MIPS_COP_0_STATUS
	and	t2, t1, ~MIPS_SR_INT_IE
	mtc0	t2, MIPS_COP_0_STATUS
	nop
	nop
	nop
	nop
	ld	t0, (a0)
	nop
	nop
	sd	t0, (a1)
	nop
	nop
	mtc0	t1, MIPS_COP_0_STATUS
	nop
	nop
	nop
	nop
	j	ra
	nop
END(atomic_load_64)
#endif

#if defined(DDB) || defined(DEBUG)

LEAF(kdbpeek)
	PTR_LA	v1, ddberr
	and	v0, a0, 3			# unaligned ?
	GET_CPU_PCPU(t1)
	PTR_L	t1, PC_CURPCB(t1)
	bne	v0, zero, 1f
	PTR_S	v1, U_PCB_ONFAULT(t1)

	lw	v0, (a0)
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)

1:
	LWHI	v0, 0(a0)
	LWLO	v0, 3(a0)
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)
END(kdbpeek)

LEAF(kdbpeekd)
	PTR_LA	v1, ddberr
	and	v0, a0, 3			# unaligned ?
	GET_CPU_PCPU(t1)
	PTR_L	t1, PC_CURPCB(t1)
	bne	v0, zero, 1f
	PTR_S	v1, U_PCB_ONFAULT(t1)

	ld	v0, (a0)
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)

1:
	REG_LHI	v0, 0(a0)
	REG_LLO	v0, 7(a0)
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)
END(kdbpeekd)

ddberr:
	jr	ra
	nop

#if defined(DDB)
LEAF(kdbpoke)
	PTR_LA	v1, ddberr
	and	v0, a0, 3			# unaligned ?
	GET_CPU_PCPU(t1)
	PTR_L	t1, PC_CURPCB(t1)
	bne	v0, zero, 1f
	PTR_S	v1, U_PCB_ONFAULT(t1)

	sw	a1, (a0)
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)

1:
	SWHI	a1, 0(a0)
	SWLO	a1, 3(a0)
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)
END(kdbpoke)

	.data
	.globl	esym
esym:	.word	0

#endif /* DDB */
#endif /* DDB || DEBUG */

	.text
LEAF(breakpoint)
	break	MIPS_BREAK_SOVER_VAL
	jr	ra
	nop
END(breakpoint)

LEAF(setjmp)
	mfc0	v0, MIPS_COP_0_STATUS	# Later the "real" spl value!
	REG_S	s0, (SZREG * PCB_REG_S0)(a0)
	REG_S	s1, (SZREG * PCB_REG_S1)(a0)
	REG_S	s2, (SZREG * PCB_REG_S2)(a0)
	REG_S	s3, (SZREG * PCB_REG_S3)(a0)
	REG_S	s4, (SZREG * PCB_REG_S4)(a0)
	REG_S	s5, (SZREG * PCB_REG_S5)(a0)
	REG_S	s6, (SZREG * PCB_REG_S6)(a0)
	REG_S	s7, (SZREG * PCB_REG_S7)(a0)
	REG_S	s8, (SZREG * PCB_REG_S8)(a0)
	REG_S	sp, (SZREG * PCB_REG_SP)(a0)
	REG_S	ra, (SZREG * PCB_REG_RA)(a0)
	REG_S	v0, (SZREG * PCB_REG_SR)(a0)
	jr	ra
	li	v0, 0			# setjmp return
END(setjmp)

LEAF(longjmp)
	REG_L	v0, (SZREG * PCB_REG_SR)(a0)
	REG_L	ra, (SZREG * PCB_REG_RA)(a0)
	REG_L	s0, (SZREG * PCB_REG_S0)(a0)
	REG_L	s1, (SZREG * PCB_REG_S1)(a0)
	REG_L	s2, (SZREG * PCB_REG_S2)(a0)
	REG_L	s3, (SZREG * PCB_REG_S3)(a0)
	REG_L	s4, (SZREG * PCB_REG_S4)(a0)
	REG_L	s5, (SZREG * PCB_REG_S5)(a0)
	REG_L	s6, (SZREG * PCB_REG_S6)(a0)
	REG_L	s7, (SZREG * PCB_REG_S7)(a0)
	REG_L	s8, (SZREG * PCB_REG_S8)(a0)
	REG_L	sp, (SZREG * PCB_REG_SP)(a0)
	mtc0	v0, MIPS_COP_0_STATUS	# Later the "real" spl value!
	ITLBNOPFIX
	jr	ra
	li	v0, 1			# longjmp return
END(longjmp)
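
/*
 * Typical (purely illustrative) usage of the pair above; the buffer is a
 * register save area indexed by the PCB_REG_* constants, and the exact
 * buffer type and the risky_operation() callee are hypothetical:
 *
 *	if (setjmp(&jb) == 0) {
 *		// first return: value 0, callee-saved registers stashed
 *		risky_operation();	// may longjmp(&jb) on failure
 *	} else {
 *		// longjmp landed here with return value 1
 *	}
 */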

LEAF(mips3_ld)
	.set push
	.set noreorder
	.set mips64
#if defined(__mips_o32)
	mfc0	t0, MIPS_COP_0_STATUS		# turn off interrupts
	and	t1, t0, ~(MIPS_SR_INT_IE)
	mtc0	t1, MIPS_COP_0_STATUS
	COP0_SYNC
	nop
	nop
	nop

	ld	v0, 0(a0)
#if _BYTE_ORDER == _BIG_ENDIAN
	dsll	v1, v0, 32
	dsra	v1, v1, 32			# low word in v1
	dsra	v0, v0, 32			# high word in v0
#else
	dsra	v1, v0, 32			# high word in v1
	dsll	v0, v0, 32
	dsra	v0, v0, 32			# low word in v0
#endif

	mtc0	t0, MIPS_COP_0_STATUS		# restore intr status.
	COP0_SYNC
	nop
#else /* !__mips_o32 */
	ld	v0, 0(a0)
#endif /* !__mips_o32 */

	jr	ra
	nop
	.set pop
END(mips3_ld)

LEAF(mips3_sd)
	.set push
	.set mips64
	.set noreorder
#if defined(__mips_o32)
	mfc0	t0, MIPS_COP_0_STATUS		# turn off interrupts
	and	t1, t0, ~(MIPS_SR_INT_IE)
	mtc0	t1, MIPS_COP_0_STATUS
	COP0_SYNC
	nop
	nop
	nop

	# NOTE: a1 is padding!

#if _BYTE_ORDER == _BIG_ENDIAN
	dsll	a2, a2, 32			# high word in a2
	dsll	a3, a3, 32			# low word in a3
	dsrl	a3, a3, 32
#else
	dsll	a2, a2, 32			# low word in a2
	dsrl	a2, a2, 32
	dsll	a3, a3, 32			# high word in a3
#endif
	or	a1, a2, a3
	sd	a1, 0(a0)

	mtc0	t0, MIPS_COP_0_STATUS		# restore intr status.
	COP0_SYNC
	nop
#else /* !__mips_o32 */
	sd	a1, 0(a0)
#endif /* !__mips_o32 */

	jr	ra
	nop
	.set pop
END(mips3_sd)
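
/*
 * On O32 a 64-bit value cannot live in a single argument register, so
 * mips3_sd receives it split across a2/a3 (a1 is alignment padding, per
 * the NOTE above) and mips3_ld returns it split across v0/v1.  From C the
 * callers just see 64-bit prototypes (illustrative):
 *
 *	uint64_t mips3_ld(volatile uint64_t *addr);
 *	void mips3_sd(volatile uint64_t *addr, uint64_t value);
 *
 * With interrupts briefly disabled inside, these give a single 64-bit
 * register/memory access that is usable even from an O32 kernel.
 */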