/*	$NetBSD: asm.h,v 1.29 2000/12/14 21:29:51 jeffs Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machAsmDefs.h	8.1 (Berkeley) 6/10/93
 *	JNPR: asm.h,v 1.10 2007/08/09 11:23:32 katta
 * $FreeBSD$
 */

/*
 * machAsmDefs.h --
 *
 *	Macros used when writing assembler programs.
 *
 *	Copyright (C) 1989 Digital Equipment Corporation.
 *	Permission to use, copy, modify, and distribute this software and
 *	its documentation for any purpose and without fee is hereby granted,
 *	provided that the above copyright notice appears in all copies.
 *	Digital Equipment Corporation makes no representations about the
 *	suitability of this software for any purpose.  It is provided "as is"
 *	without express or implied warranty.
 *
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsmDefs.h,
 *	v 1.2 89/08/15 18:28:24 rab Exp  SPRITE (DECWRL)
 */

#ifndef _MACHINE_ASM_H_
#define	_MACHINE_ASM_H_

#ifndef NO_REG_DEFS
#include <machine/regdef.h>
#endif
#include <machine/endian.h>
#include <machine/cdefs.h>

#undef __FBSDID
#if !defined(lint) && !defined(STRIP_FBSDID)
#define	__FBSDID(s)	.ident s
#else
#define	__FBSDID(s)	/* nothing */
#endif

/*
 * Define -pg profile entry code.
 * Must always be noreorder, must never use a macro instruction
 * Final addiu to t9 must always equal the size of this _KERN_MCOUNT
 */
#define	_KERN_MCOUNT			\
	.set	push;			\
	.set	noreorder;		\
	.set	noat;			\
	subu	sp,sp,16;		\
	sw	t9,12(sp);		\
	move	AT,ra;			\
	lui	t9,%hi(_mcount);	\
	addiu	t9,t9,%lo(_mcount);	\
	jalr	t9;			\
	nop;				\
	lw	t9,4(sp);		\
	addiu	sp,sp,8;		\
	addiu	t9,t9,40;		\
	.set	pop;

#ifdef GPROF
#define	MCOUNT _KERN_MCOUNT
#else
#define	MCOUNT
#endif

#define	_C_LABEL(x)	x

#ifdef USE_AENT
#define	AENT(x)		\
	.aent	x, 0
#else
#define	AENT(x)
#endif

/*
 * WARN_REFERENCES: create a warning if the specified symbol is referenced
 */
#define	WARN_REFERENCES(_sym,_msg)				\
	.section .gnu.warning. ## _sym ; .ascii _msg ; .text

/*
 * These are temp registers whose names can be used in either the old
 * or new ABI, although they map to different physical registers.  In
 * the old ABI, they map to t4-t7, and in the new ABI, they map to a4-a7.
 *
 * Because they overlap with the last 4 arg regs in the new ABI, ta0-ta3
 * should be used only when we need more than t0-t3.
 */
#if defined(__mips_n32) || defined(__mips_n64)
#define ta0     $8
#define ta1     $9
#define ta2     $10
#define ta3     $11
#else
#define ta0     $12
#define ta1     $13
#define ta2     $14
#define ta3     $15
#endif /* __mips_n32 || __mips_n64 */

#ifdef __ELF__
# define _C_LABEL(x)    x
#else
#  define _C_LABEL(x)   _ ## x
#endif

/*
 * WEAK_ALIAS: create a weak alias.
 */
#define	WEAK_ALIAS(alias,sym)						\
	.weak alias;							\
	alias = sym

/*
 * STRONG_ALIAS: create a strong alias.
 */
#define STRONG_ALIAS(alias,sym)						\
	.globl alias;							\
	alias = sym

#define	GLOBAL(sym)						\
	.globl sym; sym:
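
/*
 * Usage sketch (illustrative, not part of the original header): the alias
 * and label helpers above as they might appear in an assembly source.
 * All symbol names here are hypothetical.
 *
 *	GLOBAL(mycounter)		# exported data label
 *		.word	0
 *
 *	WEAK_ALIAS(myfunc, _myfunc)	# myfunc may be overridden at link time
 *	STRONG_ALIAS(myfunc2, _myfunc)	# myfunc2 always resolves to _myfunc
 */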

#define	ENTRY(sym)						\
	.text; .globl sym; .ent sym; sym:

#define	ASM_ENTRY(sym)						\
	.text; .globl sym; .type sym,@function; sym:

/*
 * LEAF
 *	A leaf routine
 *	- calls no other functions,
 *	- never uses any callee-saved register (S0-S8), and
 *	- uses no local stack storage.
 */
#define	LEAF(x)			\
	.globl	_C_LABEL(x);	\
	.ent	_C_LABEL(x), 0;	\
_C_LABEL(x): ;			\
	.frame sp, 0, ra;	\
	MCOUNT

/*
 * LEAF_NOPROFILE
 *	A leaf routine that is not profiled.
 */
#define	LEAF_NOPROFILE(x)	\
	.globl	_C_LABEL(x);	\
	.ent	_C_LABEL(x), 0;	\
_C_LABEL(x): ;			\
	.frame	sp, 0, ra

/*
 * XLEAF
 *	declare alternate entry to leaf routine
 */
#define	XLEAF(x)		\
	.globl	_C_LABEL(x);	\
	AENT (_C_LABEL(x));	\
_C_LABEL(x):

/*
 * NESTED
 *	A function that calls other functions and therefore needs
 *	stack space to save/restore registers.
 */
#define	NESTED(x, fsize, retpc)		\
	.globl	_C_LABEL(x);		\
	.ent	_C_LABEL(x), 0;		\
_C_LABEL(x): ;				\
	.frame	sp, fsize, retpc;	\
	MCOUNT

/*
 * NESTED_NOPROFILE(x)
 *	A nested routine that is not profiled.
 */
#define	NESTED_NOPROFILE(x, fsize, retpc)	\
	.globl	_C_LABEL(x);			\
	.ent	_C_LABEL(x), 0;			\
_C_LABEL(x): ;					\
	.frame	sp, fsize, retpc

/*
 * XNESTED
 *	declare alternate entry point to nested routine.
 */
#define	XNESTED(x)		\
	.globl	_C_LABEL(x);	\
	AENT (_C_LABEL(x));	\
_C_LABEL(x):

/*
 * END
 *	Mark end of a procedure.
 */
#define	END(x)			\
	.end _C_LABEL(x)
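
/*
 * Usage sketch (illustrative, not part of the original header): a minimal
 * leaf routine bracketed by LEAF/END.  The routine name is hypothetical.
 *
 *	LEAF(myleaf)			# calls nothing, uses no stack
 *		move	v0, a0
 *		jr	ra
 *		nop
 *	END(myleaf)
 */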

/*
 * IMPORT -- import external symbol
 */
#define	IMPORT(sym, size)	\
	.extern _C_LABEL(sym),size

/*
 * EXPORT -- export definition of symbol
 */
#define	EXPORT(x)		\
	.globl	_C_LABEL(x);	\
_C_LABEL(x):

/*
 * VECTOR
 *	exception vector entrypoint
 *	XXX: regmask should be used to generate .mask
 */
#define	VECTOR(x, regmask)	\
	.ent	_C_LABEL(x),0;	\
	EXPORT(x);

#define	VECTOR_END(x)		\
	EXPORT(x ## End);	\
	END(x)

/*
 * Macros to panic and printf from assembly language.
 */
#define	PANIC(msg)			\
	PTR_LA	a0, 9f;			\
	jal	_C_LABEL(panic);	\
	nop;				\
	MSG(msg)

#define	PANIC_KSEG0(msg, reg)	PANIC(msg)

#define	PRINTF(msg)			\
	PTR_LA	a0, 9f;			\
	jal	_C_LABEL(printf);	\
	nop;				\
	MSG(msg)

#define	MSG(msg)			\
	.rdata;				\
9:	.asciiz	msg;			\
	.text

#define	ASMSTR(str)			\
	.asciiz str;			\
	.align	3
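
/*
 * Usage sketch (illustrative only): PANIC loads the address of the message
 * emitted by MSG into a0 and calls panic().  The message text and register
 * tested here are hypothetical.
 *
 *	bnez	a1, 1f			# a1 must not be NULL
 *	nop
 *	PANIC("unexpected NULL pointer")
 * 1:
 */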

/*
 * Call ast if required
 *
 * XXX Do we really need to disable interrupts?
 */
#define DO_AST				             \
44:				                     \
	mfc0	t0, MIPS_COP_0_STATUS               ;\
	and	a0, t0, MIPS_SR_INT_IE              ;\
	xor	t0, a0, t0                          ;\
	mtc0	t0, MIPS_COP_0_STATUS               ;\
	COP0_SYNC                                   ;\
	GET_CPU_PCPU(s1)                            ;\
	PTR_L	s3, PC_CURPCB(s1)                   ;\
	PTR_L	s1, PC_CURTHREAD(s1)                ;\
	lw	s2, TD_FLAGS(s1)                    ;\
	li	s0, TDF_ASTPENDING | TDF_NEEDRESCHED;\
	and	s2, s0                              ;\
	mfc0	t0, MIPS_COP_0_STATUS               ;\
	or	t0, a0, t0                          ;\
	mtc0	t0, MIPS_COP_0_STATUS               ;\
	COP0_SYNC                                   ;\
	beq	s2, zero, 4f                        ;\
	nop                                         ;\
	PTR_LA	s0, _C_LABEL(ast)                   ;\
	jalr	s0                                  ;\
	PTR_ADDU a0, s3, U_PCB_REGS                 ;\
	j	44b		                    ;\
	nop                                         ;\
4:


/*
 * XXX retain dialects XXX
 */
#define	ALEAF(x)			XLEAF(x)
#define	NLEAF(x)			LEAF_NOPROFILE(x)
#define	NON_LEAF(x, fsize, retpc)	NESTED(x, fsize, retpc)
#define	NNON_LEAF(x, fsize, retpc)	NESTED_NOPROFILE(x, fsize, retpc)

#if defined(__mips_o32)
#define	SZREG	4
#else
#define	SZREG	8
#endif

#if defined(__mips_o32) || defined(__mips_o64)
#define	ALSK	7		/* stack alignment */
#define	ALMASK	-7		/* stack alignment */
#define	SZFPREG	4
#define	FP_L	lwc1
#define	FP_S	swc1
#else
#define	ALSK	15		/* stack alignment */
#define	ALMASK	-15		/* stack alignment */
#define	SZFPREG	8
#define	FP_L	ldc1
#define	FP_S	sdc1
#endif

/*
 *  standard callframe {
 *	register_t cf_pad[N];		o32/o64 (N=0), n32 (N=1), n64 (N=1)
 *  	register_t cf_args[4];		arg0 - arg3 (only on o32 and o64)
 *  	register_t cf_gp;		global pointer (only on n32 and n64)
 *  	register_t cf_sp;		frame pointer
 *  	register_t cf_ra;		return address
 *  };
 */
#if defined(__mips_o32) || defined(__mips_o64)
#define	CALLFRAME_SIZ	(SZREG * (4 + 2))
#define	CALLFRAME_S0	0
#elif defined(__mips_n32) || defined(__mips_n64)
#define	CALLFRAME_SIZ	(SZREG * 4)
#define	CALLFRAME_S0	(CALLFRAME_SIZ - 4 * SZREG)
#endif
#ifndef _KERNEL
#define	CALLFRAME_GP	(CALLFRAME_SIZ - 3 * SZREG)
#endif
#define	CALLFRAME_SP	(CALLFRAME_SIZ - 2 * SZREG)
#define	CALLFRAME_RA	(CALLFRAME_SIZ - 1 * SZREG)
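
/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * prologue/epilogue that allocates a standard call frame and saves the
 * return address using the ABI-independent macros (PTR_SUBU, PTR_ADDU,
 * REG_S and REG_L are defined later in this file).  The routine names
 * are hypothetical.
 *
 *	NESTED(mycaller, CALLFRAME_SIZ, ra)
 *		PTR_SUBU sp, sp, CALLFRAME_SIZ
 *		REG_S	ra, CALLFRAME_RA(sp)
 *		jal	_C_LABEL(mycallee)
 *		nop
 *		REG_L	ra, CALLFRAME_RA(sp)
 *		PTR_ADDU sp, sp, CALLFRAME_SIZ
 *		jr	ra
 *		nop
 *	END(mycaller)
 */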

/*
 *   Endian-independent assembly-code aliases for unaligned memory accesses.
 */
#if _BYTE_ORDER == _LITTLE_ENDIAN
# define LWHI lwr
# define LWLO lwl
# define SWHI swr
# define SWLO swl
# if SZREG == 4
#  define REG_LHI   lwr
#  define REG_LLO   lwl
#  define REG_SHI   swr
#  define REG_SLO   swl
# else
#  define REG_LHI   ldr
#  define REG_LLO   ldl
#  define REG_SHI   sdr
#  define REG_SLO   sdl
# endif
#endif

#if _BYTE_ORDER == _BIG_ENDIAN
# define LWHI lwl
# define LWLO lwr
# define SWHI swl
# define SWLO swr
# if SZREG == 4
#  define REG_LHI   lwl
#  define REG_LLO   lwr
#  define REG_SHI   swl
#  define REG_SLO   swr
# else
#  define REG_LHI   ldl
#  define REG_LLO   ldr
#  define REG_SHI   sdl
#  define REG_SLO   sdr
# endif
#endif
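
/*
 * Usage sketch (illustrative only): loading a possibly unaligned 32-bit
 * word.  LWHI/LWLO expand to lwl/lwr in the order required by the current
 * byte order, so the same two-instruction sequence works on both
 * endiannesses.
 *
 *	LWHI	t0, 0(a0)		# most significant part of the word
 *	LWLO	t0, 3(a0)		# least significant part of the word
 */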

/*
 * While it would be nice to be compatible with the SGI
 * REG_L and REG_S macros, because they do not take parameters, it
 * is impossible to use them with the _MIPS_SIM_ABIX32 model.
 *
 * These macros hide the use of mips3 instructions from the
 * assembler to prevent the assembler from generating 64-bit style
 * ABI calls.
 */
#if _MIPS_SZPTR == 32
#define	PTR_ADD		add
#define	PTR_ADDI	addi
#define	PTR_ADDU	addu
#define	PTR_ADDIU	addiu
#define	PTR_SUB		sub
#define	PTR_SUBI	subi
#define	PTR_SUBU	subu
#define	PTR_SUBIU	subu
#define	PTR_L		lw
#define	PTR_LA		la
#define	PTR_LI		li
#define	PTR_S		sw
#define	PTR_SLL		sll
#define	PTR_SLLV	sllv
#define	PTR_SRL		srl
#define	PTR_SRLV	srlv
#define	PTR_SRA		sra
#define	PTR_SRAV	srav
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_WORD	.word
#define	PTR_SCALESHIFT	2
#else /* _MIPS_SZPTR == 64 */
#define	PTR_ADD		dadd
#define	PTR_ADDI	daddi
#define	PTR_ADDU	daddu
#define	PTR_ADDIU	daddiu
#define	PTR_SUB		dsub
#define	PTR_SUBI	dsubi
#define	PTR_SUBU	dsubu
#define	PTR_SUBIU	dsubu
#define	PTR_L		ld
#define	PTR_LA		dla
#define	PTR_LI		dli
#define	PTR_S		sd
#define	PTR_SLL		dsll
#define	PTR_SLLV	dsllv
#define	PTR_SRL		dsrl
#define	PTR_SRLV	dsrlv
#define	PTR_SRA		dsra
#define	PTR_SRAV	dsrav
#define	PTR_LL		lld
#define	PTR_SC		scd
#define	PTR_WORD	.dword
#define	PTR_SCALESHIFT	3
#endif /* _MIPS_SZPTR == 64 */
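
/*
 * Usage sketch (illustrative only): walking an array of pointers without
 * caring whether pointers are 32 or 64 bits wide.  Register choice is
 * arbitrary; a1 is assumed to hold the array index.
 *
 *	PTR_SLL	t1, a1, PTR_SCALESHIFT	# index -> byte offset
 *	PTR_ADDU t1, a0, t1		# &array[index]
 *	PTR_L	t2, 0(t1)		# load the pointer itself
 */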

#if _MIPS_SZINT == 32
#define	INT_ADD		add
#define	INT_ADDI	addi
#define	INT_ADDU	addu
#define	INT_ADDIU	addiu
#define	INT_SUB		sub
#define	INT_SUBI	subi
#define	INT_SUBU	subu
#define	INT_SUBIU	subu
#define	INT_L		lw
#define	INT_LA		la
#define	INT_S		sw
#define	INT_SLL		sll
#define	INT_SLLV	sllv
#define	INT_SRL		srl
#define	INT_SRLV	srlv
#define	INT_SRA		sra
#define	INT_SRAV	srav
#define	INT_LL		ll
#define	INT_SC		sc
#define	INT_WORD	.word
#define	INT_SCALESHIFT	2
#else
#define	INT_ADD		dadd
#define	INT_ADDI	daddi
#define	INT_ADDU	daddu
#define	INT_ADDIU	daddiu
#define	INT_SUB		dsub
#define	INT_SUBI	dsubi
#define	INT_SUBU	dsubu
#define	INT_SUBIU	dsubu
#define	INT_L		ld
#define	INT_LA		dla
#define	INT_S		sd
#define	INT_SLL		dsll
#define	INT_SLLV	dsllv
#define	INT_SRL		dsrl
#define	INT_SRLV	dsrlv
#define	INT_SRA		dsra
#define	INT_SRAV	dsrav
#define	INT_LL		lld
#define	INT_SC		scd
#define	INT_WORD	.dword
#define	INT_SCALESHIFT	3
#endif

#if _MIPS_SZLONG == 32
#define	LONG_ADD	add
#define	LONG_ADDI	addi
#define	LONG_ADDU	addu
#define	LONG_ADDIU	addiu
#define	LONG_SUB	sub
#define	LONG_SUBI	subi
#define	LONG_SUBU	subu
#define	LONG_SUBIU	subu
#define	LONG_L		lw
#define	LONG_LA		la
#define	LONG_S		sw
#define	LONG_SLL	sll
#define	LONG_SLLV	sllv
#define	LONG_SRL	srl
#define	LONG_SRLV	srlv
#define	LONG_SRA	sra
#define	LONG_SRAV	srav
#define	LONG_LL		ll
#define	LONG_SC		sc
#define	LONG_WORD	.word
#define	LONG_SCALESHIFT	2
#else
#define	LONG_ADD	dadd
#define	LONG_ADDI	daddi
#define	LONG_ADDU	daddu
#define	LONG_ADDIU	daddiu
#define	LONG_SUB	dsub
#define	LONG_SUBI	dsubi
#define	LONG_SUBU	dsubu
#define	LONG_SUBIU	dsubu
#define	LONG_L		ld
#define	LONG_LA		dla
#define	LONG_S		sd
#define	LONG_SLL	dsll
#define	LONG_SLLV	dsllv
#define	LONG_SRL	dsrl
#define	LONG_SRLV	dsrlv
#define	LONG_SRA	dsra
#define	LONG_SRAV	dsrav
#define	LONG_LL		lld
#define	LONG_SC		scd
#define	LONG_WORD	.dword
#define	LONG_SCALESHIFT	3
#endif

#if SZREG == 4
#define	REG_L		lw
#define	REG_S		sw
#define	REG_LI		li
#define	REG_ADDU	addu
#define	REG_SLL		sll
#define	REG_SLLV	sllv
#define	REG_SRL		srl
#define	REG_SRLV	srlv
#define	REG_SRA		sra
#define	REG_SRAV	srav
#define	REG_LL		ll
#define	REG_SC		sc
#define	REG_SCALESHIFT	2
#else
#define	REG_L		ld
#define	REG_S		sd
#define	REG_LI		dli
#define	REG_ADDU	daddu
#define	REG_SLL		dsll
#define	REG_SLLV	dsllv
#define	REG_SRL		dsrl
#define	REG_SRLV	dsrlv
#define	REG_SRA		dsra
#define	REG_SRAV	dsrav
#define	REG_LL		lld
#define	REG_SC		scd
#define	REG_SCALESHIFT	3
#endif
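
/*
 * Usage sketch (illustrative only): copying one register-sized element of
 * an array with the register-width macros; the same source assembles for
 * 32-bit and 64-bit register sizes.  Register choice is arbitrary; a2 is
 * assumed to hold the element index.
 *
 *	REG_SLL	t2, a2, REG_SCALESHIFT	# index -> byte offset
 *	PTR_ADDU t2, a0, t2		# address of source element
 *	REG_L	t3, 0(t2)		# load one register-sized element
 *	REG_S	t3, 0(a1)		# store it to the destination
 */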

#if _MIPS_ISA == _MIPS_ISA_MIPS1 || _MIPS_ISA == _MIPS_ISA_MIPS2 || \
    _MIPS_ISA == _MIPS_ISA_MIPS32
#define	MFC0		mfc0
#define	MTC0		mtc0
#endif
#if _MIPS_ISA == _MIPS_ISA_MIPS3 || _MIPS_ISA == _MIPS_ISA_MIPS4 || \
    _MIPS_ISA == _MIPS_ISA_MIPS64
#define	MFC0		dmfc0
#define	MTC0		dmtc0
#endif

#if defined(__mips_o32) || defined(__mips_o64)

#ifdef __ABICALLS__
#define	CPRESTORE(r)	.cprestore r
#define	CPLOAD(r)	.cpload r
#else
#define	CPRESTORE(r)	/* not needed */
#define	CPLOAD(r)	/* not needed */
#endif

#define	SETUP_GP	\
			.set push;				\
			.set noreorder;				\
			.cpload	t9;				\
			.set pop
#define	SETUP_GPX(r)	\
			.set push;				\
			.set noreorder;				\
			move	r,ra;	/* save old ra */	\
			bal	7f;				\
			nop;					\
		7:	.cpload	ra;				\
			move	ra,r;				\
			.set pop
#define	SETUP_GPX_L(r,lbl)	\
			.set push;				\
			.set noreorder;				\
			move	r,ra;	/* save old ra */	\
			bal	lbl;				\
			nop;					\
		lbl:	.cpload	ra;				\
			move	ra,r;				\
			.set pop
#define	SAVE_GP(x)	.cprestore x

#define	SETUP_GP64(a,b)		/* n32/n64 specific */
#define	SETUP_GP64_R(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64_L(a,b,c)	/* n32/n64 specific */
#define	RESTORE_GP64		/* n32/n64 specific */
#define	USE_ALT_CP(a)		/* n32/n64 specific */
#endif /* __mips_o32 || __mips_o64 */

#if defined(__mips_o32) || defined(__mips_o64)
#define	REG_PROLOGUE	.set push
#define	REG_EPILOGUE	.set pop
#endif
#if defined(__mips_n32) || defined(__mips_n64)
#define	REG_PROLOGUE	.set push ; .set mips3
#define	REG_EPILOGUE	.set pop
#endif

#if defined(__mips_n32) || defined(__mips_n64)
#define	SETUP_GP		/* o32 specific */
#define	SETUP_GPX(r)		/* o32 specific */
#define	SETUP_GPX_L(r,lbl)	/* o32 specific */
#define	SAVE_GP(x)		/* o32 specific */
#define	SETUP_GP64(a,b)		.cpsetup $25, a, b
#define	SETUP_GPX64(a,b)	\
				.set push;			\
				move	b,ra;			\
				.set noreorder;			\
				bal	7f;			\
				nop;				\
			7:	.set pop;			\
				.cpsetup ra, a, 7b;		\
				move	ra,b
#define	SETUP_GPX64_L(a,b,c)	\
				.set push;			\
				move	b,ra;			\
				.set noreorder;			\
				bal	c;			\
				nop;				\
			c:	.set pop;			\
				.cpsetup ra, a, c;		\
				move	ra,b
#define	RESTORE_GP64		.cpreturn
#define	USE_ALT_CP(a)		.cplocal a
#endif	/* __mips_n32 || __mips_n64 */
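
/*
 * Usage sketch (illustrative only, ordering assumed): position-independent
 * entry code written so the same source builds for o32/o64 (where SETUP_GP
 * and SAVE_GP expand to .cpload/.cprestore and the GP64 macros to nothing)
 * and for n32/n64 (where the opposite holds).  The routine name is
 * hypothetical, and CALLFRAME_GP is only defined for non-kernel builds.
 *
 *	NESTED(mypicfunc, CALLFRAME_SIZ, ra)
 *		SETUP_GP
 *		PTR_SUBU sp, sp, CALLFRAME_SIZ
 *		SETUP_GP64(CALLFRAME_GP, mypicfunc)
 *		SAVE_GP(CALLFRAME_GP)
 *		...
 *		RESTORE_GP64
 *		PTR_ADDU sp, sp, CALLFRAME_SIZ
 *		jr	ra
 *		nop
 *	END(mypicfunc)
 */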

#define	mfc0_macro(data, spr)						\
	__asm __volatile ("mfc0 %0, $%1"				\
			: "=r" (data)	/* outputs */			\
			: "i" (spr));	/* inputs */

#define	mtc0_macro(data, spr)						\
	__asm __volatile ("mtc0 %0, $%1"				\
			:				/* outputs */	\
			: "r" (data), "i" (spr));	/* inputs */

#define	cfc0_macro(data, spr)						\
	__asm __volatile ("cfc0 %0, $%1"				\
			: "=r" (data)	/* outputs */			\
			: "i" (spr));	/* inputs */

#define	ctc0_macro(data, spr)						\
	__asm __volatile ("ctc0 %0, $%1"				\
			:				/* outputs */	\
			: "r" (data), "i" (spr));	/* inputs */


#define	lbu_macro(data, addr)						\
	__asm __volatile ("lbu %0, 0x0(%1)"				\
			: "=r" (data)	/* outputs */			\
			: "r" (addr));	/* inputs */

#define	lb_macro(data, addr)						\
	__asm __volatile ("lb %0, 0x0(%1)"				\
			: "=r" (data)	/* outputs */			\
			: "r" (addr));	/* inputs */

#define	lwl_macro(data, addr)						\
	__asm __volatile ("lwl %0, 0x0(%1)"				\
			: "=r" (data)	/* outputs */			\
			: "r" (addr));	/* inputs */

#define	lwr_macro(data, addr)						\
	__asm __volatile ("lwr %0, 0x0(%1)"				\
			: "=r" (data)	/* outputs */			\
			: "r" (addr));	/* inputs */

#define	ldl_macro(data, addr)						\
	__asm __volatile ("ldl %0, 0x0(%1)"				\
			: "=r" (data)	/* outputs */			\
			: "r" (addr));	/* inputs */

#define	ldr_macro(data, addr)						\
	__asm __volatile ("ldr %0, 0x0(%1)"				\
			: "=r" (data)	/* outputs */			\
			: "r" (addr));	/* inputs */

#define	sb_macro(data, addr)						\
	__asm __volatile ("sb %0, 0x0(%1)"				\
			:				/* outputs */	\
			: "r" (data), "r" (addr));	/* inputs */

#define	swl_macro(data, addr)						\
	__asm __volatile ("swl %0, 0x0(%1)"				\
			: 				/* outputs */	\
			: "r" (data), "r" (addr));	/* inputs */

#define	swr_macro(data, addr)						\
	__asm __volatile ("swr %0, 0x0(%1)"				\
			: 				/* outputs */	\
			: "r" (data), "r" (addr));	/* inputs */

#define	sdl_macro(data, addr)						\
	__asm __volatile ("sdl %0, 0x0(%1)"				\
			: 				/* outputs */	\
			: "r" (data), "r" (addr));	/* inputs */

#define	sdr_macro(data, addr)						\
	__asm __volatile ("sdr %0, 0x0(%1)"				\
			:				/* outputs */	\
			: "r" (data), "r" (addr));	/* inputs */

#define	mfgr_macro(data, gr)						\
	__asm __volatile ("move %0, $%1"				\
			: "=r" (data)	/* outputs */			\
			: "i" (gr));	/* inputs */

#define	dmfc0_macro(data, spr)						\
	__asm __volatile ("dmfc0 %0, $%1"				\
			: "=r" (data)	/* outputs */			\
			: "i" (spr));	/* inputs */

#define	dmtc0_macro(data, spr, sel)					\
	__asm __volatile ("dmtc0	%0, $%1, %2"			\
			:			/* no  outputs */	\
			: "r" (data), "i" (spr), "i" (sel)); /* inputs */
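
/*
 * Usage sketch (illustrative only): the *_macro wrappers above are meant to
 * be used from C with a compile-time constant coprocessor register number.
 * The value 12 is the conventional CP0 Status register number; the variable
 * name is hypothetical.
 *
 *	uint32_t status;
 *
 *	mfc0_macro(status, 12);		// read CP0 Status
 *	status |= 1;			// set the IE bit
 *	mtc0_macro(status, 12);		// write it back
 */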

/*
 * The DYNAMIC_STATUS_MASK option adds an additional masking operation
 * when updating the hardware interrupt mask in the status register.
 *
 * This is useful for platforms that need to mask interrupts at run time,
 * based on motherboard configuration, or to handle slowly clearing
 * interrupts.
 *
 * XXX this is currently only implemented for mips3.
 */
#ifdef MIPS_DYNAMIC_STATUS_MASK
#define	DYNAMIC_STATUS_MASK(sr,scratch)			\
	lw	scratch, mips_dynamic_status_mask;	\
	and	sr, sr, scratch

#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)		\
	ori	sr, (MIPS_INT_MASK | MIPS_SR_INT_IE);	\
	DYNAMIC_STATUS_MASK(sr,scratch1)
#else
#define	DYNAMIC_STATUS_MASK(sr,scratch)
#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
#endif

#define	GET_CPU_PCPU(reg)		\
	PTR_L	reg, _C_LABEL(pcpup);

/*
 * Description of the setjmp buffer
 *
 * word  0	magic number	(dependent on creator)
 *       1	RA
 *       2	S0
 *       3	S1
 *       4	S2
 *       5	S3
 *       6	S4
 *       7	S5
 *       8	S6
 *       9	S7
 *       10	SP
 *       11	S8
 *       12	GP		(dependent on ABI)
 *       13	signal mask	(dependent on magic)
 *       14	(cont'd)
 *       15	(cont'd)
 *       16	(cont'd)
 *
 * The magic number identifies the jmp_buf and
 * how the buffer was created as well as providing
 * a sanity check.
 *
 */

#define _JB_MAGIC__SETJMP	0xBADFACED
#define _JB_MAGIC_SETJMP	0xFACEDBAD

/* Valid for all jmp_buf's */

#define _JB_MAGIC		0
#define _JB_REG_RA		1
#define _JB_REG_S0		2
#define _JB_REG_S1		3
#define _JB_REG_S2		4
#define _JB_REG_S3		5
#define _JB_REG_S4		6
#define _JB_REG_S5		7
#define _JB_REG_S6		8
#define _JB_REG_S7		9
#define _JB_REG_SP		10
#define _JB_REG_S8		11
#if defined(__mips_n32) || defined(__mips_n64)
#define	_JB_REG_GP		12
#endif

/* Only valid with the _JB_MAGIC_SETJMP magic */

#define _JB_SIGMASK		13
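
/*
 * Illustrative sketch (not part of the original header): how a setjmp-style
 * routine might fill the buffer described above using the register-width
 * macros, with a0 pointing at the jmp_buf.
 *
 *	REG_LI	v0, _JB_MAGIC__SETJMP
 *	REG_S	v0, (_JB_MAGIC  * SZREG)(a0)
 *	REG_S	ra, (_JB_REG_RA * SZREG)(a0)
 *	REG_S	s0, (_JB_REG_S0 * SZREG)(a0)
 *	REG_S	sp, (_JB_REG_SP * SZREG)(a0)
 *	...
 */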

/*
 * Various macros for dealing with TLB hazards
 * (a) why so many?
 * (b) when to use?
 * (c) why not used everywhere?
 */
/*
 * Assume that we always need nops to escape the CP0 hazard.
 * TODO: Make hazard delays configurable.  Stuck with 5 cycles for the moment.
 * For more info on CP0 hazards see Chapter 7 (p.99) of "MIPS32 Architecture
 *    For Programmers Volume III: The MIPS32 Privileged Resource Architecture"
 */
#define	ITLBNOPFIX	nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
#define	HAZARD_DELAY	nop;nop;nop;nop;nop;
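
/*
 * Usage sketch (illustrative only): pad CP0 updates with a hazard delay
 * before depending on their effect, e.g. around a TLB write.  The register
 * name is assumed to come from <machine/cpuregs.h>.
 *
 *	mtc0	t0, MIPS_COP_0_TLB_HI
 *	HAZARD_DELAY
 *	tlbwi
 *	HAZARD_DELAY
 */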
#endif /* !_MACHINE_ASM_H_ */
