/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the contents of the sun reference mmu (sfmmu)
 * specific hat data structures and the sfmmu specific hat procedures.
 * The machine independent interface is described in <vm/hat.h>.
 */

#ifndef _VM_MACH_SFMMU_H
#define	_VM_MACH_SFMMU_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/spitregs.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Define UTSB_PHYS if the user TSB is always accessed via physical
 * address.  On sun4u platforms, the user TSB is accessed via virtual
 * address, so it is left undefined.
 */
#undef	UTSB_PHYS

#ifdef _ASM

/*
 * This macro is used in the MMU code to check if TL should be lowered from
 * 2 to 1 to pop trapstat's state.  See the block comment in trapstat.c
 * for details.
 */

#define	TSTAT_CHECK_TL1(label, scr1, scr2)			\
	rdpr	%tpc, scr1;					\
	sethi	%hi(KERNELBASE), scr2;				\
	or	scr2, %lo(KERNELBASE), scr2;			\
	cmp	scr1, scr2;					\
	bgeu	%xcc, 9f;					\
	nop;							\
	ba	label;						\
	wrpr	%g0, 1, %tl;					\
9:
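
/*
 * Illustrative usage sketch (added commentary, not in the original):
 * a TL=2 trap handler can bail out to its TL=1 handler through this
 * check; the target label and the two scratch globals below are
 * hypothetical.
 *
 *	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
 */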


/*
 * The following macros allow us to share the majority of the
 * SFMMU code between sun4u and sun4v platforms.
 */

#define	SETUP_TSB_ASI(qlp, tmp)					\
	movrz	qlp, ASI_N, tmp;				\
	movrnz	qlp, ASI_MEM, tmp;				\
	mov	tmp, %asi

#define	SETUP_UTSB_ATOMIC_ASI(tmp1, tmp2)			\
	mov	ASI_NQUAD_LD, %asi
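
/*
 * Illustrative sketch (added commentary, not in the original):
 * SETUP_TSB_ASI selects the ASI for subsequent TSB references from a
 * flag register: %asi becomes ASI_N when qlp is zero and ASI_MEM when
 * it is nonzero (TSB referenced by physical address).
 * SETUP_UTSB_ATOMIC_ASI simply selects the quad-load ASI needed by
 * the ldda-based PROBE_* macros below; tmp1/tmp2 are unused on sun4u.
 */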

/*
 * Macro to switch to the alternate global registers on sun4u platforms
 * (not applicable to sun4v platforms)
 */
#define	USE_ALTERNATE_GLOBALS(scr)				\
	rdpr	%pstate, scr;					\
	wrpr	scr, PSTATE_MG | PSTATE_AG, %pstate

/*
 * Macro to set the %gl register value on sun4v platforms
 * (not applicable to sun4u platforms, hence the empty expansion)
 */
#define	SET_GL_REG(val)

/*
 * Get MMU data tag access register value
 *
 * In:
 *   tagacc, scr1 = scratch registers
 * Out:
 *   tagacc = MMU data tag access register value
 */
#define	GET_MMU_D_TAGACC(tagacc, scr1)				\
	mov	MMU_TAG_ACCESS, scr1;				\
	ldxa	[scr1]ASI_DMMU, tagacc

/*
 * Get MMU data tag target register
 *
 * In:
 *   ttarget = scratch register
 *   scr1 = scratch register (not used)
 * Out:
 *   ttarget = MMU data tag target register value
 */
#define	GET_MMU_D_TTARGET(ttarget, scr1)			\
	ldxa	[%g0]ASI_DMMU, ttarget

/*
 * Get MMU data/instruction tag access register values
 *
 * In:
 *   dtagacc, itagacc, scr1, scr2 = scratch registers
 * Out:
 *   dtagacc = MMU data tag access register value
 *   itagacc = MMU instruction tag access register value
 */
#define	GET_MMU_BOTH_TAGACC(dtagacc, itagacc, scr1, scr2)	\
	mov	MMU_TAG_ACCESS, scr1;				\
	ldxa	[scr1]ASI_DMMU, dtagacc;			\
	ldxa	[scr1]ASI_IMMU, itagacc

/*
 * Get MMU data fault address from the tag access register
 *
 * In:
 *   daddr, scr1 = scratch registers
 * Out:
 *   daddr = MMU data fault address
 */
#define	GET_MMU_D_ADDR(daddr, scr1)				\
	mov	MMU_TAG_ACCESS, scr1;				\
	ldxa	[scr1]ASI_DMMU, daddr;				\
	set	TAGACC_CTX_MASK, scr1;				\
	andn	daddr, scr1, daddr
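
/*
 * Added commentary (not in the original): the tag access register
 * holds the faulting VA in its upper bits and the context number in
 * its low bits, so clearing TAGACC_CTX_MASK above leaves just the
 * page-aligned fault address in daddr.
 */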


/*
 * Load ITLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers (not used)
 */
#define	ITLB_STUFF(tte, scr1, scr2, scr3, scr4)			\
	stxa	tte, [%g0]ASI_ITLB_IN

/*
 * Load DTLB entry
 *
 * In:
 *   tte = reg containing tte
 *   scr1, scr2, scr3, scr4 = scratch registers (not used)
 */
#define	DTLB_STUFF(tte, scr1, scr2, scr3, scr4)			\
	stxa	tte, [%g0]ASI_DTLB_IN


/*
 * Returns PFN given the TTE and vaddr
 *
 * In:
 *   tte = reg containing tte
 *   vaddr = reg containing vaddr
 *   label = temporary label used to construct branch targets
 *   scr1, scr2, scr3 = scratch registers
 * Out:
 *   tte = PFN value
 */
#define	TTETOPFN(tte, vaddr, label, scr1, scr2, scr3)			\
	srlx	tte, TTE_SZ_SHFT, scr1;					\
	and	scr1, TTE_SZ_BITS, scr1;	/* scr1 = tte_size */	\
	srlx	tte, TTE_SZ2_SHFT, scr3;				\
	and	scr3, TTE_SZ2_BITS, scr3;	/* scr3 = tte_size2 */	\
	or	scr1, scr3, scr1;					\
	sllx	scr1, 1, scr2;						\
	add	scr2, scr1, scr2;		/* mulx 3 */		\
	sllx	tte, TTE_PA_LSHIFT, tte;				\
	add	scr2, MMU_PAGESHIFT + TTE_PA_LSHIFT, scr3;		\
	/* BEGIN CSTYLED */						\
	brz,pt	scr2, label/**/1;					\
	  srlx	tte, scr3, tte;						\
	/* END CSTYLED */						\
	sllx	tte, scr2, tte;						\
	set	1, scr1;						\
	add	scr2, MMU_PAGESHIFT, scr3;				\
	sllx	scr1, scr3, scr1;					\
	sub	scr1, 1, scr1;	/* scr1 = TTE_PAGE_OFFSET(ttesz) */	\
	and	vaddr, scr1, scr2;					\
	srln	scr2, MMU_PAGESHIFT, scr2;				\
	or	tte, scr2, tte;						\
	/* CSTYLED */							\
label/**/1:
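
/*
 * Worked example (added commentary, not in the original): the combined
 * size code is multiplied by 3 because each page size is 8x (2^3) the
 * previous one, so a 64K TTE (size code 1) shifts by MMU_PAGESHIFT + 3.
 * The low bits of vaddr within the large page are then folded in so
 * the result is always an exact 8K PFN.
 */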


/*
 * TTE_SET_REF_ML is a macro that updates the reference bit if it is
 * not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * label    = temporary label
 */

#define	TTE_SET_REF_ML(tte, ttepa, tteva, tsbarea, tmp1, label)		\
	/* BEGIN CSTYLED */						\
	/* check reference bit */					\
	andcc	tte, TTE_REF_INT, %g0;					\
	bnz,pt	%xcc, label/**/4;	/* if ref bit set, skip ahead */ \
	  nop;								\
	GET_CPU_IMPL(tmp1);						\
	cmp	tmp1, CHEETAH_IMPL;					\
	bl,a	%icc, label/**/1;					\
	/* update reference bit */					\
	lduh	[tsbarea + TSBMISS_DMASK], tmp1;			\
	stxa	%g0, [ttepa]ASI_DC_INVAL; /* flush line from dcache */	\
	membar	#Sync;							\
	ba	label/**/2;						\
label/**/1:								\
	and	tteva, tmp1, tmp1;					\
	stxa	%g0, [tmp1]ASI_DC_TAG; /* flush line from dcache */	\
	membar	#Sync;							\
label/**/2:								\
	or	tte, TTE_REF_INT, tmp1;					\
	casxa	[ttepa]ASI_MEM, tte, tmp1;	/* update ref bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/2;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_REF_INT, tte;					\
label/**/4:								\
	/* END CSTYLED */
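
/*
 * Added commentary (not in the original): the casxa sequence above is
 * a compare-and-swap retry loop.  If another CPU modified the TTE
 * between the read and the casxa, the comparison fails, the annulled
 * delay slot reloads the TTE through its physical address, and the
 * store is retried with TTE_REF_INT OR-ed into the fresh value.
 */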


/*
 * TTE_SET_REFMOD_ML is a macro that updates the reference and modify bits
 * if they are not already set.
 *
 * Parameters:
 * tte      = reg containing tte
 * ttepa    = physical pointer to tte
 * tteva    = virtual ptr to tte
 * tsbarea  = tsb miss area
 * tmp1     = tmp reg
 * label    = temporary label
 * exitlabel = label to jump to if the write permission bit is not set
 */

#define	TTE_SET_REFMOD_ML(tte, ttepa, tteva, tsbarea, tmp1, label,	\
	exitlabel)							\
	/* BEGIN CSTYLED */						\
	/* check write permission bit */				\
	andcc	tte, TTE_WRPRM_INT, %g0;				\
	bz,pn	%xcc, exitlabel;	/* exit if wr_perm not set */	\
	  nop;								\
	andcc	tte, TTE_HWWR_INT, %g0;					\
	bnz,pn	%xcc, label/**/4;	/* nothing to do */		\
	  nop;								\
	GET_CPU_IMPL(tmp1);						\
	cmp	tmp1, CHEETAH_IMPL;					\
	bl,a	%icc, label/**/1;					\
	/* update reference bit */					\
	lduh	[tsbarea + TSBMISS_DMASK], tmp1;			\
	stxa	%g0, [ttepa]ASI_DC_INVAL; /* flush line from dcache */	\
	membar	#Sync;							\
	ba	label/**/2;						\
label/**/1:								\
	and	tteva, tmp1, tmp1;					\
	stxa	%g0, [tmp1]ASI_DC_TAG; /* flush line from dcache */	\
	membar	#Sync;							\
label/**/2:								\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tmp1;			\
	casxa	[ttepa]ASI_MEM, tte, tmp1; /* update ref/mod bit */	\
	cmp	tte, tmp1;						\
	bne,a,pn %xcc, label/**/2;					\
	  ldxa	[ttepa]ASI_MEM, tte;	/* MMU_READTTE through pa */	\
	or	tte, TTE_HWWR_INT | TTE_REF_INT, tte;			\
label/**/4:								\
	/* END CSTYLED */


/*
 * Synthesize TSB base register contents for a process with
 * a single TSB.
 *
 * We patch the virtual address mask in at runtime since the
 * number of significant virtual address bits in the TSB VA
 * can vary depending upon the TSB slab size being used on the
 * machine.
 *
 * In:
 *   tsbinfo = TSB info pointer (ro)
 *   vabase = value of utsb_vabase (ro)
 * Out:
 *   tsbreg = value to program into TSB base register
 */

#define	MAKE_TSBREG(tsbreg, tsbinfo, vabase, tmp1, tmp2, label)		\
	/* BEGIN CSTYLED */						\
	ldx	[tsbinfo + TSBINFO_VADDR], tmp1;			\
	.global	label/**/_tsbreg_vamask					;\
label/**/_tsbreg_vamask:						\
	or	%g0, RUNTIME_PATCH, tsbreg;				\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp2;			\
	sllx	tsbreg, TSBREG_VAMASK_SHIFT, tsbreg;			\
	or	vabase, tmp2, tmp2;					\
	and	tmp1, tsbreg, tsbreg;					\
	or	tsbreg, tmp2, tsbreg;					\
	/* END CSTYLED */
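
/*
 * Added commentary (not in the original): RUNTIME_PATCH is a
 * placeholder immediate; at boot, sfmmu_patch_utsb rewrites the
 * instruction at the _tsbreg_vamask label with the real VA mask once
 * the TSB slab size is known.  The result packs utsb_vabase, the
 * masked TSB VA bits, and the TSB size code into a single value for
 * the TSB base register.
 */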


/*
 * Synthesize TSB base register contents for a process with
 * two TSBs.  See hat_sfmmu.h for the layout of the TSB base
 * register in this case.
 *
 * In:
 *   tsb1 = pointer to first TSB info (ro)
 *   tsb2 = pointer to second TSB info (ro)
 * Out:
 *   tsbreg = value to program into TSB base register
 */
#define	MAKE_TSBREG_SECTSB(tsbreg, tsb1, tsb2, tmp1, tmp2, tmp3, label)	\
	/* BEGIN CSTYLED */						\
	set	TSBREG_MSB_CONST, tmp3					;\
	sllx	tmp3, TSBREG_MSB_SHIFT, tsbreg				;\
	.global	label/**/_tsbreg_vamask					;\
label/**/_tsbreg_vamask:						;\
	or	%g0, RUNTIME_PATCH, tmp3				;\
	sll	tmp3, TSBREG_VAMASK_SHIFT, tmp3				;\
	ldx	[tsb1 + TSBINFO_VADDR], tmp1				;\
	ldx	[tsb2 + TSBINFO_VADDR], tmp2				;\
	and	tmp1, tmp3, tmp1					;\
	and	tmp2, tmp3, tmp2					;\
	sllx	tmp2, TSBREG_SECTSB_MKSHIFT, tmp2			;\
	or	tmp1, tmp2, tmp3					;\
	or	tsbreg, tmp3, tsbreg					;\
	lduh	[tsb1 + TSBINFO_SZCODE], tmp1				;\
	lduh	[tsb2 + TSBINFO_SZCODE], tmp2				;\
	and	tmp1, TSB_SOFTSZ_MASK, tmp1				;\
	and	tmp2, TSB_SOFTSZ_MASK, tmp2				;\
	sllx	tmp2, TSBREG_SECSZ_SHIFT, tmp2				;\
	or	tmp1, tmp2, tmp3					;\
	or	tsbreg, tmp3, tsbreg					;\
	/* END CSTYLED */

/*
 * Load TSB base register.  In the single TSB case this register
 * contains utsb_vabase, bits 21:13 of tsbinfo->tsb_va, and the
 * TSB size code in bits 2:0.  See hat_sfmmu.h for the layout in
 * the case where we have multiple TSBs per process.
 *
 * In:
 *   tsbreg = value to load (ro)
 */
#define	LOAD_TSBREG(tsbreg, tmp1, tmp2)					\
	mov	MMU_TSB, tmp1;						\
	sethi	%hi(FLUSH_ADDR), tmp2;					\
	stxa	tsbreg, [tmp1]ASI_DMMU;		/* dtsb reg */		\
	stxa	tsbreg, [tmp1]ASI_IMMU;		/* itsb reg */		\
	flush	tmp2
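
/*
 * Added commentary (not in the original): the flush of FLUSH_ADDR
 * after the two stxa instructions synchronizes the stores to the
 * internal MMU registers before any dependent instruction fetch, as
 * UltraSPARC requires after updates through ASI_DMMU/ASI_IMMU.
 */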

/*
 * Load the locked TSB TLB entry.
 *
 * In:
 *   tsbinfo = tsb_info pointer as va (ro)
 *   tteidx = shifted index into TLB to load the locked entry (ro)
 *   va = virtual address at which to load the locked TSB entry (ro)
 * Scratch:
 *   tmp
 */
#define	LOAD_TSBTTE(tsbinfo, tteidx, va, tmp)				\
	mov	MMU_TAG_ACCESS, tmp;					\
	stxa	va, [tmp]ASI_DMMU;		/* set tag access */	\
	membar	#Sync;							\
	ldx	[tsbinfo + TSBINFO_TTE], tmp;	/* fetch locked tte */	\
	stxa	tmp, [tteidx]ASI_DTLB_ACCESS;	/* load locked tte */	\
	membar	#Sync


/*
 * In the current implementation, TSBs usually come from physically
 * contiguous chunks of memory up to 4MB in size, but 8K TSBs may be
 * allocated from 8K chunks of memory under certain conditions.  To
 * prevent aliasing in the virtual address cache when the TSB slab is
 * 8K in size we must align the reserved (TL>0) TSB virtual address to
 * have the same low-order bits as the kernel (TL=0) TSB virtual address,
 * and map 8K TSBs with an 8K TTE.  In cases where the TSB reserved VA
 * range is smaller than the assumed 4M we will patch the shift at
 * runtime; otherwise we leave it alone (which is why the RUNTIME_PATCH
 * constant doesn't appear below).
 *
 * In:
 *   tsbinfo (ro)
 *   resva: reserved VA base for this TSB
 * Out:
 *   resva: corrected VA for this TSB
 */
#define	RESV_OFFSET(tsbinfo, resva, tmp1, label)			\
	/* BEGIN CSTYLED */						\
	lduh	[tsbinfo + TSBINFO_SZCODE], tmp1			;\
	brgz,pn	tmp1, 9f						;\
	  nop								;\
	ldx	[tsbinfo + TSBINFO_VADDR], tmp1				;\
	.global	label/**/_resv_offset					;\
label/**/_resv_offset:							;\
	sllx	tmp1, (64 - MMU_PAGESHIFT4M), tmp1			;\
	srlx	tmp1, (64 - MMU_PAGESHIFT4M), tmp1			;\
	or	tmp1, resva, resva					;\
9:	/* END CSTYLED */
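
/*
 * Added commentary (not in the original): the sllx/srlx pair above
 * isolates the low MMU_PAGESHIFT4M bits of the kernel TSB VA, i.e.
 * its offset within a 4M slab; OR-ing that offset onto the reserved
 * VA base gives both mappings the same low-order (cache index) bits.
 */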

/*
 * Determine the pointer of the entry in the first TSB to probe given
 * the 8K TSB pointer register contents.
 *
 * In:
 *   tsbp8k = 8K TSB pointer register (ro)
 *   tmp = scratch register
 *   label = label for hot patching of utsb_vabase
 *
 * Out: tsbe_ptr = TSB entry address
 *
 * Note: This macro is patched at runtime for performance reasons.
 *	 Any changes here require sfmmu_patch_utsb to be fixed as well.
 */

#define	GET_1ST_TSBE_PTR(tsbp8k, tsbe_ptr, tmp, label)			\
	/* BEGIN CSTYLED */						\
label/**/_get_1st_tsbe_ptr:						;\
	RUNTIME_PATCH_SETX(tsbe_ptr, tmp)				;\
	/* tsbe_ptr = contents of utsb_vabase */			;\
	/* clear upper bits leaving just bits 21:0 of TSB ptr. */	;\
	sllx	tsbp8k, TSBREG_FIRTSB_SHIFT, tmp			;\
	/* finish clear */						;\
	srlx	tmp, TSBREG_FIRTSB_SHIFT, tmp				;\
	/* or-in bits 41:22 of the VA to form the real pointer. */	;\
	or	tsbe_ptr, tmp, tsbe_ptr					\
	/* END CSTYLED */


/*
 * Probe the first TSB; if a match is found, insert the TTE into the
 * TLB and retry the faulted instruction.
 *
 * tsbe_ptr = precomputed first TSB entry pointer (in, ro)
 * vpg_4m = 4M virtual page number for tag matching  (in, ro)
 * label = where to branch to if this is a miss (text)
 * %asi = atomic ASI to use for the TSB access
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = location tag will be retrieved into from TSB (out)
 * g5 = location data(tte) will be retrieved into from TSB (out)
 */
#define	PROBE_1ST_DTSB(tsbe_ptr, vpg_4m, label)	/* g4/g5 clobbered */	\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]%asi, %g4	/* g4 = tag, g5 = data */	;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */
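
/*
 * Added commentary (not in the original): ldda with a quad-load ASI
 * fetches the 16-byte TSB entry atomically, placing the tag in %g4
 * and the data in %g5, which is why %asi must be set up first (see
 * SETUP_UTSB_ATOMIC_ASI above).
 */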


/*
 * Same as above, except that if the TTE doesn't have the execute
 * bit set, this branches directly to exec_fault.
 */
#define	PROBE_1ST_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]%asi, %g4	/* g4 = tag, g5 = data */	;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, label/**/1	/* branch if !match */		;\
	  nop								;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */


/*
 * Determine the base address of the second TSB given the 8K TSB
 * pointer register contents.
 *
 * In:
 *   tsbp8k = 8K TSB pointer register (ro)
 *   tmp = scratch register
 *   label = label for hot patching of utsb_vabase
 *
 * Out:
 *   tsbbase = TSB base address
 *
 * Note: This macro is patched at runtime for performance reasons.
 *	 Any changes here require sfmmu_patch_utsb to be fixed as well.
 */

#define	GET_2ND_TSB_BASE(tsbp8k, tsbbase, tmp, label)			\
	/* BEGIN CSTYLED */						\
label/**/_get_2nd_tsb_base:						;\
	RUNTIME_PATCH_SETX(tsbbase, tmp)				;\
	/* tsbbase = contents of utsb4m_vabase */			;\
	/* clear upper bits leaving just bits 21:xx of TSB addr. */	;\
	sllx	tsbp8k, TSBREG_SECTSB_LSHIFT, tmp			;\
	/* clear lower bits leaving just 21:13 in 8:0 */		;\
	srlx	tmp, (TSBREG_SECTSB_RSHIFT + MMU_PAGESHIFT), tmp	;\
	/* adjust TSB offset to bits 21:13 */				;\
	sllx	tmp, MMU_PAGESHIFT, tmp					;\
	or	tsbbase, tmp, tsbbase					;\
	/* END CSTYLED */

/*
 * Determine the size code of the second TSB given the 8K TSB
 * pointer register contents.
 *
 * In:
 *   tsbp8k = 8K TSB pointer register (ro)
 * Out:
 *   size = TSB size code
 */

#define	GET_2ND_TSB_SIZE(tsbp8k, size)					\
	srlx	tsbp8k, TSBREG_SECSZ_SHIFT, size;			\
	and	size, TSB_SOFTSZ_MASK, size

/*
 * Get the location in the 2nd TSB of the tsbe for this fault.
 * Assumes that the second TSB only contains 4M mappings.
 *
 * In:
 *   tagacc = tag access register (clobbered)
 *   tsbp8k = contents of TSB8K pointer register (ro)
 *   tmp1, tmp2 = scratch registers
 *   label = label at which to patch in reserved TSB 4M VA range
 * Out:
 *   tsbe_ptr = pointer to the tsbe in the 2nd TSB
 */
#define	GET_2ND_TSBE_PTR(tagacc, tsbp8k, tsbe_ptr, tmp1, tmp2, label)	\
	GET_2ND_TSB_BASE(tsbp8k, tsbe_ptr, tmp2, label);		\
	/* tsbe_ptr = TSB base address, tmp2 = junk */			\
	GET_2ND_TSB_SIZE(tsbp8k, tmp1);					\
	/* tmp1 = TSB size code */					\
	GET_TSBE_POINTER(MMU_PAGESHIFT4M, tsbe_ptr, tagacc, tmp1, tmp2)
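
/*
 * Added commentary (not in the original): this composes the two
 * helpers above with GET_TSBE_POINTER (defined elsewhere): the base
 * address comes from the patched utsb4m_vabase, the size code bounds
 * the index, and the fault VA from tagacc selects the entry.
 */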


/*
 * vpg_4m = 4M virtual page number for tag matching (in)
 * tsbe_ptr = precomputed second TSB entry pointer (in)
 * label = label to use to make branch targets unique (text)
 *
 * For trapstat, we have to explicitly use these registers.
 * g4 = tag portion of TSBE (out)
 * g5 = data portion of TSBE (out)
 */
#define	PROBE_2ND_DTSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]%asi, %g4	/* g4 = tag, g5 = data */	;\
	/* since we are looking at 2nd tsb, if it's valid, it must be 4M */ ;\
	cmp	%g4, vpg_4m						;\
	bne,pn	%xcc, label/**/1					;\
	  nop								;\
	mov	tsbe_ptr, %g1		/* trace_tsbhit wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	/* trapstat expects tte in %g5 */				;\
	retry				/* retry faulted instruction */	;\
label/**/1:								\
	/* END CSTYLED */

#ifndef TRAPTRACE
/*
 * Same as above, with the following additions:
 * If the TTE found is not executable, branch directly
 * to exec_fault after checking for ITLB synthesis.
 * On a TSB miss, branch to the TSB miss handler.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]%asi, %g4	/* g4 = tag, g5 = data */	;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */		;\
	  or	%g0, TTE4M, %g6						;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,a,pn	%icc, label/**/1					;\
	  sllx	%g6, TTE_SZ_SHFT, %g6					;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */ ;\
label/**/1:								;\
	andcc	%g5, TTE_E_SYNTH_INT, %g0				;\
	bz,pn	%icc, exec_fault					;\
	  or	%g5, %g6, %g5						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	\
	/* END CSTYLED */
#else /* TRAPTRACE */
/*
 * Same as above, with the TT_TRACE and mov tsbe_ptr, %g1 additions.
 */
#define	PROBE_2ND_ITSB(tsbe_ptr, vpg_4m, label)				\
	/* BEGIN CSTYLED */						\
	ldda	[tsbe_ptr]%asi, %g4	/* g4 = tag, g5 = data */	;\
	cmp	%g4, vpg_4m		/* compare tag w/ TSB */	;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* branch if !match */		;\
	  or	%g0, TTE4M, %g6						;\
	andcc	%g5, TTE_EXECPRM_INT, %g0  /* check execute bit */	;\
	bz,a,pn	%icc, label/**/1					;\
	  sllx	%g6, TTE_SZ_SHFT, %g6					;\
	mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */ ;\
label/**/1:								;\
	andcc	%g5, TTE_E_SYNTH_INT, %g0				;\
	bz,pn	%icc, exec_fault					;\
	  mov	tsbe_ptr, %g1		/* trap trace wants ptr in %g1 */ ;\
	or	%g5, %g6, %g5						;\
	TT_TRACE(trace_tsbhit)						;\
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)				;\
	retry				/* retry faulted instruction */	\
	/* END CSTYLED */

#endif /* TRAPTRACE */
#endif /* _ASM */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_MACH_SFMMU_H */