/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/highmem.h>
#include <asm/spr-regs.h>

	.section	.text
	.balign		4

	.globl		__entry_insn_mmu_miss
__entry_insn_mmu_miss:
	break
	nop

	.globl		__entry_insn_mmu_exception
__entry_insn_mmu_exception:
	break
	nop

	.globl		__entry_data_mmu_miss
__entry_data_mmu_miss:
	break
	nop

	.globl		__entry_data_mmu_exception
__entry_data_mmu_exception:
	break
	nop

###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# On entry:
#   GR29 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
	.type		__tlb_kernel_fault,@function
__tlb_kernel_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31

	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_kernel_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */

	bra		__entry_kernel_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */
	bra		__entry_kernel_handle_mmu_fault_sstep

	.size		__tlb_kernel_fault, .-__tlb_kernel_fault

###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#   GR28 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
	.type		__tlb_user_fault,@function
__tlb_user_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31
	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_user_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_user_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr
	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault_sstep

	.size		__tlb_user_fault, .-__tlb_user_fault

###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
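#
# Roughly equivalent C for the fast path below - an illustrative sketch only,
# not taken from the original source; "ptd" is just a name for the page table
# mapped at DAMLR4 and the PTE layout is simplified:
#
#	if (((ear0 ^ scr0) >> 26) != 0)		/* cached PGE doesn't cover ear0 */
#		look up and map the right PTD via the PGD (or fault if absent);
#	pte = ptd[(ear0 >> 14) & 0xfff];
#	if (!(pte & _PAGE_PRESENT))
#		goto __tlb_kernel_fault;
#	ptd[(ear0 >> 14) & 0xfff] = pte | _PAGE_ACCESSED;
#	/* punt any valid entry already in IAMR1/DAMR1 into the main TLB, then
#	 * load (ear0 & 0xfffff000) | CXNR into IAMLR1/DAMLR1 and the PTE
#	 * (with ACCESSED cleared) into IAMPR1/DAMPR1 */
#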
	.globl		__entry_kernel_insn_tlb_miss
	.type		__entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_k_PTD_miss

__itlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
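	# (illustrative arithmetic, not in the original: GR31 holds EAR0 >> 12
	# at this point and GR30 holds the mask 0x3ffc, so the AND below yields
	# ((EAR0 >> 14) & 0xfff) * 4 - the byte offset of the 4-byte PTE within
	# the 16KB page table mapped at DAMLR4)
	#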
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check DAMR1 lest we cause a multiple-DAT-hit exception
	# - IAMPR1 has no WP bit, and we mustn't lose WP information
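	# - (illustrative note, not in the original: IAMR1 and DAMR1 are always
	#   loaded with the same translation below, so the punt probes on
	#   IAMLR1 but takes its protection bits from DAMPR1, which does carry
	#   WP; tlbpr #4 invalidates the matching entries, hence xAMPRx_V has
	#   to be set again before tlbpr #2 writes the entry into the TLB)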
	movsg		iampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_k_nopunt		/* punt not required */

	movsg		iamlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		iamlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__itlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	#   - access the PGD with EAR0[31:26]
	#   - DAMLR3 points to the virtual address of the page directory
	#   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
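	#   - illustrative arithmetic, not in the original: the PGE offset is
	#     (EAR0 >> 26) << 8, i.e. a 256-byte stride per PGE, and shifting it
	#     left by a further 18 bits rebuilds the 64MB-aligned coverage base
	#     (EAR0 & 0xfc000000) that is cached in SCR0 below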
__itlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_k_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_k_PTD_mapped

__itlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss

###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
	.globl		__entry_kernel_data_tlb_miss
	.type		__entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check IAMR1 lest we cause a multiple-DAT-hit exception
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_k_nopunt		/* punt not required */

	movsg		damlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	#   - access the PGD with EAR0[31:26]
	#   - DAMLR3 points to the virtual address of the page directory
	#   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_k_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_k_PTD_mapped

__dtlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss

###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
	.globl		__entry_user_insn_tlb_miss
	.type		__entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1/DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	#   - access the PGD with EAR0[31:26]
	#   - DAMLR3 points to the virtual address of the page directory
	#   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_u_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_u_PTD_mapped

__itlb_u_bigpage:
	break
	nop

	.size		__entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss

###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
	.globl		__entry_user_data_tlb_miss
	.type		__entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30

__dtlb_u_using_iPTD:
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	#   - first of all, check the insn PGE cache - we may well get a hit there
	#   - access the PGD with EAR0[31:26]
	#   - DAMLR3 points to the virtual address of the page directory
	#   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
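	#   - illustrative note, not in the original: SCR0 is the coverage base
	#     cached by the ITLB-miss path, so if ((EAR0 ^ SCR0) >> 26) == 0 the
	#     page table already mapped through DAMR4/DAMLR4 covers this data
	#     fault and is reused via __dtlb_u_using_iPTD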
__dtlb_u_PTD_miss:
	movsg		scr0,gr31			/* consult the insn-PGE-cache key */
	xor		gr28,gr31,gr31
	srlicc		gr31,#26,gr0,icc0
	srli		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_iPGE_miss

	# what we're looking for is covered by the insn-PGE-cache
	setlos		0x3ffc,gr30
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	bra		__dtlb_u_using_iPTD

__dtlb_u_iPGE_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_u_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_u_PTD_mapped

__dtlb_u_bigpage:
	break
	nop

	.size		__entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss