/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spr-regs.h>

	.section	.text..tlbmiss
	.balign		4

	.globl		__entry_insn_mmu_miss
__entry_insn_mmu_miss:
	break
	nop

	.globl		__entry_insn_mmu_exception
__entry_insn_mmu_exception:
	break
	nop

	.globl		__entry_data_mmu_miss
__entry_data_mmu_miss:
	break
	nop

	.globl		__entry_data_mmu_exception
__entry_data_mmu_exception:
	break
	nop

###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# On entry:
#   GR29 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
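# In rough C-style pseudocode (an illustrative sketch only; the .pc/.psr field
# names and the saved_ccr variable are assumptions, not real symbols):
#
#	if (pcsr == &__break_tlb_miss_return_break) {
#		/* the miss was being single-stepped: put the real return
#		 * PC/PSR back so single-stepping resumes after the fault */
#		pcsr = __break_tlb_miss_real_return_info.pc;
#		psr  = __break_tlb_miss_real_return_info.psr;
#	}
#	ccr  = saved_ccr;		/* stashed in SCR2 on entry */
#	scr2 = gr29;			/* park the faulting address */
#	gr29 = __kernel_current_task;	/* restore the clobbered GR29 */
#	/* then branch to __entry_kernel_handle_mmu_fault (or its _sstep form) */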
	.type		__tlb_kernel_fault,@function
__tlb_kernel_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31

	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_kernel_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */

	bra		__entry_kernel_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */
	bra		__entry_kernel_handle_mmu_fault_sstep

	.size		__tlb_kernel_fault, .-__tlb_kernel_fault

###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#   GR28 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
	.type		__tlb_user_fault,@function
__tlb_user_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31
	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_user_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_user_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr
	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault_sstep

	.size		__tlb_user_fault, .-__tlb_user_fault

###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
	.globl		__entry_kernel_insn_tlb_miss
	.type		__entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_k_PTD_miss
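	# Note: on entry GR31 = EAR0 ^ SCR0, so (GR31 >> 26) == 0 exactly when the
	# faulting address lies in the same 64MB-aligned region as the PGE cached
	# by the previous miss, i.e. the PTD already mapped through DAMR4 can be
	# reused.  Worked example: EAR0 = 0xc1234000, SCR0 = 0xc0000000 gives
	# EAR0 ^ SCR0 = 0x01234000, which has no bits set at or above bit 26, so
	# the check passes.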

__itlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30
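	# Index arithmetic above: GR31 = (EAR0 >> 12) & 0x3ffc, which equals
	# ((EAR0 >> 14) & 0xfff) * 4, i.e. the byte offset of the 4-byte PTE
	# selected by EAR0[25:14] within the 4096-entry PTD.  An illustrative
	# C-style sketch (register names used as plain variables, not real
	# kernel helpers):
	#
	#	pte_t *slot = (pte_t *)(damlr4 + ((ear0 >> 12) & 0x3ffc));
	#	pte_t pte   = *slot;			/* uncached fetch */
	#	if (!(pte & _PAGE_PRESENT))
	#		goto __tlb_kernel_fault;
	#	*slot = pte | _PAGE_ACCESSED;		/* mark it accessed */
	#	/* the copy kept for the AMPR below has _PAGE_ACCESSED cleared again */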

	# we're using IAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check DAMR1 lest we cause a multiple-DAT-hit exception
	# - IAMPR1 has no WP bit, and we mustn't lose WP information
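	# Punt sketch (illustrative pseudo-C mirroring the tlbpr #4 / #2 operations
	# below; "tlbpr(va, op)" is just shorthand, not a real function):
	#
	#	if (iampr1 & xAMPRx_V) {
	#		tplr = iamlr1;			/* victim VA + context number */
	#		tlbpr(iamlr1, 4);		/* evict matches from TLB, IAMR1, DAMR1 */
	#		tppr = dampr1 | xAMPRx_V;	/* DAMPR1 copy keeps the WP bit */
	#		tplr = iamlr1;
	#		tlbpr(iamlr1, 2);		/* write the victim into the real TLB */
	#		if (tpxr & TPXR_E)		/* TLB write error */
	#			goto __tlb_kernel_fault;
	#	}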
	movsg		iampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_k_nopunt		/* punt not required */

	movsg		iamlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		iamlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__itlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1
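	# i.e. xAMLR1 = (EAR0 & 0xfffff000) | CXNR and xAMPR1 = the PTE value (with
	# _PAGE_ACCESSED cleared again above); both the insn and data AMR1 pairs
	# are written so the two stay in step, as the punt logic above assumes.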

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	#   - access the PGD with EAR0[31:26]
	#   - DAMLR3 points to the virtual address of the page directory
	#   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_k_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0
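	# The PGE walk above, as an illustrative C-style sketch (variable names
	# are just the registers involved):
	#
	#	offset = (ear0 >> 26) << 8;		/* PGE index * 256 bytes */
	#	pge    = *(u32 *)(damlr3 + offset);
	#	if (!(pge & _PAGE_PRESENT))
	#		goto __tlb_kernel_fault;
	#	if (pge & xAMPRx_SS)			/* superpage: unhandled here */
	#		goto __itlb_k_bigpage;
	#	dampr4 = pge | xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V;
	#	scr0   = (ear0 >> 26) << 26;		/* 64MB-aligned base now covered */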

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_k_PTD_mapped

__itlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss

###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
	.globl		__entry_kernel_data_tlb_miss
	.type		__entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check IAMR1 lest we cause a multiple-DAT-hit exception
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_k_nopunt		/* punt not required */

	movsg		damlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	#   - access the PGD with EAR0[31:26]
	#   - DAMLR3 points to the virtual address of the page directory
	#   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_k_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_k_PTD_mapped

__dtlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss

###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
	.globl		__entry_user_insn_tlb_miss
	.type		__entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1/DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	#   - access the PGD with EAR0[31:26]
	#   - DAMLR3 points to the virtual address of the page directory
	#   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_u_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_u_PTD_mapped

__itlb_u_bigpage:
	break
	nop

	.size		__entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss

###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
	.globl		__entry_user_data_tlb_miss
	.type		__entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30

__dtlb_u_using_iPTD:
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	#   - first of all, check the insn PGE cache - we may well get a hit there
	#   - access the PGD with EAR0[31:26]
	#   - DAMLR3 points to the virtual address of the page directory
	#   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_u_PTD_miss:
	movsg		scr0,gr31			/* consult the insn-PGE-cache key */
	xor		gr28,gr31,gr31
	srlicc		gr31,#26,gr0,icc0
	srli		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_iPGE_miss

	# what we're looking for is covered by the insn-PGE-cache
	setlos		0x3ffc,gr30
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	bra		__dtlb_u_using_iPTD
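	# That is: if (EAR0 ^ SCR0) >> 26 == 0, the page table most recently
	# mapped by the insn-side handler (through DAMR4) also covers this data
	# address, so it is reused via __dtlb_u_using_iPTD instead of walking
	# the PGD again.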

__dtlb_u_iPGE_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_u_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_u_PTD_mapped

__dtlb_u_bigpage:
	break
	nop

	.size		__entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss