/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * 	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
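	/* The top four EA bits select the region: 0xc is the kernel
	 * linear mapping at PAGE_OFFSET, higher regions hold the
	 * vmalloc/ioremap space, and anything lower is a user address.
	 * After the compare above, cr7 therefore reads lt = user,
	 * eq = linear mapping, gt = vmalloc/ioremap.
	 */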

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check whether we are hitting the linear mapping or the
	 * vmalloc/ioremap kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
	b	slb_finish_load

1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
	 * will be patched by the kernel at boot
	 */
BEGIN_FTR_SECTION
	/* check whether this is in vmalloc or ioremap space */
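	/* The low bits of the ESID give the segment index within this
	 * region; indices below VMALLOC_SIZE >> 28 are vmalloc segments,
	 * anything above them is treated as ioremap space.
	 */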
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	slb_finish_load
5:
END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
	b	slb_finish_load


0:	/* user address: proto-VSID = context << USER_ESID_BITS | ESID.
	 * First check if the address is within the boundaries of the
	 * user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */


#ifdef CONFIG_PPC_MM_SLICES
	cmpldi	r10,16

	/* Get the slice index * 4 in r11 and matching slice size mask in r9 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	sldi	r11,r10,2
	blt	5f
	ld	r9,PACAHIGHSLICEPSIZE(r13)
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
	andi.	r11,r11,0x3c
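	/* Each slice's page-size index occupies 4 bits of the mask in
	 * r9, so the slice index scaled by 4 above is the shift count
	 * that brings the matching nibble down to the low bits below.
	 */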

5:	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp
	 */
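	/* mmu_psize_defs[] is indexed by the page-size index extracted
	 * above; its sllp field holds the SLB L/LP encoding for that
	 * page size, which is then merged with SLB_VSID_USER below.
	 */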
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0
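	/* r10 now holds the proto-VSID: (context << USER_ESID_BITS) | ESID.
	 * The range check at the top of the user path guarantees the ESID
	 * fits in the low USER_ESID_BITS bits, so it cannot overlap the
	 * context value inserted above it.
	 */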
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * 	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9)
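	/* The scramble is a multiplication by VSID_MULTIPLIER modulo
	 * VSID_MODULUS (2^36 - 1); it spreads consecutive proto-VSIDs
	 * across the hash table and maps the reserved proto-VSID
	 * 0xfffffffff to the bad VSID 0, as noted above.
	 */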
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r3,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r3
	beq	3f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

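	/* PACASTABRR is the round-robin pointer to the next victim slot.
	 * When it reaches SLB_NUM_ENTRIES it wraps back to SLB_NUM_BOLTED
	 * rather than to 0, so the bolted entries are never evicted.
	 */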
	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* use a cpu feature mask if we ever change our slb size */
	cmpldi	r10,SLB_NUM_ENTRIES

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r10 = ESID data, r11 = VSID data */
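	/* slbmte takes the VSID half of the entry (VSID plus the
	 * protection/LLP flags) in RS and the ESID half (ESID, valid
	 * bit and entry index) in RB.
	 */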

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
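	/* The cache records the ESIDs of the user segments inserted here
	 * so that a context switch can invalidate just those entries.
	 * Once it overflows, the pointer is pushed past SLB_CACHE_ENTRIES
	 * to force a full SLB flush instead.
	 */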
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr