#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12

/* Location of cpu0's segment table */
#define STAB0_PAGE	0x6
#define STAB0_OFFSET	(STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)

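/*
 * For example, STAB0_OFFSET = 0x6 << 12 = 0x6000, so a kernel linked
 * with PHYSICAL_START = 0 keeps cpu0's segment table at physical
 * address 0x6000.
 */
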
#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* !__ASSEMBLY__ */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

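/*
 * Illustrative sketch (not part of this header): an SLB entry for a
 * bolted kernel 256M segment is typically assembled as
 *
 *	vsid_word = (vsid << SLB_VSID_SHIFT) | SLB_VSID_KERNEL | sllp
 *	esid_word = (esid << SID_SHIFT) | SLB_ESID_V | entry_index
 *
 * before being loaded with slbmte; slb_allocate in slb_low.S has the
 * authoritative version.
 */
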
#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux     */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */

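/*
 * For example, a kernel-only mapping uses PP_RWXX (user accesses
 * fault), while an ordinary user read/write page is installed with
 * PP_RWRW in the HPTE_R_PP field of the second HPTE dword.
 */
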
#ifndef __ASSEMBLY__

struct hash_pte {
	unsigned long v;
	unsigned long r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};

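/*
 * A hypothetical entry for 64K pages (illustrative only; the real
 * array is filled in at boot from firmware properties) might look
 * like:
 *
 *	[MMU_PAGE_64K] = {
 *		.shift	= 16,
 *		.sllp	= SLB_VSID_L | SLB_VSID_LP_01,
 *		.penc	= 1,
 *		.avpnm	= 0,
 *		.tlbiel	= 1,
 *	},
 */
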
#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1


#ifndef __ASSEMBLY__

/*
 * The current system page and segment sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long va, int psize,
					  int ssize)
{
	unsigned long v;
	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

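/*
 * Typical (sketched) use when inserting a hash PTE: build the first
 * dword, then mark it valid, roughly as the low-level insert routines
 * do:
 *
 *	unsigned long v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M)
 *			  | HPTE_V_VALID;
 */
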
/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}

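/*
 * For example, with the hypothetical 64K entry sketched above
 * (shift = 16, penc = 1), a physical address of 0x10000 encodes as
 * (0x10000 & ~0xffff) | (1 << 12) = 0x11000: the LP encoding rides
 * in the low-order bits of the RPN field for large pages.
 */
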
/*
 * Build a VA given VSID, EA and segment size
 */
static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
				   int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return (vsid << 28) | (ea & 0xfffffffUL);
	return (vsid << 40) | (ea & 0xffffffffffUL);
}

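/*
 * The shifts mirror the segment geometry: a 256M segment keeps the
 * low 28 bits of the EA as the offset, a 1T segment the low 40 bits.
 * E.g. for vsid = 0x2 and ea = 0x1234567 in a 256M segment, hpt_va()
 * returns (0x2 << 28) | 0x1234567 = 0x21234567.
 */
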
/*
 * This hashes a virtual address
 */

static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
				     int ssize)
{
	unsigned long hash, vsid;

	if (ssize == MMU_SEGSIZE_256M) {
		hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
	} else {
		vsid = va >> 40;
		hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
	}
	return hash & 0x7fffffffffUL;
}

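/*
 * Callers turn this hash into a PTE-group index by masking with
 * htab_hash_mask; a minimal sketch of the primary-group lookup (the
 * native hash code does the authoritative version):
 *
 *	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
 *	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *	hptep = htab_address + slot;
 */
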
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize);
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
extern void add_gpage(unsigned long addr, unsigned long page_size,
			  unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *	(context << 15) | (esid & 0x7fff)
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 * 	- We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 * 	- We allow for 15 significant bits of ESID and 20 bits of
 * context for user addresses.  i.e. 8T (43 bits) of address space for
 * up to 1M contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 * 	- The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER_256M	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS_256M		36
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		24
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define CONTEXT_BITS		19
#define USER_ESID_BITS		16
#define USER_ESID_BITS_1T	4

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 * 	- rt and rx must be different registers
 * 	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 * 	  bits may contain other garbage, so you may need to mask the
 * 	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx


#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[2];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

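/*
 * Checking the arithmetic for 64K pages (PAGE_SHIFT = 16): each
 * 4-byte protection word covers one 64K page, so a page of words
 * holds SBP_L1_COUNT = 1 << 14 entries covering 2^14 * 64K = 1GB,
 * and a page of SBP_L2_COUNT = 1 << 13 pointers covers
 * 2^13 * 1GB = 8TB, as the comment above says.
 */
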
extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	u64 high_slices_psize;  /* 4 bits per slice for now */
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
} mm_context_t;


#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})

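/*
 * This is the C twin of ASM_VSID_SCRAMBLE above: the product is
 * reduced mod 2^VSID_BITS - 1 by folding the high bits onto the low
 * bits (x mod (2^n - 1) == (x >> n) + (x & (2^n - 1)), possibly off
 * by one), and the (x+1) >> VSID_BITS term then corrects the
 * off-by-one exactly as described in the macro's comment.
 */
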
/* This is only valid for addresses >= PAGE_OFFSET */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble(ea >> SID_SHIFT, 256M);
	return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
}

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << USER_ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << USER_ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

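/*
 * A minimal sketch of how these fit together on a user access (the
 * real fault path adds locking and many more checks):
 *
 *	int ssize = user_segment_size(ea);
 *	unsigned long vsid = get_vsid(mm->context.id, ea, ssize);
 *	unsigned long va   = hpt_va(ea, vsid, ssize);
 */
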
/*
 * This is only used on legacy iSeries in lparmap.c,
 * hence the 256MB segment assumption.
 */
#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER_256M) %	\
				 VSID_MODULUS_256M)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */