/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>

#include <asm/ppcdebug.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/naca.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/hvcall.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCallHpt.h>

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */

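/*
 * Three HPT backends are installed via ppc_md at boot (see the
 * hpte_init_* / *_mm_init functions at the bottom of this file):
 * native pSeries (direct stores into the htab), pSeries LPAR
 * (plpar_* hypervisor calls) and iSeries LPAR (HvCallHpt_* calls).
 */
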
HTAB htab_data = {NULL, 0, 0, 0, 0};

extern unsigned long _SDR1;
extern unsigned long klimit;

void make_pte(HPTE *htab, unsigned long va, unsigned long pa,
	      int mode, unsigned long hash_mask, int large);
long plpar_pte_enter(unsigned long flags,
		     unsigned long ptex,
		     unsigned long new_pteh, unsigned long new_ptel,
		     unsigned long *old_pteh_ret, unsigned long *old_ptel_ret);
static long hpte_remove(unsigned long hpte_group);
static long rpa_lpar_hpte_remove(unsigned long hpte_group);
static long iSeries_hpte_remove(unsigned long hpte_group);

static spinlock_t pSeries_tlbie_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t pSeries_lpar_tlbie_lock = SPIN_LOCK_UNLOCKED;
spinlock_t hash_table_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

#define KB (1024)
#define MB (1024*KB)

static inline void
loop_forever(void)
{
	volatile unsigned long x = 1;
	for (; x; x |= 1)
		;
}

static inline void
create_pte_mapping(unsigned long start, unsigned long end,
		   unsigned long mode, unsigned long mask, int large)
{
	unsigned long addr;
	HPTE *htab = (HPTE *)__v2a(htab_data.htab);
	unsigned int step;

	if (large)
		step = 16*MB;
	else
		step = 4*KB;

	for (addr = start; addr < end; addr += step) {
		unsigned long vsid = get_kernel_vsid(addr);
		unsigned long va = (vsid << 28) | (addr & 0xfffffff);
		make_pte(htab, va, (unsigned long)__v2a(addr),
			 mode, mask, large);
	}
}

void
htab_initialize(void)
{
	unsigned long table, htab_size_bytes;
	unsigned long pteg_count;
	unsigned long mode_rw, mask;

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = 1UL << naca->pftSize;
	pteg_count = htab_size_bytes >> 7;
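	/*
	 * A PTEG is 8 HPTEs of 16 bytes each, i.e. 128 bytes, hence the
	 * shift by 7.  For example, pftSize = 24 gives a 16MB HPT
	 * containing 2^24 >> 7 = 131072 PTEGs, one PTEG per two 4K
	 * pages of a 1GB machine.
	 */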

	/* For debug, make the HTAB 1/8 as big as it normally would be. */
	ifppcdebug(PPCDBG_HTABSIZE) {
		pteg_count >>= 3;
		htab_size_bytes = pteg_count << 7;
	}

	htab_data.htab_num_ptegs = pteg_count;
	htab_data.htab_hash_mask = pteg_count - 1;

	if (naca->platform == PLATFORM_PSERIES) {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);
		if (!table) {
			ppc64_terminate_msg(0x20, "hpt space");
			loop_forever();
		}
		htab_data.htab = (HPTE *)__a2v(table);

		/* htab absolute addr + encoded htabsize */
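		/*
		 * The HTABSIZE field of SDR1 is log2(pteg_count) - 11;
		 * the architected minimum HPT is 2^11 PTEGs (256KB).
		 * For example, 2^17 PTEGs encode as 6.
		 */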
		_SDR1 = table + __ilog2(pteg_count) - 11;

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);
	} else {
		/* Using a hypervisor which owns the htab */
		htab_data.htab = NULL;
		_SDR1 = 0;
	}

	mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
	mask = pteg_count - 1;

	if ((naca->platform & PLATFORM_PSERIES) &&
	    cpu_has_largepage() && (naca->physicalMemorySize > 256*MB)) {
		create_pte_mapping((unsigned long)KERNELBASE,
				   KERNELBASE + 256*MB, mode_rw, mask, 0);
		create_pte_mapping((unsigned long)KERNELBASE + 256*MB,
				   KERNELBASE + (naca->physicalMemorySize),
				   mode_rw, mask, 1);
	} else {
		create_pte_mapping((unsigned long)KERNELBASE,
				   KERNELBASE + (naca->physicalMemorySize),
				   mode_rw, mask, 0);
	}
}
#undef KB
#undef MB

/*
 * Create a pte. Used during initialization only.
 * We assume the PTE will fit in the primary PTEG.
 */
void make_pte(HPTE *htab, unsigned long va, unsigned long pa,
	      int mode, unsigned long hash_mask, int large)
{
	HPTE *hptep, local_hpte, rhpte;
	unsigned long hash, vpn, flags, lpar_rc;
	unsigned long i, dummy1, dummy2;
	long slot;

	if (large)
		vpn = va >> LARGE_PAGE_SHIFT;
	else
		vpn = va >> PAGE_SHIFT;

	hash = hpt_hash(vpn, large);

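	/*
	 * The AVPN field is va >> 23: the VSID plus the high-order
	 * bits of the page index.  For a 16M page the low AVPN bit
	 * falls inside the 24-bit page offset, so it is cleared below.
	 */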
	local_hpte.dw1.dword1 = pa | mode;
	local_hpte.dw0.dword0 = 0;
	local_hpte.dw0.dw0.avpn = va >> 23;
	local_hpte.dw0.dw0.bolted = 1;		/* bolted */
	if (large) {
		local_hpte.dw0.dw0.l = 1;	/* large page */
		local_hpte.dw0.dw0.avpn &= ~0x1UL;
	}
	local_hpte.dw0.dw0.v = 1;

	if (naca->platform == PLATFORM_PSERIES) {
		hptep = htab + ((hash & hash_mask)*HPTES_PER_GROUP);

		for (i = 0; i < 8; ++i, ++hptep) {
			if (hptep->dw0.dw0.v == 0) {		/* !valid */
				*hptep = local_hpte;
				return;
			}
		}
	} else if (naca->platform == PLATFORM_PSERIES_LPAR) {
		slot = ((hash & hash_mask)*HPTES_PER_GROUP);

		/* Set CEC cookie to 0                   */
		/* Zero page = 0                         */
		/* I-cache Invalidate = 0                */
		/* I-cache synchronize = 0               */
		/* Exact = 0 - modify any entry in group */
		flags = 0;

		lpar_rc = plpar_pte_enter(flags, slot, local_hpte.dw0.dword0,
					  local_hpte.dw1.dword1,
					  &dummy1, &dummy2);
		if (lpar_rc != H_Success) {
			ppc64_terminate_msg(0x21, "hpte enter");
			loop_forever();
		}
		return;
	} else if (naca->platform == PLATFORM_ISERIES_LPAR) {
		slot = HvCallHpt_findValid(&rhpte, vpn);
		if (slot < 0) {
			/* Must find space in primary group */
			panic("hash_page: hpte already exists\n");
		}
		HvCallHpt_addValidate(slot, 0, (HPTE *)&local_hpte);
		return;
	}

	/* We should _never_ get here, and it is too early to call xmon. */
	ppc64_terminate_msg(0x22, "hpte platform");
	loop_forever();
}

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns zero.
 */
pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
	pgd_t *pg;
	pmd_t *pm;
	pte_t *pt = NULL;
	pte_t pte;

	pg = pgdir + pgd_index(ea);
	if (!pgd_none(*pg)) {
		pm = pmd_offset(pg, ea);
		if (!pmd_none(*pm)) {
			pt = pte_offset(pm, ea);
			pte = *pt;
			if (!pte_present(pte))
				pt = NULL;
		}
	}

	return pt;
}

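/*
 * Compute the HPTE PP (page protection) bits from a Linux pte.
 * Reading the bit arithmetic below (with _PAGE_USER = 0x2,
 * _PAGE_RW = 0x4 and _PAGE_DIRTY = 0x80, as the shifts imply):
 *
 *   kernel page:                    PP = 0 (kernel rw, no user access)
 *   user page, writable and dirty:  PP = 2 (kernel rw, user rw)
 *   user page, read-only or clean:  PP = 3 (read-only everywhere)
 *
 * A clean writable page is thus mapped read-only, so the first store
 * faults into __hash_page and the DIRTY bit can be set in software.
 */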
static inline unsigned long computeHptePP(unsigned long pte)
{
	return (pte & _PAGE_USER) |
		(((pte & _PAGE_USER) >> 1) &
		 ((~((pte >> 2) &	/* _PAGE_RW */
		     (pte >> 7))) &	/* _PAGE_DIRTY */
		  1));
}

/*
 * Handle a fault by adding an HPTE.  If the address can't be determined
 * to be valid via the Linux page tables, return 1.  If handled, return 0.
 */
int __hash_page(unsigned long ea, unsigned long access,
		unsigned long vsid, pte_t *ptep)
{
	unsigned long va, vpn;
	unsigned long newpp, prpn;
	unsigned long hpteflags;
	long slot;
	pte_t old_pte, new_pte;

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> PAGE_SHIFT;
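	/* Each segment maps 256MB: the VA is the VSID followed by the
	 * low 28 bits of the effective address. */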

	/* Acquire the hash table lock to guarantee that the linux
	 * pte we fetch will not change.
	 */
	spin_lock(&hash_table_lock);

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	access |= _PAGE_PRESENT;
	if (unlikely(access & ~(pte_val(*ptep)))) {
		spin_unlock(&hash_table_lock);
		return 1;
	}

	/*
	 * We have found a pte (which was present).  The hash_table_lock
	 * prevents the _PAGE_HASHPTE status from changing (and the RPN,
	 * DIRTY and ACCESSED bits too); the page_table_lock prevents the
	 * pte from being invalidated or modified.
	 */

	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	old_pte = *ptep;
	new_pte = old_pte;

	/* If the attempted access was a store */
	if (access & _PAGE_RW)
		pte_val(new_pte) |= _PAGE_ACCESSED | _PAGE_DIRTY;
	else
		pte_val(new_pte) |= _PAGE_ACCESSED;

	newpp = computeHptePP(pte_val(new_pte));
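	/*
	 * The HPTE slot is cached in the Linux pte as an "hsss" nibble
	 * (h = hash group, sss = slot within the group); the >>15 and
	 * >>12 shifts below imply _PAGE_SECONDARY = 0x8000 and
	 * _PAGE_GROUP_IX = 0x7000.
	 */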

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot, secondary;

		hash = hpt_hash(vpn, 0);
		secondary = (pte_val(old_pte) & _PAGE_SECONDARY) >> 15;
		if (secondary)
			hash = ~hash;
		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
		slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;

		if (ppc_md.hpte_updatepp(slot, secondary,
					 newpp, va, 0) == -1) {
			pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
		} else {
			if (!pte_same(old_pte, new_pte)) {
				*ptep = new_pte;
			}
		}
	}

	if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) {
		/* Update the linux pte with the HPTE slot */
		pte_val(new_pte) &= ~_PAGE_HPTEFLAGS;
		pte_val(new_pte) |= _PAGE_HASHPTE;
		prpn = pte_val(old_pte) >> PTE_SHIFT;

		/* copy appropriate flags from linux pte */
		hpteflags = (pte_val(new_pte) & 0x1f8) | newpp;

		slot = ppc_md.hpte_insert(vpn, prpn, hpteflags, 0, 0);

		pte_val(new_pte) |= ((slot << 12) &
				     (_PAGE_GROUP_IX | _PAGE_SECONDARY));

		*ptep = new_pte;
	}

	spin_unlock(&hash_table_lock);

	return 0;
}

/*
 * Handle a fault by adding an HPTE.  If the address can't be determined
 * to be valid via the Linux page tables, return 1.  If handled, return 0.
 */
int hash_page(unsigned long ea, unsigned long access)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	int ret;

	/* Check for invalid addresses. */
	if (!IS_VALID_EA(ea)) return 1;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		mm = current->mm;
		if (mm == NULL) return 1;
		vsid = get_vsid(mm->context, ea);
		break;
	case IO_REGION_ID:
		mm = &ioremap_mm;
		vsid = get_kernel_vsid(ea);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		break;
	case IO_UNMAPPED_REGION_ID:
		udbg_printf("EEH Error ea = 0x%lx\n", ea);
		PPCDBG_ENTER_DEBUGGER();
		panic("EEH Error ea = 0x%lx\n", ea);
		break;
	case KERNEL_REGION_ID:
		/*
		 * As htab_initialize is now, we shouldn't ever get here since
		 * we're bolting the entire 0xC0... region.
		 */
		udbg_printf("Unexpectedly faulted on kernel address 0x%lx\n", ea);
		PPCDBG_ENTER_DEBUGGER();
		panic("Unexpectedly faulted on kernel address 0x%lx\n", ea);
		break;
	default:
		/* Not a valid range, send the problem up to do_page_fault */
		return 1;
		break;
	}

	pgdir = mm->pgd;
	if (pgdir == NULL) return 1;

	/*
	 * Lock the Linux page table to prevent mmap and kswapd
	 * from modifying entries while we search and update
	 */
	spin_lock(&mm->page_table_lock);

	ptep = find_linux_pte(pgdir, ea);
	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (ptep && pte_present(*ptep)) {
		ret = __hash_page(ea, access, vsid, ptep);
	} else {
		/* If no pte, send the problem up to do_page_fault */
		ret = 1;
	}

	spin_unlock(&mm->page_table_lock);

	return ret;
}

void flush_hash_page(unsigned long context, unsigned long ea, pte_t *ptep)
{
	unsigned long vsid, vpn, va, hash, secondary, slot, flags;
	unsigned long large = 0, local = 0;
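	/* Note: large and local are never set in this function, so the
	 * invalidation below is always for a 4K page and all cpus. */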
	pte_t pte;

	if ((ea >= USER_START) && (ea <= USER_END))
		vsid = get_vsid(context, ea);
	else
		vsid = get_kernel_vsid(ea);

	va = (vsid << 28) | (ea & 0x0fffffff);
	if (large)
		vpn = va >> LARGE_PAGE_SHIFT;
	else
		vpn = va >> PAGE_SHIFT;
	hash = hpt_hash(vpn, large);

	spin_lock_irqsave(&hash_table_lock, flags);

	pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
	secondary = (pte_val(pte) & _PAGE_SECONDARY) >> 15;
	if (secondary) hash = ~hash;
	slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
	slot += (pte_val(pte) & _PAGE_GROUP_IX) >> 12;

	if (pte_val(pte) & _PAGE_HASHPTE) {
		ppc_md.hpte_invalidate(slot, secondary, va, large, local);
	}

	spin_unlock_irqrestore(&hash_table_lock, flags);
}

long plpar_pte_enter(unsigned long flags,
		     unsigned long ptex,
		     unsigned long new_pteh, unsigned long new_ptel,
		     unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	unsigned long dummy, ret;
	ret = plpar_hcall(H_ENTER, flags, ptex, new_pteh, new_ptel,
			  old_pteh_ret, old_ptel_ret, &dummy);
	return ret;
}

long plpar_pte_remove(unsigned long flags,
		      unsigned long ptex,
		      unsigned long avpn,
		      unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	unsigned long dummy;
	return plpar_hcall(H_REMOVE, flags, ptex, avpn, 0,
			   old_pteh_ret, old_ptel_ret, &dummy);
}

long plpar_pte_read(unsigned long flags,
		    unsigned long ptex,
		    unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	unsigned long dummy;
	return plpar_hcall(H_READ, flags, ptex, 0, 0,
			   old_pteh_ret, old_ptel_ret, &dummy);
}

long plpar_pte_protect(unsigned long flags,
		       unsigned long ptex,
		       unsigned long avpn)
{
	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
}

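/*
 * Atomically replace the PP bits, the two low-order bits of HPTE
 * dword1: rldimi inserts the low two bits of pp into bits 62-63 of
 * the old value, and the ldarx/stdcx. pair retries until the store
 * succeeds without an intervening update.
 */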
static __inline__ void set_pp_bit(unsigned long pp, HPTE *addr)
{
	unsigned long old;
	unsigned long *p = &addr->dw1.dword1;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n"
	"	rldimi	%0,%2,0,62\n"
	"	stdcx.	%0,0,%3\n"
	"	bne	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (pp), "r" (p), "m" (*p)
	: "cc");
}

/*
 * Functions used to retrieve word 0 of a given page table entry.
 *
 * Input : slot : PTE index within the page table of the entry to retrieve
 * Output: Contents of word 0 of the specified entry
 */
static unsigned long rpa_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time                        */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access                     */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

	if (lpar_rc != H_Success)
		panic("Error on pte read in get_hpte0 rc = %lx\n", lpar_rc);

	return dword0;
}

unsigned long iSeries_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	HPTE hpte;

	HvCallHpt_get(&hpte, slot);
	dword0 = hpte.dw0.dword0;

	return dword0;
}

/*
 * Functions used to find the PTE for a particular virtual address.
 * Only used during boot when bolting pages.
 *
 * Input : vpn      : virtual page number
 * Output: PTE index within the page table of the entry
 *         -1 on failure
 */
static long hpte_find(unsigned long vpn)
{
	HPTE *hptep;
	unsigned long hash;
	unsigned long i, j;
	long slot;
	Hpte_dword0 dw0;

	hash = hpt_hash(vpn, 0);

	for (j = 0; j < 2; j++) {
		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
		for (i = 0; i < HPTES_PER_GROUP; i++) {
			hptep = htab_data.htab + slot;
			dw0 = hptep->dw0.dw0;

			if ((dw0.avpn == (vpn >> 11)) && dw0.v &&
			    (dw0.h == j)) {
				/* HPTE matches */
				if (j)
					slot = -slot;
				return slot;
			}
			++slot;
		}
		hash = ~hash;
	}

	return -1;
}

static long rpa_lpar_hpte_find(unsigned long vpn)
{
	unsigned long hash;
	unsigned long i, j;
	long slot;
	union {
		unsigned long dword0;
		Hpte_dword0 dw0;
	} hpte_dw0;
	Hpte_dword0 dw0;

	hash = hpt_hash(vpn, 0);

	for (j = 0; j < 2; j++) {
		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
		for (i = 0; i < HPTES_PER_GROUP; i++) {
			hpte_dw0.dword0 = rpa_lpar_hpte_getword0(slot);
			dw0 = hpte_dw0.dw0;

			if ((dw0.avpn == (vpn >> 11)) && dw0.v &&
			    (dw0.h == j)) {
				/* HPTE matches */
				if (j)
					slot = -slot;
				return slot;
			}
			++slot;
		}
		hash = ~hash;
	}

	return -1;
}

static long iSeries_hpte_find(unsigned long vpn)
{
	HPTE hpte;
	long slot;

	/*
	 * The HvCallHpt_findValid interface is as follows:
	 * 0xffffffffffffffff : No entry found.
	 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
	 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
	 */
	slot = HvCallHpt_findValid(&hpte, vpn);
	if (hpte.dw0.dw0.v) {
		if (slot < 0) {
			slot &= 0x7fffffffffffffff;
			slot = -slot;
		}
	} else {
		slot = -1;
	}

	return slot;
}

/*
 * Functions used to invalidate a page table entry from the page table
 * and tlb.
 *
 * Input : slot  : PTE index within the page table of the entry to invalidate
 *         va    : Virtual address of the entry being invalidated
 *         large : 1 = large page (16M)
 *         local : 1 = Use tlbiel to only invalidate the local tlb
 */
static void hpte_invalidate(unsigned long slot,
			    unsigned long secondary,
			    unsigned long va,
			    int large, int local)
{
	HPTE *hptep = htab_data.htab + slot;
	Hpte_dword0 dw0;
	unsigned long vpn, avpn;
	unsigned long flags;

	if (large)
		vpn = va >> LARGE_PAGE_SHIFT;
	else
		vpn = va >> PAGE_SHIFT;

	avpn = vpn >> 11;

	dw0 = hptep->dw0.dw0;

	/*
	 * Do not remove bolted entries.  Alternatively, we could check
	 * the AVPN, hash group, and valid bits.  By doing it this way,
	 * it is common with the pSeries LPAR optimal path.
	 */
	if (dw0.bolted) return;

	/* Invalidate the hpte. */
	hptep->dw0.dword0 = 0;

	/* Invalidate the tlb */
	spin_lock_irqsave(&pSeries_tlbie_lock, flags);
	_tlbie(va, large);
	spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}

static void rpa_lpar_hpte_invalidate(unsigned long slot,
				     unsigned long secondary,
				     unsigned long va,
				     int large, int local)
{
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	/*
	 * Don't remove a bolted entry.  This case can occur when we bolt
	 * pages dynamically after initial boot.
	 */
	lpar_rc = plpar_pte_remove(H_ANDCOND, slot, (0x1UL << 4),
				   &dummy1, &dummy2);

	if (lpar_rc != H_Success)
		panic("Bad return code from invalidate rc = %lx\n", lpar_rc);
}

static void iSeries_hpte_invalidate(unsigned long slot,
				    unsigned long secondary,
				    unsigned long va,
				    int large, int local)
{
	HPTE lhpte;
	unsigned long vpn, avpn;

	if (large)
		vpn = va >> LARGE_PAGE_SHIFT;
	else
		vpn = va >> PAGE_SHIFT;

	avpn = vpn >> 11;

	lhpte.dw0.dword0 = iSeries_hpte_getword0(slot);

	if ((lhpte.dw0.dw0.avpn == avpn) &&
	    (lhpte.dw0.dw0.v) &&
	    (lhpte.dw0.dw0.h == secondary)) {
		HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
	}
}

/*
 * Functions used to update page protection bits.
 *
 * Input : slot  : PTE index within the page table of the entry to update
 *         newpp : new page protection bits
 *         va    : Virtual address of the entry being updated
 *         large : 1 = large page (16M)
 * Output: 0 on success, -1 on failure
 */
static long hpte_updatepp(unsigned long slot,
			  unsigned long secondary,
			  unsigned long newpp,
			  unsigned long va, int large)
{
	HPTE *hptep = htab_data.htab + slot;
	Hpte_dword0 dw0;
	Hpte_dword1 dw1;
	unsigned long vpn, avpn;
	unsigned long flags;

	if (large)
		vpn = va >> LARGE_PAGE_SHIFT;
	else
		vpn = va >> PAGE_SHIFT;

	avpn = vpn >> 11;

	dw0 = hptep->dw0.dw0;
	if ((dw0.avpn == avpn) &&
	    (dw0.v) && (dw0.h == secondary)) {
		/* Turn off valid bit in HPTE */
		dw0.v = 0;
		hptep->dw0.dw0 = dw0;

		/* Ensure it is out of the tlb too */
		spin_lock_irqsave(&pSeries_tlbie_lock, flags);
		_tlbie(va, large);
		spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);

		/* Insert the new pp bits into the HPTE */
		dw1 = hptep->dw1.dw1;
		dw1.pp = newpp;
		hptep->dw1.dw1 = dw1;

		/* Ensure it is visible before validating */
		__asm__ __volatile__ ("eieio" : : : "memory");

		/* Turn the valid bit back on in HPTE */
		dw0.v = 1;
		hptep->dw0.dw0 = dw0;

		__asm__ __volatile__ ("ptesync" : : : "memory");

		return 0;
	}

	return -1;
}

static long rpa_lpar_hpte_updatepp(unsigned long slot,
				   unsigned long secondary,
				   unsigned long newpp,
				   unsigned long va, int large)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7);
	unsigned long avpn = va >> 23;
	HPTE hpte;

	lpar_rc = plpar_pte_read(0, slot, &hpte.dw0.dword0, &hpte.dw1.dword1);

	if ((hpte.dw0.dw0.avpn == avpn) &&
	    (hpte.dw0.dw0.v) &&
	    (hpte.dw0.dw0.h == secondary)) {
		lpar_rc = plpar_pte_protect(flags, slot, 0);
		if (lpar_rc != H_Success)
			panic("bad return code from pte protect rc = %lx\n",
			      lpar_rc);
		return 0;
	}

	return -1;
}

static long iSeries_hpte_updatepp(unsigned long slot,
				  unsigned long secondary,
				  unsigned long newpp,
				  unsigned long va, int large)
{
	unsigned long vpn, avpn;
	HPTE hpte;

	if (large)
		vpn = va >> LARGE_PAGE_SHIFT;
	else
		vpn = va >> PAGE_SHIFT;

	avpn = vpn >> 11;

	HvCallHpt_get(&hpte, slot);
	if ((hpte.dw0.dw0.avpn == avpn) &&
	    (hpte.dw0.dw0.v) &&
	    (hpte.dw0.dw0.h == secondary)) {
		HvCallHpt_setPp(slot, newpp);
		return 0;
	}
	return -1;
}

/*
 * Functions used to update the page protection bits. Intended to be used
 * to create guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 * Does not work on large pages. No need to lock here because we are the
 * only user.
 *
 * Input : newpp : page protection flags
 *         ea    : effective kernel address to bolt.
 */
static void hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
	unsigned long vsid, va, vpn, flags;
	long slot;
	HPTE *hptep;

	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> PAGE_SHIFT;

	slot = hpte_find(vpn);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_data.htab + slot;

	set_pp_bit(newpp, hptep);

	/* Ensure it is out of the tlb too */
	spin_lock_irqsave(&pSeries_tlbie_lock, flags);
	_tlbie(va, 0);
	spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}

static void rpa_lpar_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
	unsigned long lpar_rc;
	unsigned long vsid, va, vpn, flags;
	long slot;

	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> PAGE_SHIFT;

	slot = rpa_lpar_hpte_find(vpn);
	if (slot == -1)
		panic("updateboltedpp: Could not find page to bolt\n");

	flags = newpp & 3;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	if (lpar_rc != H_Success)
		panic("Bad return code from pte bolted protect rc = %lx\n",
		      lpar_rc);
}

void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
	unsigned long vsid, va, vpn;
	long slot;

	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> PAGE_SHIFT;

	slot = iSeries_hpte_find(vpn);
	if (slot == -1)
		panic("updateboltedpp: Could not find page to bolt\n");

	HvCallHpt_setPp(slot, newpp);
}

/*
 * Functions used to insert new hardware page table entries.
 * Will castout non-bolted entries as necessary using a random
 * algorithm.
 *
 * Input : vpn      : virtual page number
 *         prpn     : real page number in absolute space
 *         hpteflags: page protection flags
 *         bolted   : 1 = bolt the page
 *         large    : 1 = large page (16M)
 * Output: hsss, where h = hash group, sss = slot within that group
 */
static long hpte_insert(unsigned long vpn, unsigned long prpn,
			unsigned long hpteflags, int bolted, int large)
{
	HPTE *hptep;
	Hpte_dword0 dw0;
	HPTE lhpte;
	int i, secondary;
	unsigned long hash = hpt_hash(vpn, 0);
	unsigned long avpn = vpn >> 11;
	unsigned long arpn = physRpn_to_absRpn(prpn);
	unsigned long hpte_group;

repeat:
	secondary = 0;
	hpte_group = ((hash & htab_data.htab_hash_mask) *
		      HPTES_PER_GROUP) & ~0x7UL;
	hptep = htab_data.htab + hpte_group;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		dw0 = hptep->dw0.dw0;
		if (!dw0.v) {
			/* retry with lock held */
			dw0 = hptep->dw0.dw0;
			if (!dw0.v)
				break;
		}
		hptep++;
	}

	if (i == HPTES_PER_GROUP) {
		secondary = 1;
		hpte_group = ((~hash & htab_data.htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;
		hptep = htab_data.htab + hpte_group;

		for (i = 0; i < HPTES_PER_GROUP; i++) {
			dw0 = hptep->dw0.dw0;
			if (!dw0.v) {
				/* retry with lock held */
				dw0 = hptep->dw0.dw0;
				if (!dw0.v)
					break;
			}
			hptep++;
		}
		if (i == HPTES_PER_GROUP) {
			if (mftb() & 0x1)
				hpte_group = ((hash & htab_data.htab_hash_mask) *
					      HPTES_PER_GROUP) & ~0x7UL;

			hpte_remove(hpte_group);
			goto repeat;
		}
	}

	lhpte.dw1.dword1      = 0;
	lhpte.dw1.dw1.rpn     = arpn;
	lhpte.dw1.flags.flags = hpteflags;

	lhpte.dw0.dword0      = 0;
	lhpte.dw0.dw0.avpn    = avpn;
	lhpte.dw0.dw0.h       = secondary;
	lhpte.dw0.dw0.bolted  = bolted;
	lhpte.dw0.dw0.v       = 1;

	if (large) lhpte.dw0.dw0.l = 1;

	hptep->dw1.dword1 = lhpte.dw1.dword1;

	/* Guarantee the second dword is visible before the valid bit */
	__asm__ __volatile__ ("eieio" : : : "memory");

	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->dw0.dword0 = lhpte.dw0.dword0;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return ((secondary << 3) | i);
}

static long rpa_lpar_hpte_insert(unsigned long vpn, unsigned long prpn,
				 unsigned long hpteflags,
				 int bolted, int large)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	HPTE lhpte;
	int secondary;
	unsigned long hash = hpt_hash(vpn, 0);
	unsigned long avpn = vpn >> 11;
	unsigned long arpn = physRpn_to_absRpn(prpn);
	unsigned long hpte_group;

	/* Fill in the local HPTE with absolute rpn, avpn and flags */
	lhpte.dw1.dword1      = 0;
	lhpte.dw1.dw1.rpn     = arpn;
	lhpte.dw1.flags.flags = hpteflags;

	lhpte.dw0.dword0      = 0;
	lhpte.dw0.dw0.avpn    = avpn;
	lhpte.dw0.dw0.bolted  = bolted;
	lhpte.dw0.dw0.v       = 1;

	if (large) lhpte.dw0.dw0.l = 1;

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Large page = 0              */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

	/*
	 * Clear _PAGE_COHERENT for guarded/no-cache mappings: at one
	 * point non-cacheable pages were marked coherent and this was
	 * rejected by the HV.  Perhaps it is no longer an issue ... DRENG.
	 */
	if (hpteflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
		lhpte.dw1.flags.flags &= ~_PAGE_COHERENT;

repeat:
	secondary = 0;
	lhpte.dw0.dw0.h = secondary;
	hpte_group = ((hash & htab_data.htab_hash_mask) *
		      HPTES_PER_GROUP) & ~0x7UL;

	__asm__ __volatile__ (
		H_ENTER_r3
		"mr    4, %2\n"
		"mr    5, %3\n"
		"mr    6, %4\n"
		"mr    7, %5\n"
		HSC
		"mr    %0, 3\n"
		"mr    %1, 4\n"
		: "=r" (lpar_rc), "=r" (slot)
		: "r" (flags), "r" (hpte_group), "r" (lhpte.dw0.dword0),
		  "r" (lhpte.dw1.dword1)
		: "r0", "r3", "r4", "r5", "r6", "r7",
		  "r8", "r9", "r10", "r11", "r12", "cc");

	if (lpar_rc == H_PTEG_Full) {
		secondary = 1;
		lhpte.dw0.dw0.h = secondary;
		hpte_group = ((~hash & htab_data.htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		__asm__ __volatile__ (
			H_ENTER_r3
			"mr    4, %2\n"
			"mr    5, %3\n"
			"mr    6, %4\n"
			"mr    7, %5\n"
			HSC
			"mr    %0, 3\n"
			"mr    %1, 4\n"
			: "=r" (lpar_rc), "=r" (slot)
			: "r" (flags), "r" (hpte_group), "r" (lhpte.dw0.dword0),
			  "r" (lhpte.dw1.dword1)
			: "r0", "r3", "r4", "r5", "r6", "r7",
			  "r8", "r9", "r10", "r11", "r12", "cc");
		if (lpar_rc == H_PTEG_Full) {
			if (mftb() & 0x1)
				hpte_group = ((hash & htab_data.htab_hash_mask) *
					      HPTES_PER_GROUP) & ~0x7UL;

			rpa_lpar_hpte_remove(hpte_group);
			goto repeat;
		}
	}

	if (lpar_rc != H_Success)
		panic("Bad return code from pte enter rc = %lx\n", lpar_rc);

	return ((secondary << 3) | (slot & 0x7));
}

static long iSeries_hpte_insert(unsigned long vpn, unsigned long prpn,
				unsigned long hpteflags,
				int bolted, int large)
{
	HPTE lhpte;
	unsigned long hash, hpte_group;
	unsigned long avpn = vpn >> 11;
	unsigned long arpn = physRpn_to_absRpn(prpn);
	int secondary = 0;
	long slot;

	hash = hpt_hash(vpn, 0);

repeat:
	slot = HvCallHpt_findValid(&lhpte, vpn);
	if (lhpte.dw0.dw0.v) {
		panic("select_hpte_slot found entry already valid\n");
	}

	if (slot == -1) { /* No available entry found in either group */
		if (mftb() & 0x1) {
			hpte_group = ((hash & htab_data.htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
		} else {
			hpte_group = ((~hash & htab_data.htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
		}

		hash = hpt_hash(vpn, 0);
		iSeries_hpte_remove(hpte_group);
		goto repeat;
	} else if (slot < 0) {
		slot &= 0x7fffffffffffffff;
		secondary = 1;
	}

	/* Create the HPTE */
	lhpte.dw1.dword1      = 0;
	lhpte.dw1.dw1.rpn     = arpn;
	lhpte.dw1.flags.flags = hpteflags;

	lhpte.dw0.dword0     = 0;
	lhpte.dw0.dw0.avpn   = avpn;
	lhpte.dw0.dw0.h      = secondary;
	lhpte.dw0.dw0.bolted = bolted;
	lhpte.dw0.dw0.v      = 1;

	/* Now fill in the actual HPTE */
	HvCallHpt_addValidate(slot, secondary, (HPTE *)&lhpte);
	return ((secondary << 3) | (slot & 0x7));
}

/*
 * Functions used to remove hardware page table entries.
 *
 * Input : hpte_group: PTE index of the first entry in a group
 * Output: offset within the group of the entry removed or
 *         -1 on failure
 */
static long hpte_remove(unsigned long hpte_group)
{
	HPTE *hptep;
	Hpte_dword0 dw0;
	int i;
	int slot_offset;
	unsigned long vsid, group, pi, pi_high;
	unsigned long slot;
	unsigned long flags;
	int large;
	unsigned long va;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_data.htab + hpte_group + slot_offset;
		dw0 = hptep->dw0.dw0;

		if (dw0.v && !dw0.bolted) {
			/* retry with lock held */
			dw0 = hptep->dw0.dw0;
			if (dw0.v && !dw0.bolted)
				break;
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	large = dw0.l;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->dw0.dword0 = 0;

	/* Invalidate the tlb */
	vsid = dw0.avpn >> 5;
	slot = hptep - htab_data.htab;
	group = slot >> 3;
	if (dw0.h)
		group = ~group;
	pi = (vsid ^ group) & 0x7ff;
	pi_high = (dw0.avpn & 0x1f) << 11;
	pi |= pi_high;
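	/*
	 * Only the page-index bits of the VA are reconstructed: the
	 * hash is reversible within the mask (vsid ^ group gives the
	 * low 11 bits of the page index) and the AVPN supplies the
	 * next five, which is evidently all _tlbie needs here.
	 */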

	if (large)
		va = pi << LARGE_PAGE_SHIFT;
	else
		va = pi << PAGE_SHIFT;

	spin_lock_irqsave(&pSeries_tlbie_lock, flags);
	_tlbie(va, large);
	spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);

	return i;
}

static long rpa_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* Don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);

		if (lpar_rc == H_Success)
			return i;

		if (lpar_rc != H_Not_Found)
			panic("Bad return code from pte remove rc = %lx\n",
			      lpar_rc);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static long iSeries_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	int i;
	HPTE lhpte;

	/* Pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		lhpte.dw0.dword0 =
			iSeries_hpte_getword0(hpte_group + slot_offset);

		if (!lhpte.dw0.dw0.bolted) {
			HvCallHpt_invalidateSetSwBitsGet(hpte_group +
							 slot_offset, 0, 0);
			return i;
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

void hpte_init_pSeries(void)
{
	ppc_md.hpte_invalidate     = hpte_invalidate;
	ppc_md.hpte_updatepp       = hpte_updatepp;
	ppc_md.hpte_updateboltedpp = hpte_updateboltedpp;
	ppc_md.hpte_insert         = hpte_insert;
	ppc_md.hpte_remove         = hpte_remove;
}

void pSeries_lpar_mm_init(void)
{
	ppc_md.hpte_invalidate     = rpa_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp       = rpa_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = rpa_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert         = rpa_lpar_hpte_insert;
	ppc_md.hpte_remove         = rpa_lpar_hpte_remove;
}

void hpte_init_iSeries(void)
{
	ppc_md.hpte_invalidate     = iSeries_hpte_invalidate;
	ppc_md.hpte_updatepp       = iSeries_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
	ppc_md.hpte_insert         = iSeries_hpte_insert;
	ppc_md.hpte_remove         = iSeries_hpte_remove;
}