/*
 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"

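/*
 * Note: host TLB1 entries for the guest are handed out from the top of
 * the array down -- shadow esel 0 lives in host entry tlb1_entry_num - 1.
 * The low entries (below tlbcam_index) hold the host's own CAM mappings
 * and are never touched here; see tlb1_max_shadow_size() below.
 */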
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Shadow TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" S[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[0]++;
	if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->guest_tlb_nv[0] = 0;

	return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
	return tlb1_entry_num - tlbcam_index;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

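/*
 * In MAS3 each user permission bit sits one position above its
 * supervisor counterpart (MAS3_SR 0x01 / MAS3_UR 0x02, MAS3_SW 0x04 /
 * MAS3_UW 0x08, MAS3_SX 0x10 / MAS3_UX 0x20).  Worked example for the
 * "<< 1" below: a guest-supervisor entry granting SR|SW (0x05) is
 * shadowed as UR|UW (0x0a), since guest-supervisor code runs in host
 * user mode.  The host itself always keeps full supervisor access.
 */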
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}

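/*
 * Note: on SMP every shadow mapping has the M (memory coherence
 * required) bit forced on, presumably because a vcpu can migrate
 * between physical cores and its pages may be touched from more than
 * one CPU.
 */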
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe)
{
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	__asm__ __volatile__ ("tlbwe\n" : : );
}

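/*
 * Note: interrupts are disabled across the write because MAS0-MAS7 are
 * shared per-CPU scratch registers that the host's own TLB miss
 * handlers also use.  For TLB1 the current MAS0 is saved and restored,
 * since the host entry slot must be selected explicitly via MAS0.
 */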
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	local_irq_disable();
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe);
	} else {
		unsigned register mas0;

		mas0 = mfspr(SPRN_MAS0);

		mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
		__write_host_tlbe(stlbe);

		mtspr(SPRN_MAS0, mas0);
	}
	local_irq_enable();
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int i;
	unsigned register mas0;

	/* Load all valid TLB1 entries to reduce guest TLB miss faults */
	local_irq_disable();
	mas0 = mfspr(SPRN_MAS0);
	for (i = 0; i < tlb1_max_shadow_size(); i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];

		if (get_tlb_v(stlbe)) {
			mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
					| MAS0_ESEL(to_htlb1_esel(i)));
			__write_host_tlbe(stlbe);
		}
	}
	mtspr(SPRN_MAS0, mas0);
	local_irq_enable();
}

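/*
 * Note: on vcpu put the whole host TLB is simply flash-invalidated
 * rather than evicting shadow entries one by one; host mappings pinned
 * with IPROT should survive this (flash invalidation skips IPROT
 * entries on e500), so only the guest's shadow entries are lost.
 */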
void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
	_tlbil_all();
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int i;

	for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
		struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return i;
	}

	return -1;
}

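/*
 * Note: dropping a shadow entry also drops the reference taken on the
 * backing host page in kvmppc_e500_shadow_map(); the page is marked
 * dirty first if the mapping was writable so the host writes it back.
 */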
static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
	struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];

	if (page) {
		vcpu_e500->shadow_pages[tlbsel][esel] = NULL;

		if (get_tlb_v(stlbe)) {
			if (tlbe_is_writable(stlbe))
				kvm_release_page_dirty(page);
			else
				kvm_release_page_clean(page);
		}
	}
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
	stlbe->mas1 = 0;
	trace_kvm_stlb_inval(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}

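/*
 * Note: this scans the shadow TLB1 array for valid entries overlapping
 * [eaddr, eend] with a matching TID, invalidates them, and writes the
 * now-invalid entry back out so the host TLB drops it as well.
 */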
static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, gva_t eend, u32 tid)
{
	unsigned int pid = tid & 0xff;
	unsigned int i;

	for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
		write_host_tlbe(vcpu_e500, 1, i);
	}
}

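/*
 * Note: this mirrors what the hardware does on a real TLB miss: the
 * MAS registers are preloaded from the MAS4 defaults (TLBSELD, PIDSELD,
 * TSIZED), so the guest's miss handler typically only fills in the RPN
 * and permissions before issuing tlbwe.
 */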
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}

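/*
 * Note: the core of the shadow MMU.  The guest frame number is resolved
 * to a host page (taking a reference that pins it), and a 4KB shadow
 * entry is built whose RPN points at host memory, with the attribute
 * bits filtered through the e500_shadow_mas{2,3}_attrib() helpers above.
 */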
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
{
	struct page *new_page;
	struct tlbe *stlbe;
	hpa_t hpaddr;

	stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	/* Get reference to new page. */
	new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
				(long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);

	vcpu_e500->shadow_pages[tlbsel][esel] = new_page;

	/* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.msr & MSR_PR);
	stlbe->mas3 = (hpaddr & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.msr & MSR_PR);
	stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;

	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}

static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, tlbsel, esel);

	return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[1]++;

	if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->guest_tlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);

	return victim;
}

/* Invalidate all guest kernel mappings when entering user mode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	if (usermode) {
		struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
		int i;

		for (i = 0; i < tlb1_max_shadow_size(); i++)
			kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);

		_tlbil_all();
	}
}

static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1) {
		kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
				get_tlb_end(gtlbe),
				get_tlb_tid(gtlbe));
	} else {
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	gtlbe->mas1 = 0;

	return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	_tlbil_all();

	return EMULATE_DONE;
}

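/*
 * Note: tlbivax encodes its operands in the effective address itself:
 * EA bit 2 requests invalidate-all and EA bit 3 selects the TLB array,
 * which is why the EA is decoded below rather than used directly.
 */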
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	_tlbil_all();

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

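/*
 * Note: tlbsx searches both guest TLBs using the SPID/SAS keys from
 * MAS6.  On a hit the MAS registers are loaded from the matching entry;
 * on a miss they are preloaded with MAS4 defaults, exactly as on a TLB
 * miss, so a subsequent tlbwe lands in a sensible victim slot.
 */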
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| ((vcpu_e500->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	return EMULATE_DONE;
}

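/*
 * Note: tlbwe commits the vcpu's MAS registers into the guest TLB and,
 * when the entry is host-safe, eagerly instantiates a shadow mapping:
 * TLB0 entries map one to one, while a TLB1 entry gets only its first
 * 4KB mapped here, the remainder faulting in on demand.
 */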
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	u64 eaddr;
	u64 raddr;
	u32 tid;
	struct tlbe *gtlbe;
	int tlbsel, esel, stlbsel, sesel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (get_tlb_v(gtlbe) && tlbsel == 1) {
		eaddr = get_tlb_eaddr(gtlbe);
		tid = get_tlb_tid(gtlbe);
		kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
				get_tlb_end(gtlbe), tid);
	}

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe);
			break;

		default:
			BUG();
		}
		write_host_tlbe(vcpu_e500, stlbsel, sesel);
	}

	return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, i;

	for (tlbsel = 0; tlbsel < 2; tlbsel++)
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
			kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);

	/* discard all guest mappings */
	_tlbil_all();
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		struct tlbe *gtlbe
			= &vcpu_e500->guest_tlb[tlbsel][esel];

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
		break;
	}

	default:
		BUG();
		break;
	}
	write_host_tlbe(vcpu_e500, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

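/*
 * Note: the initial guest TLB1 state below gives the guest a 256MB
 * mapping at address 0 for early boot, plus a small cache-inhibited,
 * guarded window that the kernel boot wrapper appears to use for
 * serial output.
 */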
void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->guest_tlb[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->guest_tlb[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

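/*
 * Note: guest_tlb[] holds what the guest believes it has written, while
 * shadow_tlb[] tracks what is actually installed in hardware; shadow
 * TLB1 is sized to the host's full TLB1 (TLB1CFG[NENTRY]), and
 * shadow_pages[] remembers the pinned host page behind each shadow
 * entry so it can be released later.
 */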
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->guest_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[0] == NULL)
		goto err_out;

	vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->shadow_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[0] == NULL)
		goto err_out_guest0;

	vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->guest_tlb[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[1] == NULL)
		goto err_out_shadow0;

	vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
	vcpu_e500->shadow_tlb[1] =
		kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[1] == NULL)
		goto err_out_guest1;

	vcpu_e500->shadow_pages[0] = (struct page **)
		kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[0] == NULL)
		goto err_out_shadow1;

	vcpu_e500->shadow_pages[1] = (struct page **)
		kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[1] == NULL)
		goto err_out_page0;

	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];

	return 0;

err_out_page0:
	kfree(vcpu_e500->shadow_pages[0]);
err_out_shadow1:
	kfree(vcpu_e500->shadow_tlb[1]);
err_out_guest1:
	kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
	kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
	kfree(vcpu_e500->guest_tlb[0]);
err_out:
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->shadow_pages[1]);
	kfree(vcpu_e500->shadow_pages[0]);
	kfree(vcpu_e500->shadow_tlb[1]);
	kfree(vcpu_e500->guest_tlb[1]);
	kfree(vcpu_e500->shadow_tlb[0]);
	kfree(vcpu_e500->guest_tlb[0]);
}