1/*
2 * kvm_vcpu.c: handling all virtual cpu related things.
3 * Copyright (c) 2005, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 *  Shaofan Li (Susue Li) <susie.li@intel.com>
19 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
20 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
21 *  Xiantao Zhang <xiantao.zhang@intel.com>
22 */
23
24#include <linux/kvm_host.h>
25#include <linux/types.h>
26
27#include <asm/processor.h>
28#include <asm/ia64regs.h>
29#include <asm/gcc_intrin.h>
30#include <asm/kregs.h>
31#include <asm/pgtable.h>
32#include <asm/tlb.h>
33
34#include "asm-offsets.h"
35#include "vcpu.h"
36
37/*
38 * Special notes:
39 * - Index by it/dt/rt sequence
40 * - Only existing mode transitions are allowed in this table
41 * - RSE is placed at lazy mode when emulating guest partial mode
42 * - If gva falls in region 0 or 4 (rr0/rr4), the only allowed case is
43 *   identity mapping (gva == gpa); otherwise panic! (How?)
44 */
45int mm_switch_table[8][8] = {
46	/*  2004/09/12(Kevin): Allow switch to self */
47	/*
48	 *  (it,dt,rt): (0,0,0) -> (1,1,1)
49	 *  This kind of transition usually occurs in the very early
50	 *  stage of Linux boot up procedure. Another case is in efi
51	 *  and pal calls. (see "arch/ia64/kernel/head.S")
52	 *
53	 *  (it,dt,rt): (0,0,0) -> (0,1,1)
54	 *  This kind of transition is found when OSYa exits efi boot
55	 *  service. Due to gva = gpa in this case (Same region),
56	 *  data access can be satisfied though itlb entry for physical
57	 *  emulation is hit.
58	 */
59	{SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
60	{0,  0,  0,  0,  0,  0,  0,  0},
61	{0,  0,  0,  0,  0,  0,  0,  0},
62	/*
63	 *  (it,dt,rt): (0,1,1) -> (1,1,1)
64	 *  This kind of transition is found in OSYa.
65	 *
66	 *  (it,dt,rt): (0,1,1) -> (0,0,0)
67	 *  This kind of transition is found in OSYa
68	 */
69	{SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
70	/* (1,0,0)->(1,1,1) */
71	{0,  0,  0,  0,  0,  0,  0,  SW_P2V},
72	/*
73	 *  (it,dt,rt): (1,0,1) -> (1,1,1)
74	 *  This kind of transition usually occurs when Linux returns
75	 *  from the low level TLB miss handlers.
76	 *  (see "arch/ia64/kernel/ivt.S")
77	 */
78	{0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
79	{0,  0,  0,  0,  0,  0,  0,  0},
80	/*
81	 *  (it,dt,rt): (1,1,1) -> (1,0,1)
82	 *  This kind of transition usually occurs in Linux low level
83	 *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
84	 *
85	 *  (it,dt,rt): (1,1,1) -> (0,0,0)
86	 *  This kind of transition usually occurs in pal and efi calls,
87	 *  which requires running in physical mode.
88	 *  (see "arch/ia64/kernel/head.S")
89	 *  (1,1,1)->(1,0,0)
90	 */
91
92	{SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
93};
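
/*
 * A worked example of how the table is used (a sketch, assuming MODE_IND()
 * packs the bits as (it << 2) | (dt << 1) | rt, per the it/dt/rt indexing
 * note above): an rsm taking the guest from psr.{it,dt,rt} = (1,1,1) to
 * (0,0,0) hits mm_switch_table[7][0] == SW_V2P, so switch_mm_mode() calls
 * switch_to_physical_rid(); the reverse transition (0,0,0) -> (1,1,1)
 * hits mm_switch_table[0][7] == SW_P2V.
 */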
94
95void physical_mode_init(struct kvm_vcpu  *vcpu)
96{
97	vcpu->arch.mode_flags = GUEST_IN_PHY;
98}
99
100void switch_to_physical_rid(struct kvm_vcpu *vcpu)
101{
102	unsigned long psr;
103
104	/* Save original virtual mode rr[0] and rr[4] */
105	psr = ia64_clear_ic();
106	ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
107	ia64_srlz_d();
108	ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
109	ia64_srlz_d();
110
111	ia64_set_psr(psr);
112	return;
113}
114
115void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
116{
117	unsigned long psr;
118
119	psr = ia64_clear_ic();
120	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
121	ia64_srlz_d();
122	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
123	ia64_srlz_d();
124	ia64_set_psr(psr);
125	return;
126}
127
128static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
129{
130	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
131}
132
133void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
134					struct ia64_psr new_psr)
135{
136	int act;
137	act = mm_switch_action(old_psr, new_psr);
138	switch (act) {
139	case SW_V2P:
140		/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
141		old_psr.val, new_psr.val);*/
142		switch_to_physical_rid(vcpu);
143		/*
144		 * Set RSE to enforced lazy mode, to prevent active RSE
145		 * save/restore while the guest is in physical mode.
146		 */
147		vcpu->arch.mode_flags |= GUEST_IN_PHY;
148		break;
149	case SW_P2V:
150		switch_to_virtual_rid(vcpu);
151		/*
152		 * recover old mode which is saved when entering
153		 * guest physical mode
154		 */
155		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
156		break;
157	case SW_SELF:
158		break;
159	case SW_NOP:
160		break;
161	default:
162		/* Sanity check */
163		break;
164	}
165	return;
166}
167
184void check_mm_mode_switch(struct kvm_vcpu *vcpu,  struct ia64_psr old_psr,
185					struct ia64_psr new_psr)
186{
187
188	if ((old_psr.dt != new_psr.dt)
189			|| (old_psr.it != new_psr.it)
190			|| (old_psr.rt != new_psr.rt))
191		switch_mm_mode(vcpu, old_psr, new_psr);
192
193	return;
194}
195
196
197/*
198 * In physical mode, a tc/tr insert for region 0 or 4 would use
199 * RID[0] and RID[4], which are reserved for physical mode emulation.
200 * However, what those inserted tc/tr entries need is the rid of
201 * virtual mode, so the original virtual rid has to be restored
202 * before the insert.
203 *
204 * Operations which require such a switch include:
205 *  - insertions (itc.*, itr.*)
206 *  - purges (ptc.* and ptr.*)
207 *  - tpa
208 *  - tak
209 *  - thash?, ttag?
210 * All of the above need the actual virtual rid for the destination entry.
211 */
212
213void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
214{
215	if (is_physical_mode(vcpu)) {
216		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
217		switch_to_virtual_rid(vcpu);
218	}
219	return;
220}
221
222/* Recover always follows prepare */
223void recover_if_physical_mode(struct kvm_vcpu *vcpu)
224{
225	if (is_physical_mode(vcpu))
226		switch_to_physical_rid(vcpu);
227	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
228	return;
229}
230
231#define RPT(x)	((u16) &((struct kvm_pt_regs *)0)->x)
232
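/*
 * RPT(x) is the classic offsetof() idiom: it evaluates to the byte offset
 * of field x within struct kvm_pt_regs, truncated to u16.  gr_info[] below
 * therefore maps a static register number to its offset in the saved
 * register frame, so getreg()/setreg() can reach, say, r1 as
 * *(unsigned long *)((char *)regs + gr_info[1]).
 */
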
233static u16 gr_info[32] = {
234	0, 	/* r0 is read-only : WE SHOULD NEVER GET THIS */
235	RPT(r1), RPT(r2), RPT(r3),
236	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
237	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
238	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
239	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
240	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
241	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
242	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
243};
244
245#define IA64_FIRST_STACKED_GR   32
246#define IA64_FIRST_ROTATING_FR  32
247
248static inline unsigned long
249rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
250{
251	reg += rrb;
252	if (reg >= sor)
253		reg -= sor;
254	return reg;
255}
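
/*
 * Example: with a rotating region of sor = 16 slots and rrb = 3,
 * rotate_reg(16, 3, 14) == 1 (14 + 3 wraps past 16) while
 * rotate_reg(16, 3, 2) == 5.  The callers below feed it rrb values
 * decoded from cr.ifs.
 */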
256
257/*
258 * Return the (rotated) index for the floating point register
259 * REGNUM (REGNUM must be in the range 32-127; the result is
260 * in the range 0-95).
261 */
262static inline unsigned long fph_index(struct kvm_pt_regs *regs,
263						long regnum)
264{
265	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
266	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
267}
268
269/*
270 * Skip over NUM_REGS stacked register slots starting at ADDR, taking
271 * the intervening RSE NaT collection slots (one per 63 slots) into account.
272 */
273static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
274							long num_regs)
275{
276	long delta = ia64_rse_slot_num(addr) + num_regs;
277	int i = 0;
278
279	if (num_regs < 0)
280		delta -= 0x3e;
281	if (delta < 0) {
282		while (delta <= -0x3f) {
283			i--;
284			delta += 0x3f;
285		}
286	} else {
287		while (delta >= 0x3f) {
288			i++;
289			delta -= 0x3f;
290		}
291	}
292
293	return addr + num_regs + i;
294}
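
/*
 * Example (a sketch, assuming ia64_rse_slot_num() returns the 0..63 slot
 * index of the address within its 512-byte aligned group): if ADDR sits in
 * slot 0x3d and num_regs is 5, delta becomes 0x42, the loop counts one NaT
 * collection slot (slot 0x3f) in between, and the result is addr + 6:
 * five register slots plus the skipped RNaT slot.
 */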
295
296static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
297					unsigned long *val, int *nat)
298{
299	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
300	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
301	unsigned long nat_mask;
302	unsigned long old_rsc, new_rsc;
303	long sof = (regs->cr_ifs) & 0x7f;
304	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
305	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
306	long ridx = r1 - 32;
307
308	if (ridx < sor)
309		ridx = rotate_reg(sor, rrb_gr, ridx);
310
311	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
312	new_rsc = old_rsc&(~(0x3));
313	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
314
315	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
316	bsp = kbs + (regs->loadrs >> 19);
317
318	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
319	nat_mask = 1UL << ia64_rse_slot_num(addr);
320	rnat_addr = ia64_rse_rnat_addr(addr);
321
322	if (addr >= bspstore) {
323		ia64_flushrs();
324		ia64_mf();
325		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
326	}
327	*val = *addr;
328	if (nat) {
329		if (bspstore < rnat_addr)
330			*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
331							& nat_mask);
332		else
333			*nat = (int)!!((*rnat_addr) & nat_mask);
334		ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
335	}
336}
337
338void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
339				unsigned long val, unsigned long nat)
340{
341	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
342	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
343	unsigned long nat_mask;
344	unsigned long old_rsc, new_rsc, psr;
345	unsigned long rnat;
346	long sof = (regs->cr_ifs) & 0x7f;
347	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
348	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
349	long ridx = r1 - 32;
350
351	if (ridx < sor)
352		ridx = rotate_reg(sor, rrb_gr, ridx);
353
354	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
355	/* put RSC to lazy mode, and set loadrs 0 */
356	new_rsc = old_rsc & (~0x3fff0003);
357	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
358	bsp = kbs + (regs->loadrs >> 19); /* loadrs sits at bit 16; a further >> 3 converts bytes to 8-byte slots */
359
360	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
361	nat_mask = 1UL << ia64_rse_slot_num(addr);
362	rnat_addr = ia64_rse_rnat_addr(addr);
363
364	local_irq_save(psr);
365	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
366	if (addr >= bspstore) {
367
368		ia64_flushrs();
369		ia64_mf();
370		*addr = val;
371		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
372		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
373		if (bspstore < rnat_addr)
374			rnat = rnat & (~nat_mask);
375		else
376			*rnat_addr = (*rnat_addr)&(~nat_mask);
377
378		ia64_mf();
379		ia64_loadrs();
380		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
381	} else {
382		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
383		*addr = val;
384		if (bspstore < rnat_addr)
385			rnat = rnat&(~nat_mask);
386		else
387			*rnat_addr = (*rnat_addr) & (~nat_mask);
388
389		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
390		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
391	}
392	local_irq_restore(psr);
393	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
394}
395
396void getreg(unsigned long regnum, unsigned long *val,
397				int *nat, struct kvm_pt_regs *regs)
398{
399	unsigned long addr, *unat;
400	if (regnum >= IA64_FIRST_STACKED_GR) {
401		get_rse_reg(regs, regnum, val, nat);
402		return;
403	}
404
405	/*
406	 * Now look at registers in the [0-31] range and locate the matching UNAT bit
407	 */
408	addr = (unsigned long)regs;
409	unat = &regs->eml_unat;
410
411	addr += gr_info[regnum];
412
413	*val  = *(unsigned long *)addr;
414	/*
415	 * do it only when requested
416	 */
417	if (nat)
418		*nat  = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
419}
420
421void setreg(unsigned long regnum, unsigned long val,
422			int nat, struct kvm_pt_regs *regs)
423{
424	unsigned long addr;
425	unsigned long bitmask;
426	unsigned long *unat;
427
428	/*
429	 * First takes care of stacked registers
430	 */
431	if (regnum >= IA64_FIRST_STACKED_GR) {
432		set_rse_reg(regs, regnum, val, nat);
433		return;
434	}
435
436	/*
437	 * Now look at registers in the [0-31] range and update the matching UNAT bit
438	 */
439	addr = (unsigned long)regs;
440	unat = &regs->eml_unat;
441	/*
442	 * add offset from base of struct
443	 * and do it !
444	 */
445	addr += gr_info[regnum];
446
447	*(unsigned long *)addr = val;
448
449	/*
450	 * We need to update the corresponding UNAT bit to fully emulate the load:
451	 * UNAT bit_pos = GR[r3]{8:3}, from EAS-2.4
452	 */
453	bitmask   = 1UL << ((addr >> 3) & 0x3f);
454	if (nat)
455		*unat |= bitmask;
456	 else
457		*unat &= ~bitmask;
458
459}
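
/*
 * Note on the UNAT bit position used above: for an 8-byte slot at address
 * ADDR, the matching NaT bit is bit (ADDR >> 3) & 0x3f, i.e. the slot's
 * index within its 512-byte aligned group.  That is why both getreg() and
 * setreg() derive the bit number from the register's address inside
 * struct kvm_pt_regs rather than from the register number itself.
 */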
460
461u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
462{
463	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
464	unsigned long val;
465
466	if (!reg)
467		return 0;
468	getreg(reg, &val, 0, regs);
469	return val;
470}
471
472void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
473{
474	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
475	long sof = (regs->cr_ifs) & 0x7f;
476
477	if (!reg)
478		return;
479	if (reg >= sof + 32)
480		return;
481	setreg(reg, value, nat, regs);
482}
483
484void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
485				struct kvm_pt_regs *regs)
486{
487	/* Take floating register rotation into consideration*/
488	if (regnum >= IA64_FIRST_ROTATING_FR)
489		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
490#define CASE_FIXED_FP(reg)			\
491	case  (reg) :				\
492		ia64_stf_spill(fpval, reg);	\
493	break
494
495	switch (regnum) {
496		CASE_FIXED_FP(0);
497		CASE_FIXED_FP(1);
498		CASE_FIXED_FP(2);
499		CASE_FIXED_FP(3);
500		CASE_FIXED_FP(4);
501		CASE_FIXED_FP(5);
502
503		CASE_FIXED_FP(6);
504		CASE_FIXED_FP(7);
505		CASE_FIXED_FP(8);
506		CASE_FIXED_FP(9);
507		CASE_FIXED_FP(10);
508		CASE_FIXED_FP(11);
509
510		CASE_FIXED_FP(12);
511		CASE_FIXED_FP(13);
512		CASE_FIXED_FP(14);
513		CASE_FIXED_FP(15);
514		CASE_FIXED_FP(16);
515		CASE_FIXED_FP(17);
516		CASE_FIXED_FP(18);
517		CASE_FIXED_FP(19);
518		CASE_FIXED_FP(20);
519		CASE_FIXED_FP(21);
520		CASE_FIXED_FP(22);
521		CASE_FIXED_FP(23);
522		CASE_FIXED_FP(24);
523		CASE_FIXED_FP(25);
524		CASE_FIXED_FP(26);
525		CASE_FIXED_FP(27);
526		CASE_FIXED_FP(28);
527		CASE_FIXED_FP(29);
528		CASE_FIXED_FP(30);
529		CASE_FIXED_FP(31);
530		CASE_FIXED_FP(32);
531		CASE_FIXED_FP(33);
532		CASE_FIXED_FP(34);
533		CASE_FIXED_FP(35);
534		CASE_FIXED_FP(36);
535		CASE_FIXED_FP(37);
536		CASE_FIXED_FP(38);
537		CASE_FIXED_FP(39);
538		CASE_FIXED_FP(40);
539		CASE_FIXED_FP(41);
540		CASE_FIXED_FP(42);
541		CASE_FIXED_FP(43);
542		CASE_FIXED_FP(44);
543		CASE_FIXED_FP(45);
544		CASE_FIXED_FP(46);
545		CASE_FIXED_FP(47);
546		CASE_FIXED_FP(48);
547		CASE_FIXED_FP(49);
548		CASE_FIXED_FP(50);
549		CASE_FIXED_FP(51);
550		CASE_FIXED_FP(52);
551		CASE_FIXED_FP(53);
552		CASE_FIXED_FP(54);
553		CASE_FIXED_FP(55);
554		CASE_FIXED_FP(56);
555		CASE_FIXED_FP(57);
556		CASE_FIXED_FP(58);
557		CASE_FIXED_FP(59);
558		CASE_FIXED_FP(60);
559		CASE_FIXED_FP(61);
560		CASE_FIXED_FP(62);
561		CASE_FIXED_FP(63);
562		CASE_FIXED_FP(64);
563		CASE_FIXED_FP(65);
564		CASE_FIXED_FP(66);
565		CASE_FIXED_FP(67);
566		CASE_FIXED_FP(68);
567		CASE_FIXED_FP(69);
568		CASE_FIXED_FP(70);
569		CASE_FIXED_FP(71);
570		CASE_FIXED_FP(72);
571		CASE_FIXED_FP(73);
572		CASE_FIXED_FP(74);
573		CASE_FIXED_FP(75);
574		CASE_FIXED_FP(76);
575		CASE_FIXED_FP(77);
576		CASE_FIXED_FP(78);
577		CASE_FIXED_FP(79);
578		CASE_FIXED_FP(80);
579		CASE_FIXED_FP(81);
580		CASE_FIXED_FP(82);
581		CASE_FIXED_FP(83);
582		CASE_FIXED_FP(84);
583		CASE_FIXED_FP(85);
584		CASE_FIXED_FP(86);
585		CASE_FIXED_FP(87);
586		CASE_FIXED_FP(88);
587		CASE_FIXED_FP(89);
588		CASE_FIXED_FP(90);
589		CASE_FIXED_FP(91);
590		CASE_FIXED_FP(92);
591		CASE_FIXED_FP(93);
592		CASE_FIXED_FP(94);
593		CASE_FIXED_FP(95);
594		CASE_FIXED_FP(96);
595		CASE_FIXED_FP(97);
596		CASE_FIXED_FP(98);
597		CASE_FIXED_FP(99);
598		CASE_FIXED_FP(100);
599		CASE_FIXED_FP(101);
600		CASE_FIXED_FP(102);
601		CASE_FIXED_FP(103);
602		CASE_FIXED_FP(104);
603		CASE_FIXED_FP(105);
604		CASE_FIXED_FP(106);
605		CASE_FIXED_FP(107);
606		CASE_FIXED_FP(108);
607		CASE_FIXED_FP(109);
608		CASE_FIXED_FP(110);
609		CASE_FIXED_FP(111);
610		CASE_FIXED_FP(112);
611		CASE_FIXED_FP(113);
612		CASE_FIXED_FP(114);
613		CASE_FIXED_FP(115);
614		CASE_FIXED_FP(116);
615		CASE_FIXED_FP(117);
616		CASE_FIXED_FP(118);
617		CASE_FIXED_FP(119);
618		CASE_FIXED_FP(120);
619		CASE_FIXED_FP(121);
620		CASE_FIXED_FP(122);
621		CASE_FIXED_FP(123);
622		CASE_FIXED_FP(124);
623		CASE_FIXED_FP(125);
624		CASE_FIXED_FP(126);
625		CASE_FIXED_FP(127);
626	}
627#undef CASE_FIXED_FP
628}
629
630void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
631					struct kvm_pt_regs *regs)
632{
633	/* Take floating register rotation into consideration*/
634	if (regnum >= IA64_FIRST_ROTATING_FR)
635		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
636
637#define CASE_FIXED_FP(reg)			\
638	case (reg) :				\
639		ia64_ldf_fill(reg, fpval);	\
640	break
641
642	switch (regnum) {
643		CASE_FIXED_FP(2);
644		CASE_FIXED_FP(3);
645		CASE_FIXED_FP(4);
646		CASE_FIXED_FP(5);
647
648		CASE_FIXED_FP(6);
649		CASE_FIXED_FP(7);
650		CASE_FIXED_FP(8);
651		CASE_FIXED_FP(9);
652		CASE_FIXED_FP(10);
653		CASE_FIXED_FP(11);
654
655		CASE_FIXED_FP(12);
656		CASE_FIXED_FP(13);
657		CASE_FIXED_FP(14);
658		CASE_FIXED_FP(15);
659		CASE_FIXED_FP(16);
660		CASE_FIXED_FP(17);
661		CASE_FIXED_FP(18);
662		CASE_FIXED_FP(19);
663		CASE_FIXED_FP(20);
664		CASE_FIXED_FP(21);
665		CASE_FIXED_FP(22);
666		CASE_FIXED_FP(23);
667		CASE_FIXED_FP(24);
668		CASE_FIXED_FP(25);
669		CASE_FIXED_FP(26);
670		CASE_FIXED_FP(27);
671		CASE_FIXED_FP(28);
672		CASE_FIXED_FP(29);
673		CASE_FIXED_FP(30);
674		CASE_FIXED_FP(31);
675		CASE_FIXED_FP(32);
676		CASE_FIXED_FP(33);
677		CASE_FIXED_FP(34);
678		CASE_FIXED_FP(35);
679		CASE_FIXED_FP(36);
680		CASE_FIXED_FP(37);
681		CASE_FIXED_FP(38);
682		CASE_FIXED_FP(39);
683		CASE_FIXED_FP(40);
684		CASE_FIXED_FP(41);
685		CASE_FIXED_FP(42);
686		CASE_FIXED_FP(43);
687		CASE_FIXED_FP(44);
688		CASE_FIXED_FP(45);
689		CASE_FIXED_FP(46);
690		CASE_FIXED_FP(47);
691		CASE_FIXED_FP(48);
692		CASE_FIXED_FP(49);
693		CASE_FIXED_FP(50);
694		CASE_FIXED_FP(51);
695		CASE_FIXED_FP(52);
696		CASE_FIXED_FP(53);
697		CASE_FIXED_FP(54);
698		CASE_FIXED_FP(55);
699		CASE_FIXED_FP(56);
700		CASE_FIXED_FP(57);
701		CASE_FIXED_FP(58);
702		CASE_FIXED_FP(59);
703		CASE_FIXED_FP(60);
704		CASE_FIXED_FP(61);
705		CASE_FIXED_FP(62);
706		CASE_FIXED_FP(63);
707		CASE_FIXED_FP(64);
708		CASE_FIXED_FP(65);
709		CASE_FIXED_FP(66);
710		CASE_FIXED_FP(67);
711		CASE_FIXED_FP(68);
712		CASE_FIXED_FP(69);
713		CASE_FIXED_FP(70);
714		CASE_FIXED_FP(71);
715		CASE_FIXED_FP(72);
716		CASE_FIXED_FP(73);
717		CASE_FIXED_FP(74);
718		CASE_FIXED_FP(75);
719		CASE_FIXED_FP(76);
720		CASE_FIXED_FP(77);
721		CASE_FIXED_FP(78);
722		CASE_FIXED_FP(79);
723		CASE_FIXED_FP(80);
724		CASE_FIXED_FP(81);
725		CASE_FIXED_FP(82);
726		CASE_FIXED_FP(83);
727		CASE_FIXED_FP(84);
728		CASE_FIXED_FP(85);
729		CASE_FIXED_FP(86);
730		CASE_FIXED_FP(87);
731		CASE_FIXED_FP(88);
732		CASE_FIXED_FP(89);
733		CASE_FIXED_FP(90);
734		CASE_FIXED_FP(91);
735		CASE_FIXED_FP(92);
736		CASE_FIXED_FP(93);
737		CASE_FIXED_FP(94);
738		CASE_FIXED_FP(95);
739		CASE_FIXED_FP(96);
740		CASE_FIXED_FP(97);
741		CASE_FIXED_FP(98);
742		CASE_FIXED_FP(99);
743		CASE_FIXED_FP(100);
744		CASE_FIXED_FP(101);
745		CASE_FIXED_FP(102);
746		CASE_FIXED_FP(103);
747		CASE_FIXED_FP(104);
748		CASE_FIXED_FP(105);
749		CASE_FIXED_FP(106);
750		CASE_FIXED_FP(107);
751		CASE_FIXED_FP(108);
752		CASE_FIXED_FP(109);
753		CASE_FIXED_FP(110);
754		CASE_FIXED_FP(111);
755		CASE_FIXED_FP(112);
756		CASE_FIXED_FP(113);
757		CASE_FIXED_FP(114);
758		CASE_FIXED_FP(115);
759		CASE_FIXED_FP(116);
760		CASE_FIXED_FP(117);
761		CASE_FIXED_FP(118);
762		CASE_FIXED_FP(119);
763		CASE_FIXED_FP(120);
764		CASE_FIXED_FP(121);
765		CASE_FIXED_FP(122);
766		CASE_FIXED_FP(123);
767		CASE_FIXED_FP(124);
768		CASE_FIXED_FP(125);
769		CASE_FIXED_FP(126);
770		CASE_FIXED_FP(127);
771	}
772}
773
774void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
775						struct ia64_fpreg *val)
776{
777	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
778
779	getfpreg(reg, val, regs);
780}
781
782void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
783						struct ia64_fpreg *val)
784{
785	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
786
787	if (reg > 1)
788		setfpreg(reg, val, regs);
789}
790
791/*
792 * The Altix RTC is mapped specially here for the vmm module
793 */
794#define SN_RTC_BASE	(u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
795static long kvm_get_itc(struct kvm_vcpu *vcpu)
796{
797#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
798	struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
799
800	if (kvm->arch.is_sn2)
801		return (*SN_RTC_BASE);
802	else
803#endif
804		return ia64_getreg(_IA64_REG_AR_ITC);
805}
806
807/************************************************************************
808 * lsapic timer
809 ***********************************************************************/
810u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
811{
812	unsigned long guest_itc;
813	guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
814
815	if (guest_itc >= VMX(vcpu, last_itc)) {
816		VMX(vcpu, last_itc) = guest_itc;
817		return  guest_itc;
818	} else
819		return VMX(vcpu, last_itc);
820}
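
/*
 * In short: the guest ITC is the host time source (ar.itc, or the Altix
 * RTC on SN2) plus the per-vcpu itc_offset, clamped so that successive
 * reads never appear to go backwards: last_itc remembers the largest
 * value handed out so far.
 */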
821
822static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
823static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
824{
825	struct kvm_vcpu *v;
826	struct kvm *kvm;
827	int i;
828	long itc_offset = val - kvm_get_itc(vcpu);
829	unsigned long vitv = VCPU(vcpu, itv);
830
831	kvm = (struct kvm *)KVM_VM_BASE;
832
833	if (kvm_vcpu_is_bsp(vcpu)) {
834		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
835			v = (struct kvm_vcpu *)((char *)vcpu +
836					sizeof(struct kvm_vcpu_data) * i);
837			VMX(v, itc_offset) = itc_offset;
838			VMX(v, last_itc) = 0;
839		}
840	}
841	VMX(vcpu, last_itc) = 0;
842	if (VCPU(vcpu, itm) <= val) {
843		VMX(vcpu, itc_check) = 0;
844		vcpu_unpend_interrupt(vcpu, vitv);
845	} else {
846		VMX(vcpu, itc_check) = 1;
847		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
848	}
849
850}
851
852static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
853{
854	return ((u64)VCPU(vcpu, itm));
855}
856
857static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
858{
859	unsigned long vitv = VCPU(vcpu, itv);
860	VCPU(vcpu, itm) = val;
861
862	if (val > vcpu_get_itc(vcpu)) {
863		VMX(vcpu, itc_check) = 1;
864		vcpu_unpend_interrupt(vcpu, vitv);
865		VMX(vcpu, timer_pending) = 0;
866	} else
867		VMX(vcpu, itc_check) = 0;
868}
869
870#define  ITV_VECTOR(itv)    (itv&0xff)
871#define  ITV_IRQ_MASK(itv)  (itv&(1<<16))
872
873static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
874{
875	VCPU(vcpu, itv) = val;
876	if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
877		vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
878		vcpu->arch.timer_pending = 0;
879	}
880}
881
882static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
883{
884	int vec;
885
886	vec = highest_inservice_irq(vcpu);
887	if (vec == NULL_VECTOR)
888		return;
889	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
890	VCPU(vcpu, eoi) = 0;
891	vcpu->arch.irq_new_pending = 1;
892
893}
894
895/* See Table 5-8 in SDM vol2 for the definition */
896int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
897{
898	union ia64_tpr vtpr;
899
900	vtpr.val = VCPU(vcpu, tpr);
901
902	if (h_inservice == NMI_VECTOR)
903		return IRQ_MASKED_BY_INSVC;
904
905	if (h_pending == NMI_VECTOR) {
906		/* Non Maskable Interrupt */
907		return IRQ_NO_MASKED;
908	}
909
910	if (h_inservice == ExtINT_VECTOR)
911		return IRQ_MASKED_BY_INSVC;
912
913	if (h_pending == ExtINT_VECTOR) {
914		if (vtpr.mmi) {
915			/* mask all external IRQ */
916			return IRQ_MASKED_BY_VTPR;
917		} else
918			return IRQ_NO_MASKED;
919	}
920
921	if (is_higher_irq(h_pending, h_inservice)) {
922		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
923			return IRQ_NO_MASKED;
924		else
925			return IRQ_MASKED_BY_VTPR;
926	} else {
927		return IRQ_MASKED_BY_INSVC;
928	}
929}
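
/*
 * A worked example of the TPR check above (a sketch: IA-64 defines
 * tpr.mic as masking every interrupt whose priority class, vector >> 4,
 * is at or below mic, and is_higher_class() is assumed to implement that
 * comparison): with vtpr.mmi = 0, vtpr.mic = 3 and nothing of equal or
 * higher priority in service, a pending vector 0x51 (class 5) is
 * deliverable, while a pending vector 0x35 (class 3) comes back as
 * IRQ_MASKED_BY_VTPR.
 */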
930
931void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
932{
933	long spsr;
934	int ret;
935
936	local_irq_save(spsr);
937	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
938	local_irq_restore(spsr);
939
940	vcpu->arch.irq_new_pending = 1;
941}
942
943void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
944{
945	long spsr;
946	int ret;
947
948	local_irq_save(spsr);
949	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
950	local_irq_restore(spsr);
951	if (ret) {
952		vcpu->arch.irq_new_pending = 1;
953		wmb();
954	}
955}
956
957void update_vhpi(struct kvm_vcpu *vcpu, int vec)
958{
959	u64 vhpi;
960
961	if (vec == NULL_VECTOR)
962		vhpi = 0;
963	else if (vec == NMI_VECTOR)
964		vhpi = 32;
965	else if (vec == ExtINT_VECTOR)
966		vhpi = 16;
967	else
968		vhpi = vec >> 4;
969
970	VCPU(vcpu, vhpi) = vhpi;
971	if (VCPU(vcpu, vac).a_int)
972		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
973				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
974}
975
976u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
977{
978	int vec, h_inservice, mask;
979
980	vec = highest_pending_irq(vcpu);
981	h_inservice = highest_inservice_irq(vcpu);
982	mask = irq_masked(vcpu, vec, h_inservice);
983	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
984		if (VCPU(vcpu, vhpi))
985			update_vhpi(vcpu, NULL_VECTOR);
986		return IA64_SPURIOUS_INT_VECTOR;
987	}
988	if (mask == IRQ_MASKED_BY_VTPR) {
989		update_vhpi(vcpu, vec);
990		return IA64_SPURIOUS_INT_VECTOR;
991	}
992	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
993	vcpu_unpend_interrupt(vcpu, vec);
994	return  (u64)vec;
995}
996
997/**************************************************************************
998  Privileged operation emulation routines
999 **************************************************************************/
1000u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
1001{
1002	union ia64_pta vpta;
1003	union ia64_rr vrr;
1004	u64 pval;
1005	u64 vhpt_offset;
1006
1007	vpta.val = vcpu_get_pta(vcpu);
1008	vrr.val = vcpu_get_rr(vcpu, vadr);
1009	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
1010	if (vpta.vf) {
1011		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
1012				vpta.val, 0, 0, 0, 0);
1013	} else {
1014		pval = (vadr & VRN_MASK) | vhpt_offset |
1015			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
1016	}
1017	return  pval;
1018}
1019
1020u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
1021{
1022	union ia64_rr vrr;
1023	union ia64_pta vpta;
1024	u64 pval;
1025
1026	vpta.val = vcpu_get_pta(vcpu);
1027	vrr.val = vcpu_get_rr(vcpu, vadr);
1028	if (vpta.vf) {
1029		pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
1030						0, 0, 0, 0, 0);
1031	} else
1032		pval = 1;
1033
1034	return  pval;
1035}
1036
1037u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
1038{
1039	struct thash_data *data;
1040	union ia64_pta vpta;
1041	u64 key;
1042
1043	vpta.val = vcpu_get_pta(vcpu);
1044	if (vpta.vf == 0) {
1045		key = 1;
1046		return key;
1047	}
1048	data = vtlb_lookup(vcpu, vadr, D_TLB);
1049	if (!data || !data->p)
1050		key = 1;
1051	else
1052		key = data->key;
1053
1054	return key;
1055}
1056
1057void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
1058{
1059	unsigned long thash, vadr;
1060
1061	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1062	thash = vcpu_thash(vcpu, vadr);
1063	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
1064}
1065
1066void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
1067{
1068	unsigned long tag, vadr;
1069
1070	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1071	tag = vcpu_ttag(vcpu, vadr);
1072	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
1073}
1074
1075int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
1076{
1077	struct thash_data *data;
1078	union ia64_isr visr, pt_isr;
1079	struct kvm_pt_regs *regs;
1080	struct ia64_psr vpsr;
1081
1082	regs = vcpu_regs(vcpu);
1083	pt_isr.val = VMX(vcpu, cr_isr);
1084	visr.val = 0;
1085	visr.ei = pt_isr.ei;
1086	visr.ir = pt_isr.ir;
1087	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1088	visr.na = 1;
1089
1090	data = vhpt_lookup(vadr);
1091	if (data) {
1092		if (data->p == 0) {
1093			vcpu_set_isr(vcpu, visr.val);
1094			data_page_not_present(vcpu, vadr);
1095			return IA64_FAULT;
1096		} else if (data->ma == VA_MATTR_NATPAGE) {
1097			vcpu_set_isr(vcpu, visr.val);
1098			dnat_page_consumption(vcpu, vadr);
1099			return IA64_FAULT;
1100		} else {
1101			*padr = (data->gpaddr >> data->ps << data->ps) |
1102				(vadr & (PSIZE(data->ps) - 1));
1103			return IA64_NO_FAULT;
1104		}
1105	}
1106
1107	data = vtlb_lookup(vcpu, vadr, D_TLB);
1108	if (data) {
1109		if (data->p == 0) {
1110			vcpu_set_isr(vcpu, visr.val);
1111			data_page_not_present(vcpu, vadr);
1112			return IA64_FAULT;
1113		} else if (data->ma == VA_MATTR_NATPAGE) {
1114			vcpu_set_isr(vcpu, visr.val);
1115			dnat_page_consumption(vcpu, vadr);
1116			return IA64_FAULT;
1117		} else{
1118			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
1119				| (vadr & (PSIZE(data->ps) - 1));
1120			return IA64_NO_FAULT;
1121		}
1122	}
1123	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
1124		if (vpsr.ic) {
1125			vcpu_set_isr(vcpu, visr.val);
1126			alt_dtlb(vcpu, vadr);
1127			return IA64_FAULT;
1128		} else {
1129			nested_dtlb(vcpu);
1130			return IA64_FAULT;
1131		}
1132	} else {
1133		if (vpsr.ic) {
1134			vcpu_set_isr(vcpu, visr.val);
1135			dvhpt_fault(vcpu, vadr);
1136			return IA64_FAULT;
1137		} else{
1138			nested_dtlb(vcpu);
1139			return IA64_FAULT;
1140		}
1141	}
1142
1143	return IA64_NO_FAULT;
1144}
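
/*
 * Summary of the lookup order above: the machine VHPT is searched first,
 * then the guest VTLB; a usable entry yields the physical address, an
 * entry with p == 0 or a NaT-page memory attribute injects the matching
 * data fault, and a complete miss becomes an alternate-DTLB, DVHPT or
 * nested-DTLB fault depending on vpsr.ic and on whether the VHPT walker
 * is enabled for this address.
 */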
1145
1146int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
1147{
1148	unsigned long r1, r3;
1149
1150	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1151
1152	if (vcpu_tpa(vcpu, r3, &r1))
1153		return IA64_FAULT;
1154
1155	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1156	return(IA64_NO_FAULT);
1157}
1158
1159void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
1160{
1161	unsigned long r1, r3;
1162
1163	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1164	r1 = vcpu_tak(vcpu, r3);
1165	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1166}
1167
1168/************************************
1169 * Insert/Purge translation register/cache
1170 ************************************/
1171void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1172{
1173	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
1174}
1175
1176void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1177{
1178	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
1179}
1180
1181void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1182{
1183	u64 ps, va, rid;
1184	struct thash_data *p_itr;
1185
1186	ps = itir_ps(itir);
1187	va = PAGEALIGN(ifa, ps);
1188	pte &= ~PAGE_FLAGS_RV_MASK;
1189	rid = vcpu_get_rr(vcpu, ifa);
1190	rid = rid & RR_RID_MASK;
1191	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
1192	vcpu_set_tr(p_itr, pte, itir, va, rid);
1193	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
1194}
1195
1196
1197void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1198{
1199	u64 gpfn;
1200	u64 ps, va, rid;
1201	struct thash_data *p_dtr;
1202
1203	ps = itir_ps(itir);
1204	va = PAGEALIGN(ifa, ps);
1205	pte &= ~PAGE_FLAGS_RV_MASK;
1206
1207	if (ps != _PAGE_SIZE_16M)
1208		thash_purge_entries(vcpu, va, ps);
1209	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
1210	if (__gpfn_is_io(gpfn))
1211		pte |= VTLB_PTE_IO;
1212	rid = vcpu_get_rr(vcpu, va);
1213	rid = rid & RR_RID_MASK;
1214	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
1215	vcpu_set_tr(p_dtr, pte, itir, va, rid);
1217	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
1218}
1219
1220void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1221{
1222	int index;
1223	u64 va;
1224
1225	va = PAGEALIGN(ifa, ps);
1226	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
1227		vcpu->arch.dtrs[index].page_flags = 0;
1228
1229	thash_purge_entries(vcpu, va, ps);
1230}
1231
1232void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1233{
1234	int index;
1235	u64 va;
1236
1237	va = PAGEALIGN(ifa, ps);
1238	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
1239		vcpu->arch.itrs[index].page_flags = 0;
1240
1241	thash_purge_entries(vcpu, va, ps);
1242}
1243
1244void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1245{
1246	va = PAGEALIGN(va, ps);
1247	thash_purge_entries(vcpu, va, ps);
1248}
1249
1250void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
1251{
1252	thash_purge_all(vcpu);
1253}
1254
1255void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1256{
1257	struct exit_ctl_data *p = &vcpu->arch.exit_data;
1258	long psr;
1259	local_irq_save(psr);
1260	p->exit_reason = EXIT_REASON_PTC_G;
1261
1262	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
1263	p->u.ptc_g_data.vaddr = va;
1264	p->u.ptc_g_data.ps = ps;
1265	vmm_transition(vcpu);
1266	/* Do Local Purge Here*/
1267	vcpu_ptc_l(vcpu, va, ps);
1268	local_irq_restore(psr);
1269}
1270
1271
1272void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1273{
1274	vcpu_ptc_ga(vcpu, va, ps);
1275}
1276
1277void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
1278{
1279	unsigned long ifa;
1280
1281	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1282	vcpu_ptc_e(vcpu, ifa);
1283}
1284
1285void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
1286{
1287	unsigned long ifa, itir;
1288
1289	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1290	itir = vcpu_get_gr(vcpu, inst.M45.r2);
1291	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
1292}
1293
1294void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
1295{
1296	unsigned long ifa, itir;
1297
1298	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1299	itir = vcpu_get_gr(vcpu, inst.M45.r2);
1300	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
1301}
1302
1303void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
1304{
1305	unsigned long ifa, itir;
1306
1307	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1308	itir = vcpu_get_gr(vcpu, inst.M45.r2);
1309	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
1310}
1311
1312void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
1313{
1314	unsigned long ifa, itir;
1315
1316	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1317	itir = vcpu_get_gr(vcpu, inst.M45.r2);
1318	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
1319}
1320
1321void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
1322{
1323	unsigned long ifa, itir;
1324
1325	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1326	itir = vcpu_get_gr(vcpu, inst.M45.r2);
1327	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
1328}
1329
1330void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
1331{
1332	unsigned long itir, ifa, pte, slot;
1333
1334	slot = vcpu_get_gr(vcpu, inst.M45.r3);
1335	pte = vcpu_get_gr(vcpu, inst.M45.r2);
1336	itir = vcpu_get_itir(vcpu);
1337	ifa = vcpu_get_ifa(vcpu);
1338	vcpu_itr_d(vcpu, slot, pte, itir, ifa);
1339}
1340
1341
1342
1343void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
1344{
1345	unsigned long itir, ifa, pte, slot;
1346
1347	slot = vcpu_get_gr(vcpu, inst.M45.r3);
1348	pte = vcpu_get_gr(vcpu, inst.M45.r2);
1349	itir = vcpu_get_itir(vcpu);
1350	ifa = vcpu_get_ifa(vcpu);
1351	vcpu_itr_i(vcpu, slot, pte, itir, ifa);
1352}
1353
1354void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
1355{
1356	unsigned long itir, ifa, pte;
1357
1358	itir = vcpu_get_itir(vcpu);
1359	ifa = vcpu_get_ifa(vcpu);
1360	pte = vcpu_get_gr(vcpu, inst.M45.r2);
1361	vcpu_itc_d(vcpu, pte, itir, ifa);
1362}
1363
1364void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
1365{
1366	unsigned long itir, ifa, pte;
1367
1368	itir = vcpu_get_itir(vcpu);
1369	ifa = vcpu_get_ifa(vcpu);
1370	pte = vcpu_get_gr(vcpu, inst.M45.r2);
1371	vcpu_itc_i(vcpu, pte, itir, ifa);
1372}
1373
1374/*************************************
1375 * Moves to semi-privileged registers
1376 *************************************/
1377
1378void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
1379{
1380	unsigned long imm;
1381
1382	if (inst.M30.s)
1383		imm = -inst.M30.imm;
1384	else
1385		imm = inst.M30.imm;
1386
1387	vcpu_set_itc(vcpu, imm);
1388}
1389
1390void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1391{
1392	unsigned long r2;
1393
1394	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
1395	vcpu_set_itc(vcpu, r2);
1396}
1397
1398void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1399{
1400	unsigned long r1;
1401
1402	r1 = vcpu_get_itc(vcpu);
1403	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
1404}
1405
1406/**************************************************************************
1407  struct kvm_vcpu protection key register access routines
1408 **************************************************************************/
1409
1410unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
1411{
1412	return ((unsigned long)ia64_get_pkr(reg));
1413}
1414
1415void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
1416{
1417	ia64_set_pkr(reg, val);
1418}
1419
1420/********************************
1421 * Moves to privileged registers
1422 ********************************/
1423unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
1424					unsigned long val)
1425{
1426	union ia64_rr oldrr, newrr;
1427	unsigned long rrval;
1428	struct exit_ctl_data *p = &vcpu->arch.exit_data;
1429	unsigned long psr;
1430
1431	oldrr.val = vcpu_get_rr(vcpu, reg);
1432	newrr.val = val;
1433	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
1434
1435	switch ((unsigned long)(reg >> VRN_SHIFT)) {
1436	case VRN6:
1437		vcpu->arch.vmm_rr = vrrtomrr(val);
1438		local_irq_save(psr);
1439		p->exit_reason = EXIT_REASON_SWITCH_RR6;
1440		vmm_transition(vcpu);
1441		local_irq_restore(psr);
1442		break;
1443	case VRN4:
1444		rrval = vrrtomrr(val);
1445		vcpu->arch.metaphysical_saved_rr4 = rrval;
1446		if (!is_physical_mode(vcpu))
1447			ia64_set_rr(reg, rrval);
1448		break;
1449	case VRN0:
1450		rrval = vrrtomrr(val);
1451		vcpu->arch.metaphysical_saved_rr0 = rrval;
1452		if (!is_physical_mode(vcpu))
1453			ia64_set_rr(reg, rrval);
1454		break;
1455	default:
1456		ia64_set_rr(reg, vrrtomrr(val));
1457		break;
1458	}
1459
1460	return (IA64_NO_FAULT);
1461}
1462
1463void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
1464{
1465	unsigned long r3, r2;
1466
1467	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1468	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1469	vcpu_set_rr(vcpu, r3, r2);
1470}
1471
1472void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1473{
1474}
1475
1476void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1477{
1478}
1479
1480void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1481{
1482	unsigned long r3, r2;
1483
1484	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1485	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1486	vcpu_set_pmc(vcpu, r3, r2);
1487}
1488
1489void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
1490{
1491	unsigned long r3, r2;
1492
1493	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1494	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1495	vcpu_set_pmd(vcpu, r3, r2);
1496}
1497
1498void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1499{
1500	u64 r3, r2;
1501
1502	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1503	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1504	vcpu_set_pkr(vcpu, r3, r2);
1505}
1506
1507void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
1508{
1509	unsigned long r3, r1;
1510
1511	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1512	r1 = vcpu_get_rr(vcpu, r3);
1513	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1514}
1515
1516void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1517{
1518	unsigned long r3, r1;
1519
1520	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1521	r1 = vcpu_get_pkr(vcpu, r3);
1522	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1523}
1524
1525void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1526{
1527	unsigned long r3, r1;
1528
1529	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1530	r1 = vcpu_get_dbr(vcpu, r3);
1531	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1532}
1533
1534void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1535{
1536	unsigned long r3, r1;
1537
1538	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1539	r1 = vcpu_get_ibr(vcpu, r3);
1540	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1541}
1542
1543void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1544{
1545	unsigned long r3, r1;
1546
1547	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1548	r1 = vcpu_get_pmc(vcpu, r3);
1549	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1550}
1551
1552unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
1553{
1554	if (reg > (ia64_get_cpuid(3) & 0xff))
1555		return 0;
1556	else
1557		return ia64_get_cpuid(reg);
1558}
1559
1560void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
1561{
1562	unsigned long r3, r1;
1563
1564	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1565	r1 = vcpu_get_cpuid(vcpu, r3);
1566	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1567}
1568
1569void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
1570{
1571	VCPU(vcpu, tpr) = val;
1572	vcpu->arch.irq_check = 1;
1573}
1574
1575unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
1576{
1577	unsigned long r2;
1578
1579	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
1580	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1581
1582	switch (inst.M32.cr3) {
1583	case 0:
1584		vcpu_set_dcr(vcpu, r2);
1585		break;
1586	case 1:
1587		vcpu_set_itm(vcpu, r2);
1588		break;
1589	case 66:
1590		vcpu_set_tpr(vcpu, r2);
1591		break;
1592	case 67:
1593		vcpu_set_eoi(vcpu, r2);
1594		break;
1595	default:
1596		break;
1597	}
1598
1599	return 0;
1600}
1601
1602unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
1603{
1604	unsigned long tgt = inst.M33.r1;
1605	unsigned long val;
1606
1607	switch (inst.M33.cr3) {
1608	case 65:
1609		val = vcpu_get_ivr(vcpu);
1610		vcpu_set_gr(vcpu, tgt, val, 0);
1611		break;
1612
1613	case 67:
1614		vcpu_set_gr(vcpu, tgt, 0L, 0);
1615		break;
1616	default:
1617		val = VCPU(vcpu, vcr[inst.M33.cr3]);
1618		vcpu_set_gr(vcpu, tgt, val, 0);
1619		break;
1620	}
1621
1622	return 0;
1623}
1624
1625void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1626{
1627
1628	unsigned long mask;
1629	struct kvm_pt_regs *regs;
1630	struct ia64_psr old_psr, new_psr;
1631
1632	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1633
1634	regs = vcpu_regs(vcpu);
1635	/* We only support guests with:
1636	 *  vpsr.pk = 0
1637	 *  vpsr.is = 0
1638	 * Otherwise panic
1639	 */
1640	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
1641		panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
1642				"& vpsr.is=0\n");
1643
1644	/*
1645	 * For the IA64_PSR bits id/da/dd/ss/ed/ia: since these bits become 0
1646	 * after each instruction completes successfully, we do not keep them in
1647	 * the virtual PSR but let the machine PSR (mIA64_PSR) carry them.
1648	 */
1649	VCPU(vcpu, vpsr) = val
1650		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
1651			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
1652
1653	if (!old_psr.i && (val & IA64_PSR_I)) {
1654		/* vpsr.i 0->1 */
1655		vcpu->arch.irq_check = 1;
1656	}
1657	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1658
1659	/*
1660	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
1661	 * except for the following bits:
1662	 *  ic/i/dt/si/rt/mc/it/bn/vm
1663	 */
1664	mask =  IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
1665		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
1666		IA64_PSR_VM;
1667
1668	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
1669
1670	check_mm_mode_switch(vcpu, old_psr, new_psr);
1671
1672	return ;
1673}
1674
1675unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
1676{
1677	struct ia64_psr vpsr;
1678
1679	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1680	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1681
1682	if (!vpsr.ic)
1683		VCPU(vcpu, ifs) = regs->cr_ifs;
1684	regs->cr_ifs = IA64_IFS_V;
1685	return (IA64_NO_FAULT);
1686}
1687
1688
1689
1690/**************************************************************************
1691  VCPU banked general register access routines
1692 **************************************************************************/
1693#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
1694	do {     							\
1695		__asm__ __volatile__ (					\
1696				";;extr.u %0 = %3,%6,16;;\n"		\
1697				"dep %1 = %0, %1, 0, 16;;\n"		\
1698				"st8 [%4] = %1\n"			\
1699				"extr.u %0 = %2, 16, 16;;\n"		\
1700				"dep %3 = %0, %3, %6, 16;;\n"		\
1701				"st8 [%5] = %3\n"			\
1702				::"r"(i), "r"(*b1unat), "r"(*b0unat),	\
1703				"r"(*runat), "r"(b1unat), "r"(runat),	\
1704				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
1705	} while (0)
1706
1707void vcpu_bsw0(struct kvm_vcpu *vcpu)
1708{
1709	unsigned long i;
1710
1711	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1712	unsigned long *r = &regs->r16;
1713	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1714	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1715	unsigned long *runat = &regs->eml_unat;
1716	unsigned long *b0unat = &VCPU(vcpu, vbnat);
1717	unsigned long *b1unat = &VCPU(vcpu, vnat);
1718
1719
1720	if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
1721		for (i = 0; i < 16; i++) {
1722			*b1++ = *r;
1723			*r++ = *b0++;
1724		}
1725		vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1726				VMM_PT_REGS_R16_SLOT);
1727		VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1728	}
1729}
1730
1731#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
1732	do {             						\
1733		__asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"	\
1734				"dep %1 = %0, %1, 16, 16;;\n"		\
1735				"st8 [%4] = %1\n"			\
1736				"extr.u %0 = %2, 0, 16;;\n"		\
1737				"dep %3 = %0, %3, %6, 16;;\n"		\
1738				"st8 [%5] = %3\n"			\
1739				::"r"(i), "r"(*b0unat), "r"(*b1unat),	\
1740				"r"(*runat), "r"(b0unat), "r"(runat),	\
1741				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
1742	} while (0)
1743
1744void vcpu_bsw1(struct kvm_vcpu *vcpu)
1745{
1746	unsigned long i;
1747	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1748	unsigned long *r = &regs->r16;
1749	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1750	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1751	unsigned long *runat = &regs->eml_unat;
1752	unsigned long *b0unat = &VCPU(vcpu, vbnat);
1753	unsigned long *b1unat = &VCPU(vcpu, vnat);
1754
1755	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
1756		for (i = 0; i < 16; i++) {
1757			*b0++ = *r;
1758			*r++ = *b1++;
1759		}
1760		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
1761				VMM_PT_REGS_R16_SLOT);
1762		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
1763	}
1764}
1765
1766void vcpu_rfi(struct kvm_vcpu *vcpu)
1767{
1768	unsigned long ifs, psr;
1769	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1770
1771	psr = VCPU(vcpu, ipsr);
1772	if (psr & IA64_PSR_BN)
1773		vcpu_bsw1(vcpu);
1774	else
1775		vcpu_bsw0(vcpu);
1776	vcpu_set_psr(vcpu, psr);
1777	ifs = VCPU(vcpu, ifs);
1778	if (ifs >> 63)
1779		regs->cr_ifs = ifs;
1780	regs->cr_iip = VCPU(vcpu, iip);
1781}
1782
1783/*
1784   VPSR cannot track the guest PSR bits covered by the mask below;
1785   this function reconstructs the full guest PSR from vpsr and cr_ipsr.
1786 */
1787
1788unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
1789{
1790	unsigned long mask;
1791	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1792
1793	mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
1794		IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
1795	return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
1796}
1797
1798void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
1799{
1800	unsigned long vpsr;
1801	unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
1802					| inst.M44.imm;
1803
1804	vpsr = vcpu_get_psr(vcpu);
1805	vpsr &= (~imm24);
1806	vcpu_set_psr(vcpu, vpsr);
1807}
1808
1809void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
1810{
1811	unsigned long vpsr;
1812	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
1813				| inst.M44.imm;
1814
1815	vpsr = vcpu_get_psr(vcpu);
1816	vpsr |= imm24;
1817	vcpu_set_psr(vcpu, vpsr);
1818}
1819
1820/* Generate Mask
1821 * Parameter:
1822 *  bit -- starting bit
1823 *  len -- how many bits
1824 */
1825#define MASK(bit,len)				   	\
1826({							\
1827		__u64	ret;				\
1828							\
1829		__asm __volatile("dep %0=-1, r0, %1, %2"\
1830				: "=r" (ret):		\
1831		  "M" (bit),				\
1832		  "M" (len));				\
1833		ret;					\
1834})
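
/*
 * MASK(bit, len) deposits LEN consecutive 1 bits starting at bit BIT, so
 * MASK(0, 32) == 0x00000000ffffffff and MASK(32, 32) == 0xffffffff00000000.
 * vcpu_set_psr_l() below uses that pair to splice the guest-written low
 * 32 bits of the PSR onto the preserved upper 32 bits.
 */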
1835
1836void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
1837{
1838	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
1839	vcpu_set_psr(vcpu, val);
1840}
1841
1842void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
1843{
1844	unsigned long val;
1845
1846	val = vcpu_get_gr(vcpu, inst.M35.r2);
1847	vcpu_set_psr_l(vcpu, val);
1848}
1849
1850void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
1851{
1852	unsigned long val;
1853
1854	val = vcpu_get_psr(vcpu);
1855	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
1856	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
1857}
1858
1859void vcpu_increment_iip(struct kvm_vcpu *vcpu)
1860{
1861	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1862	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1863	if (ipsr->ri == 2) {
1864		ipsr->ri = 0;
1865		regs->cr_iip += 16;
1866	} else
1867		ipsr->ri++;
1868}
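
/*
 * cr.ipsr.ri is the slot index (0..2) of the next instruction within the
 * current 16-byte IA-64 bundle; advancing past slot 2 therefore wraps ri
 * back to 0 and moves cr.iip to the next bundle, and vcpu_decrement_iip()
 * below performs the exact reverse.
 */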
1869
1870void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
1871{
1872	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1873	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1874
1875	if (ipsr->ri == 0) {
1876		ipsr->ri = 2;
1877		regs->cr_iip -= 16;
1878	} else
1879		ipsr->ri--;
1880}
1881
1882/** Emulate a privileged operation.
1883 *
1884 *
1885 * @vcpu: the virtual cpu
1886 * @regs: the guest registers; the cause of the virtualization fault and
1887 * the faulting opcode are read from VMX(vcpu, cause) and VMX(vcpu, opcode)
1888 */
1889
1890void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
1891{
1892	unsigned long status, cause, opcode;
1893	INST64 inst;
1894
1895	status = IA64_NO_FAULT;
1896	cause = VMX(vcpu, cause);
1897	opcode = VMX(vcpu, opcode);
1898	inst.inst = opcode;
1899	/*
1900	 * Switch to actual virtual rid in rr0 and rr4,
1901	 * which is required by some tlb related instructions.
1902	 */
1903	prepare_if_physical_mode(vcpu);
1904
1905	switch (cause) {
1906	case EVENT_RSM:
1907		kvm_rsm(vcpu, inst);
1908		break;
1909	case EVENT_SSM:
1910		kvm_ssm(vcpu, inst);
1911		break;
1912	case EVENT_MOV_TO_PSR:
1913		kvm_mov_to_psr(vcpu, inst);
1914		break;
1915	case EVENT_MOV_FROM_PSR:
1916		kvm_mov_from_psr(vcpu, inst);
1917		break;
1918	case EVENT_MOV_FROM_CR:
1919		kvm_mov_from_cr(vcpu, inst);
1920		break;
1921	case EVENT_MOV_TO_CR:
1922		kvm_mov_to_cr(vcpu, inst);
1923		break;
1924	case EVENT_BSW_0:
1925		vcpu_bsw0(vcpu);
1926		break;
1927	case EVENT_BSW_1:
1928		vcpu_bsw1(vcpu);
1929		break;
1930	case EVENT_COVER:
1931		vcpu_cover(vcpu);
1932		break;
1933	case EVENT_RFI:
1934		vcpu_rfi(vcpu);
1935		break;
1936	case EVENT_ITR_D:
1937		kvm_itr_d(vcpu, inst);
1938		break;
1939	case EVENT_ITR_I:
1940		kvm_itr_i(vcpu, inst);
1941		break;
1942	case EVENT_PTR_D:
1943		kvm_ptr_d(vcpu, inst);
1944		break;
1945	case EVENT_PTR_I:
1946		kvm_ptr_i(vcpu, inst);
1947		break;
1948	case EVENT_ITC_D:
1949		kvm_itc_d(vcpu, inst);
1950		break;
1951	case EVENT_ITC_I:
1952		kvm_itc_i(vcpu, inst);
1953		break;
1954	case EVENT_PTC_L:
1955		kvm_ptc_l(vcpu, inst);
1956		break;
1957	case EVENT_PTC_G:
1958		kvm_ptc_g(vcpu, inst);
1959		break;
1960	case EVENT_PTC_GA:
1961		kvm_ptc_ga(vcpu, inst);
1962		break;
1963	case EVENT_PTC_E:
1964		kvm_ptc_e(vcpu, inst);
1965		break;
1966	case EVENT_MOV_TO_RR:
1967		kvm_mov_to_rr(vcpu, inst);
1968		break;
1969	case EVENT_MOV_FROM_RR:
1970		kvm_mov_from_rr(vcpu, inst);
1971		break;
1972	case EVENT_THASH:
1973		kvm_thash(vcpu, inst);
1974		break;
1975	case EVENT_TTAG:
1976		kvm_ttag(vcpu, inst);
1977		break;
1978	case EVENT_TPA:
1979		status = kvm_tpa(vcpu, inst);
1980		break;
1981	case EVENT_TAK:
1982		kvm_tak(vcpu, inst);
1983		break;
1984	case EVENT_MOV_TO_AR_IMM:
1985		kvm_mov_to_ar_imm(vcpu, inst);
1986		break;
1987	case EVENT_MOV_TO_AR:
1988		kvm_mov_to_ar_reg(vcpu, inst);
1989		break;
1990	case EVENT_MOV_FROM_AR:
1991		kvm_mov_from_ar_reg(vcpu, inst);
1992		break;
1993	case EVENT_MOV_TO_DBR:
1994		kvm_mov_to_dbr(vcpu, inst);
1995		break;
1996	case EVENT_MOV_TO_IBR:
1997		kvm_mov_to_ibr(vcpu, inst);
1998		break;
1999	case EVENT_MOV_TO_PMC:
2000		kvm_mov_to_pmc(vcpu, inst);
2001		break;
2002	case EVENT_MOV_TO_PMD:
2003		kvm_mov_to_pmd(vcpu, inst);
2004		break;
2005	case EVENT_MOV_TO_PKR:
2006		kvm_mov_to_pkr(vcpu, inst);
2007		break;
2008	case EVENT_MOV_FROM_DBR:
2009		kvm_mov_from_dbr(vcpu, inst);
2010		break;
2011	case EVENT_MOV_FROM_IBR:
2012		kvm_mov_from_ibr(vcpu, inst);
2013		break;
2014	case EVENT_MOV_FROM_PMC:
2015		kvm_mov_from_pmc(vcpu, inst);
2016		break;
2017	case EVENT_MOV_FROM_PKR:
2018		kvm_mov_from_pkr(vcpu, inst);
2019		break;
2020	case EVENT_MOV_FROM_CPUID:
2021		kvm_mov_from_cpuid(vcpu, inst);
2022		break;
2023	case EVENT_VMSW:
2024		status = IA64_FAULT;
2025		break;
2026	default:
2027		break;
2028	};
2029	/* Assume status is IA64_NO_FAULT unless an emulation above set it otherwise */
2030	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
2031		vcpu_increment_iip(vcpu);
2032
2033	recover_if_physical_mode(vcpu);
2034}
2035
2036void init_vcpu(struct kvm_vcpu *vcpu)
2037{
2038	int i;
2039
2040	vcpu->arch.mode_flags = GUEST_IN_PHY;
2041	VMX(vcpu, vrr[0]) = 0x38;
2042	VMX(vcpu, vrr[1]) = 0x38;
2043	VMX(vcpu, vrr[2]) = 0x38;
2044	VMX(vcpu, vrr[3]) = 0x38;
2045	VMX(vcpu, vrr[4]) = 0x38;
2046	VMX(vcpu, vrr[5]) = 0x38;
2047	VMX(vcpu, vrr[6]) = 0x38;
2048	VMX(vcpu, vrr[7]) = 0x38;
2049	VCPU(vcpu, vpsr) = IA64_PSR_BN;
2050	VCPU(vcpu, dcr) = 0;
2051	/* pta.size must not be 0.  The minimum is 15 (32k) */
2052	VCPU(vcpu, pta) = 15 << 2;
2053	VCPU(vcpu, itv) = 0x10000;
2054	VCPU(vcpu, itm) = 0;
2055	VMX(vcpu, last_itc) = 0;
2056
2057	VCPU(vcpu, lid) = VCPU_LID(vcpu);
2058	VCPU(vcpu, ivr) = 0;
2059	VCPU(vcpu, tpr) = 0x10000;
2060	VCPU(vcpu, eoi) = 0;
2061	VCPU(vcpu, irr[0]) = 0;
2062	VCPU(vcpu, irr[1]) = 0;
2063	VCPU(vcpu, irr[2]) = 0;
2064	VCPU(vcpu, irr[3]) = 0;
2065	VCPU(vcpu, pmv) = 0x10000;
2066	VCPU(vcpu, cmcv) = 0x10000;
2067	VCPU(vcpu, lrr0) = 0x10000;   /* default reset value? */
2068	VCPU(vcpu, lrr1) = 0x10000;   /* default reset value? */
2069	update_vhpi(vcpu, NULL_VECTOR);
2070	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */
2071
2072	for (i = 0; i < 4; i++)
2073		VLSAPIC_INSVC(vcpu, i) = 0;
2074}
2075
2076void kvm_init_all_rr(struct kvm_vcpu *vcpu)
2077{
2078	unsigned long psr;
2079
2080	local_irq_save(psr);
2081
2082	/* WARNING: virtual mode and physical mode must not coexist
2083	 * in the same region
2084	 */
2085
2086	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
2087	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
2088
2089	if (is_physical_mode(vcpu)) {
2090		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
2091			panic_vm(vcpu, "Machine Status conflicts!\n");
2092
2093		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
2094		ia64_dv_serialize_data();
2095		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
2096		ia64_dv_serialize_data();
2097	} else {
2098		ia64_set_rr((VRN0 << VRN_SHIFT),
2099				vcpu->arch.metaphysical_saved_rr0);
2100		ia64_dv_serialize_data();
2101		ia64_set_rr((VRN4 << VRN_SHIFT),
2102				vcpu->arch.metaphysical_saved_rr4);
2103		ia64_dv_serialize_data();
2104	}
2105	ia64_set_rr((VRN1 << VRN_SHIFT),
2106			vrrtomrr(VMX(vcpu, vrr[VRN1])));
2107	ia64_dv_serialize_data();
2108	ia64_set_rr((VRN2 << VRN_SHIFT),
2109			vrrtomrr(VMX(vcpu, vrr[VRN2])));
2110	ia64_dv_serialize_data();
2111	ia64_set_rr((VRN3 << VRN_SHIFT),
2112			vrrtomrr(VMX(vcpu, vrr[VRN3])));
2113	ia64_dv_serialize_data();
2114	ia64_set_rr((VRN5 << VRN_SHIFT),
2115			vrrtomrr(VMX(vcpu, vrr[VRN5])));
2116	ia64_dv_serialize_data();
2117	ia64_set_rr((VRN7 << VRN_SHIFT),
2118			vrrtomrr(VMX(vcpu, vrr[VRN7])));
2119	ia64_dv_serialize_data();
2120	ia64_srlz_d();
2121	ia64_set_psr(psr);
2122}
2123
2124int vmm_entry(void)
2125{
2126	struct kvm_vcpu *v;
2127	v = current_vcpu;
2128
2129	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
2130						0, 0, 0, 0, 0, 0);
2131	kvm_init_vtlb(v);
2132	kvm_init_vhpt(v);
2133	init_vcpu(v);
2134	kvm_init_all_rr(v);
2135	vmm_reset_entry();
2136
2137	return 0;
2138}
2139
2140static void kvm_show_registers(struct kvm_pt_regs *regs)
2141{
2142	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
2143
2144	struct kvm_vcpu *vcpu = current_vcpu;
2145	if (vcpu != NULL)
2146		printk("vcpu 0x%p vcpu %d\n",
2147		       vcpu, vcpu->vcpu_id);
2148
2149	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
2150	       regs->cr_ipsr, regs->cr_ifs, ip);
2151
2152	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
2153	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
2154	printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
2155	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
2156	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
2157	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
2158	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
2159	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
2160							regs->b6, regs->b7);
2161	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
2162	       regs->f6.u.bits[1], regs->f6.u.bits[0],
2163	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
2164	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
2165	       regs->f8.u.bits[1], regs->f8.u.bits[0],
2166	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
2167	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
2168	       regs->f10.u.bits[1], regs->f10.u.bits[0],
2169	       regs->f11.u.bits[1], regs->f11.u.bits[0]);
2170
2171	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
2172							regs->r2, regs->r3);
2173	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
2174							regs->r9, regs->r10);
2175	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
2176							regs->r12, regs->r13);
2177	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
2178							regs->r15, regs->r16);
2179	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
2180							regs->r18, regs->r19);
2181	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
2182							regs->r21, regs->r22);
2183	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
2184							regs->r24, regs->r25);
2185	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
2186							regs->r27, regs->r28);
2187	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
2188							regs->r30, regs->r31);
2189
2190}
2191
2192void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
2193{
2194	va_list args;
2195	char buf[256];
2196
2197	struct kvm_pt_regs *regs = vcpu_regs(v);
2198	struct exit_ctl_data *p = &v->arch.exit_data;
2199	va_start(args, fmt);
2200	vsnprintf(buf, sizeof(buf), fmt, args);
2201	va_end(args);
2202	printk(buf);
2203	kvm_show_registers(regs);
2204	p->exit_reason = EXIT_REASON_VM_PANIC;
2205	vmm_transition(v);
2206	/* Never returns */
2207	while (1);
2208}
2209