1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * AMD SVM support
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
7 *
8 * Authors:
9 *   Yaniv Kamay  <yaniv@qumranet.com>
10 *   Avi Kivity   <avi@qumranet.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2.  See
13 * the COPYING file in the top-level directory.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/vmalloc.h>
20#include <linux/highmem.h>
21#include <linux/profile.h>
22#include <linux/sched.h>
23#include <asm/desc.h>
24
25#include "kvm_svm.h"
26#include "x86_emulate.h"
27
28MODULE_AUTHOR("Qumranet");
29MODULE_LICENSE("GPL");
30
31#define IOPM_ALLOC_ORDER 2
32#define MSRPM_ALLOC_ORDER 1
33
34#define DB_VECTOR 1
35#define UD_VECTOR 6
36#define GP_VECTOR 13
37
38#define DR7_GD_MASK (1 << 13)
39#define DR6_BD_MASK (1 << 13)
40#define CR4_DE_MASK (1UL << 3)
41
42#define SEG_TYPE_LDT 2
43#define SEG_TYPE_BUSY_TSS16 3
44
45#define KVM_EFER_LMA (1 << 10)
46#define KVM_EFER_LME (1 << 8)
47
#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
/* Keep the old misspelled name as a compatibility alias. */
#define SVM_DEATURE_SVML SVM_FEATURE_SVML

/* Physical addresses of the I/O and MSR permission maps shared by all VMCBs. */
unsigned long iopm_base;
unsigned long msrpm_base;
54
/*
 * In-memory layout of a 16-byte system descriptor (LDT or TSS) as it
 * appears in the 64-bit GDT; used to locate the host TSS descriptor.
 */
struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));
63
/* Per-cpu state needed to run SVM guests on one host cpu. */
struct svm_cpu_data {
	int cpu;			/* host cpu number */

	u64 asid_generation;		/* bumped when the ASID space rolls over */
	u32 max_asid;			/* highest usable ASID on this cpu */
	u32 next_asid;			/* next free ASID to hand out */
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;		/* host state-save area (MSR_VM_HSAVE_PA) */
};
74
/* Per-cpu SVM bookkeeping, allocated by svm_cpu_init(). */
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
/* SVM_FEATURE_* bits, cached from CPUID in svm_hardware_enable(). */
static uint32_t svm_features;

/* Parameter block for bringing up SVM on one cpu. */
struct svm_init_data {
	int cpu;
	int r;
};
82
/* Base MSR of each of the three ranges covered by the MSR permission map. */
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
/* Two permission bits (read/write intercept) per MSR in each 2K block. */
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15
90
91static inline u32 svm_has(u32 feat)
92{
93	return svm_features & feat;
94}
95
96static unsigned get_addr_size(struct kvm_vcpu *vcpu)
97{
98	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
99	u16 cs_attrib;
100
101	if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
102		return 2;
103
104	cs_attrib = sa->cs.attrib;
105
106	return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
107				(cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
108}
109
110static inline u8 pop_irq(struct kvm_vcpu *vcpu)
111{
112	int word_index = __ffs(vcpu->irq_summary);
113	int bit_index = __ffs(vcpu->irq_pending[word_index]);
114	int irq = word_index * BITS_PER_LONG + bit_index;
115
116	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
117	if (!vcpu->irq_pending[word_index])
118		clear_bit(word_index, &vcpu->irq_summary);
119	return irq;
120}
121
122static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
123{
124	set_bit(irq, vcpu->irq_pending);
125	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
126}
127
/* Clear the global interrupt flag: block host interrupts around VMRUN. */
static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}
132
/* Set the global interrupt flag: allow host interrupts again. */
static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}
137
/* Invalidate the TLB mapping for one guest page in the given ASID. */
static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}
142
/* Read the host %cr2 (page-fault address) register. */
static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}
150
/* Write the host %cr2 register. */
static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}
155
/* Read the host %dr6 debug-status register. */
static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}
163
/* Write the host %dr6 debug-status register. */
static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}
168
/* Read the host %dr7 debug-control register. */
static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}
176
/* Write the host %dr7 debug-control register. */
static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}
181
182static inline void force_new_asid(struct kvm_vcpu *vcpu)
183{
184	vcpu->svm->asid_generation--;
185}
186
/* Flush the guest TLB by forcing allocation of a new ASID. */
static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}
191
192static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
193{
194	if (!(efer & KVM_EFER_LMA))
195		efer &= ~KVM_EFER_LME;
196
197	vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
198	vcpu->shadow_efer = efer;
199}
200
201static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
202{
203	vcpu->svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
204						SVM_EVTINJ_VALID_ERR |
205						SVM_EVTINJ_TYPE_EXEPT |
206						GP_VECTOR;
207	vcpu->svm->vmcb->control.event_inj_err = error_code;
208}
209
210static void inject_ud(struct kvm_vcpu *vcpu)
211{
212	vcpu->svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
213						SVM_EVTINJ_TYPE_EXEPT |
214						UD_VECTOR;
215}
216
217static int is_page_fault(uint32_t info)
218{
219	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
220	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
221}
222
223static int is_external_interrupt(u32 info)
224{
225	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
226	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
227}
228
229static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
230{
231	if (!vcpu->svm->next_rip) {
232		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
233		return;
234	}
235	if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > 15) {
236		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
237		       __FUNCTION__,
238		       vcpu->svm->vmcb->save.rip,
239		       vcpu->svm->next_rip);
240	}
241
242	vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
243	vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
244
245	vcpu->interrupt_window_open = 1;
246}
247
/*
 * Probe CPUID to check that this is an AMD cpu which reports the SVM
 * extension.  Returns 1 if SVM is available, 0 otherwise.
 */
static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	/* The SVM cpuid leaf must lie within the supported extended range. */
	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}
270
271static void svm_hardware_disable(void *garbage)
272{
273	struct svm_cpu_data *svm_data
274		= per_cpu(svm_data, raw_smp_processor_id());
275
276	if (svm_data) {
277		uint64_t efer;
278
279		wrmsrl(MSR_VM_HSAVE_PA, 0);
280		rdmsrl(MSR_EFER, efer);
281		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
282		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
283		__free_page(svm_data->save_area);
284		kfree(svm_data);
285	}
286}
287
/*
 * Enable SVM on the current cpu: set EFER.SVME and point
 * MSR_VM_HSAVE_PA at this cpu's host state-save area.
 * Runs on every cpu, hence the unused argument.
 */
static void svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *svm_data;
	uint64_t efer;
#ifdef CONFIG_X86_64
	struct desc_ptr gdt_descr;
#else
	struct Xgt_desc_struct gdt_descr;
#endif
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	/* CPUID[SVM_CPUID_FUNC].ebx reports the number of ASIDs. */
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	/* Start past max_asid so the first new_asid() flushes the TLB. */
	svm_data->next_asid = svm_data->max_asid + 1;
	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	/* Remember where this cpu's TSS descriptor lives in the GDT. */
	asm volatile ( "sgdt %0" : "=m"(gdt_descr) );
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}
328
329static int svm_cpu_init(int cpu)
330{
331	struct svm_cpu_data *svm_data;
332	int r;
333
334	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
335	if (!svm_data)
336		return -ENOMEM;
337	svm_data->cpu = cpu;
338	svm_data->save_area = alloc_page(GFP_KERNEL);
339	r = -ENOMEM;
340	if (!svm_data->save_area)
341		goto err_1;
342
343	per_cpu(svm_data, cpu) = svm_data;
344
345	return 0;
346
347err_1:
348	kfree(svm_data);
349	return r;
350
351}
352
/*
 * Program the MSR permission map entry for @msr.  Each MSR has two
 * adjacent bits: the low one intercepts reads, the high one intercepts
 * writes; a set bit means "intercept".  @read/@write non-zero means
 * the guest may access the MSR directly (the bit is cleared).
 * Returns 1 if @msr falls in one of the mapped ranges, else 0.
 */
static int set_msr_interception(u32 *msrpm, unsigned msr,
				int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			/* Bit-pair offset of this MSR within the whole map. */
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return 1;
		}
	}
	printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr);
	return 0;
}
375
/*
 * Module-load time setup: allocate the I/O and MSR permission maps
 * (all-ones == intercept everything), open direct guest access to the
 * MSRs we context-switch ourselves, and init the per-cpu data.
 * Returns 0 on success or a negative errno.
 */
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	struct page *msrpm_pages;
	void *msrpm_va;
	int r;

	kvm_emulator_want_group7_invlpg();

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;
	memset(page_address(iopm_pages), 0xff,
					PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;


	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);

	r = -ENOMEM;
	if (!msrpm_pages)
		goto err_1;

	msrpm_va = page_address(msrpm_pages);
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

	/* MSRs below are saved/restored by us, so no intercept is needed. */
#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err_2;
	}
	return 0;

err_2:
	/*
	 * NOTE(review): per-cpu data already allocated for earlier cpus is
	 * not freed on this path - confirm svm_hardware_disable() covers it.
	 */
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
	msrpm_base = 0;
err_1:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}
433
434static __exit void svm_hardware_unsetup(void)
435{
436	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
437	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
438	iopm_base = msrpm_base = 0;
439}
440
441static void init_seg(struct vmcb_seg *seg)
442{
443	seg->selector = 0;
444	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
445		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
446	seg->limit = 0xffff;
447	seg->base = 0;
448}
449
450static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
451{
452	seg->selector = 0;
453	seg->attrib = SVM_SELECTOR_P_MASK | type;
454	seg->limit = 0xffff;
455	seg->base = 0;
456}
457
/* Nothing to do: all per-vcpu SVM state is set up in svm_create_vcpu(). */
static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}
462
/*
 * Fill in a fresh VMCB with our standard intercept set and the
 * architectural post-RESET register state (cs:ip = f000:fff0, flat
 * 16-bit segments, dr6/dr7 defaults).
 */
static void init_vmcb(struct vmcb *vmcb)
{
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	control->intercept_cr_read = 	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_cr_write = 	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_dr_read = 	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK;

	control->intercept_dr_write = 	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK |
					INTERCEPT_DR5_MASK |
					INTERCEPT_DR7_MASK;

	/* Page faults are intercepted for the shadow MMU. */
	control->intercept_exceptions = 1 << PF_VECTOR;


	control->intercept = 	(1ULL << INTERCEPT_INTR) |
				(1ULL << INTERCEPT_NMI) |
				(1ULL << INTERCEPT_SMI) |
		/*
		 * selective cr0 intercept bug?
		 *    	0:   0f 22 d8                mov    %eax,%cr3
		 *	3:   0f 20 c0                mov    %cr0,%eax
		 *	6:   0d 00 00 00 80          or     $0x80000000,%eax
		 *	b:   0f 22 c0                mov    %eax,%cr0
		 * set cr3 ->interception
		 * get cr0 ->interception
		 * set cr0 -> no interception
		 */
		/*              (1ULL << INTERCEPT_SELECTIVE_CR0) | */
				(1ULL << INTERCEPT_CPUID) |
				(1ULL << INTERCEPT_HLT) |
				(1ULL << INTERCEPT_INVLPGA) |
				(1ULL << INTERCEPT_IOIO_PROT) |
				(1ULL << INTERCEPT_MSR_PROT) |
				(1ULL << INTERCEPT_TASK_SWITCH) |
				(1ULL << INTERCEPT_SHUTDOWN) |
				(1ULL << INTERCEPT_VMRUN) |
				(1ULL << INTERCEPT_VMMCALL) |
				(1ULL << INTERCEPT_VMLOAD) |
				(1ULL << INTERCEPT_VMSAVE) |
				(1ULL << INTERCEPT_STGI) |
				(1ULL << INTERCEPT_CLGI) |
				(1ULL << INTERCEPT_SKINIT) |
				(1ULL << INTERCEPT_MONITOR) |
				(1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = msrpm_base;
	control->tsc_offset = 0;
	/* Virtualize the interrupt mask: guest IF does not block host irqs. */
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;

        save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
	save->cr4 = CR4_PAE_MASK;
	/* rdx = ?? */
}
567
568static int svm_create_vcpu(struct kvm_vcpu *vcpu)
569{
570	struct page *page;
571	int r;
572
573	r = -ENOMEM;
574	vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
575	if (!vcpu->svm)
576		goto out1;
577	page = alloc_page(GFP_KERNEL);
578	if (!page)
579		goto out2;
580
581	vcpu->svm->vmcb = page_address(page);
582	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
583	vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
584	vcpu->svm->asid_generation = 0;
585	memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
586	init_vmcb(vcpu->svm->vmcb);
587
588	fx_init(vcpu);
589	vcpu->fpu_active = 1;
590	vcpu->apic_base = 0xfee00000 |
591			/*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
592			MSR_IA32_APICBASE_ENABLE;
593
594	return 0;
595
596out2:
597	kfree(vcpu->svm);
598out1:
599	return r;
600}
601
/* Free the VMCB page and per-vcpu SVM state; safe on partial init. */
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm)
		return;
	if (vcpu->svm->vmcb)
		__free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
	kfree(vcpu->svm);
}
610
/*
 * Called when the vcpu is (re)scheduled onto a host cpu: compensate
 * the VMCB TSC offset for cpu-to-cpu TSC drift and save the host MSRs
 * listed in host_save_user_msrs.  Pins the cpu via get_cpu(); the
 * matching put_cpu() is in svm_vcpu_put().
 */
static void svm_vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu, i;

	cpu = get_cpu();
	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		vcpu->svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
}
632
633static void svm_vcpu_put(struct kvm_vcpu *vcpu)
634{
635	int i;
636
637	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
638		wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
639
640	rdtscll(vcpu->host_tsc);
641	put_cpu();
642}
643
/* Nothing to decache for SVM. */
static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}
647
648static void svm_cache_regs(struct kvm_vcpu *vcpu)
649{
650	vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
651	vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp;
652	vcpu->rip = vcpu->svm->vmcb->save.rip;
653}
654
655static void svm_decache_regs(struct kvm_vcpu *vcpu)
656{
657	vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
658	vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
659	vcpu->svm->vmcb->save.rip = vcpu->rip;
660}
661
/* Read the guest rflags from the VMCB. */
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return vcpu->svm->vmcb->save.rflags;
}
666
/* Write the guest rflags into the VMCB. */
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	vcpu->svm->vmcb->save.rflags = rflags;
}
671
/* Map a VCPU_SREG_* index to its vmcb_seg within the VMCB save area. */
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &vcpu->svm->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();	/* callers only pass valid VCPU_SREG_* values */
	return NULL;
}
689
690static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
691{
692	struct vmcb_seg *s = svm_seg(vcpu, seg);
693
694	return s->base;
695}
696
/*
 * Unpack a VMCB segment into the generic kvm_segment representation,
 * splitting the attrib word into its architectural bit fields.
 */
static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;	/* non-present segments are unusable */
}
715
716static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
717{
718	struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS);
719
720	*db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
721	*l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
722}
723
724static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
725{
726	dt->limit = vcpu->svm->vmcb->save.idtr.limit;
727	dt->base = vcpu->svm->vmcb->save.idtr.base;
728}
729
730static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
731{
732	vcpu->svm->vmcb->save.idtr.limit = dt->limit;
733	vcpu->svm->vmcb->save.idtr.base = dt->base ;
734}
735
736static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
737{
738	dt->limit = vcpu->svm->vmcb->save.gdtr.limit;
739	dt->base = vcpu->svm->vmcb->save.gdtr.base;
740}
741
742static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
743{
744	vcpu->svm->vmcb->save.gdtr.limit = dt->limit;
745	vcpu->svm->vmcb->save.gdtr.base = dt->base ;
746}
747
/* Nothing to do: cr4 is not partially shadowed on SVM. */
static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
751
/*
 * Load the guest cr0.  Handles long-mode activation/deactivation via
 * EFER.LMA when the guest toggles paging, and gives the FPU back when
 * the guest clears TS.  The hardware cr0 always has PG/WP forced on
 * and CD/NW forced off (see the last three lines).
 */
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		/* Paging switched on while LME is set: enter long mode. */
		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		/* Paging switched off: leave long mode. */
		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK) ) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}
#endif
	/* Guest cleared TS: stop intercepting #NM and re-enable the FPU. */
	if ((vcpu->cr0 & CR0_TS_MASK) && !(cr0 & CR0_TS_MASK)) {
		vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->cr0 = cr0;
	cr0 |= CR0_PG_MASK | CR0_WP_MASK;
	cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK);
	vcpu->svm->vmcb->save.cr0 = cr0;
}
777
778static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
779{
780       vcpu->cr4 = cr4;
781       vcpu->svm->vmcb->save.cr4 = cr4 | CR4_PAE_MASK;
782}
783
/*
 * Pack a generic kvm_segment back into VMCB format, rebuilding the
 * attrib word field by field.  Loading CS also re-derives the CPL
 * from the new CS DPL.
 */
static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		vcpu->svm->vmcb->save.cpl
			= (vcpu->svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;

}
810
811
/* Guest hardware-debug support is not implemented for SVM. */
static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}
816
/* Restore host MSRs; only MSR_GS_BASE is handled, and only on x86-64. */
static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
#endif
}
823
/* Save host MSRs; only MSR_GS_BASE is handled, and only on x86-64. */
static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
#endif
}
830
/*
 * Assign @vcpu a fresh ASID from @svm_data.  When the ASID space is
 * exhausted, start a new generation and request a full TLB flush on
 * the next VMRUN.
 */
static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	vcpu->cpu = svm_data->cpu;
	vcpu->svm->asid_generation = svm_data->asid_generation;
	vcpu->svm->vmcb->control.asid = svm_data->next_asid++;
}
843
844static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
845{
846	invlpga(address, vcpu->svm->vmcb->control.asid); // is needed?
847}
848
/* Return the shadowed guest debug register @dr (no range check here). */
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return vcpu->svm->db_regs[dr];
}
853
/*
 * Write a guest debug register.  On success *exception is 0;
 * otherwise it holds the vector (#DB/#UD/#GP) the caller must inject.
 */
static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	*exception = 0;

	/* General-detect: any dr access while dr7.GD is set raises #DB. */
	if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) {
		vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		vcpu->svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		/* dr4/5 accesses are #UD when CR4.DE is set. */
		if (vcpu->cr4 & CR4_DE_MASK) {
			*exception = UD_VECTOR;
			return;
		}
		/*
		 * fall through
		 * NOTE(review): dr4 architecturally aliases dr6, yet this
		 * path writes dr7; dr6 itself hits the default #UD case.
		 * Confirm intended behavior.
		 */
	case 7: {
		/* dr7 upper 32 bits must be zero. */
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}
890
/*
 * #PF intercept.  Re-queue any external interrupt that was pending at
 * fault time, let the shadow MMU try to resolve the fault, and fall
 * back to the instruction emulator (e.g. for MMIO).
 * Returns 1 to resume the guest, 0 to exit to userspace, < 0 on error.
 */
static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;
	int r;

	if (is_external_interrupt(exit_int_info))
		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	spin_lock(&vcpu->kvm->lock);

	/* exit_info_2 holds the fault address, exit_info_1 the error code. */
	fault_address  = vcpu->svm->vmcb->control.exit_info_2;
	error_code = vcpu->svm->vmcb->control.exit_info_1;
	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
	if (r < 0) {
		spin_unlock(&vcpu->kvm->lock);
		return r;
	}
	if (!r) {
		/* The shadow MMU handled the fault; just resume the guest. */
		spin_unlock(&vcpu->kvm->lock);
		return 1;
	}
	er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
	spin_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		kvm_run->exit_reason = KVM_EXIT_MMIO;
		return 0;
	case EMULATE_FAIL:
		vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}
935
936static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
937{
938       vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
939       if (!(vcpu->cr0 & CR0_TS_MASK))
940               vcpu->svm->vmcb->save.cr0 &= ~CR0_TS_MASK;
941       vcpu->fpu_active = 1;
942
943       return 1;
944}
945
946static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
947{
948	/*
949	 * VMCB is undefined after a SHUTDOWN intercept
950	 * so reinitialize it.
951	 */
952	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
953	init_vmcb(vcpu->svm->vmcb);
954
955	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
956	return 0;
957}
958
/*
 * Scan the bytes of the current I/O instruction for segment-override
 * and address-size prefixes.  On success returns 1 and sets *seg
 * (NULL for the default segment) and *addr_override; returns 0 when
 * the guest bytes cannot be read or only prefixes were found.
 */
static int io_get_override(struct kvm_vcpu *vcpu,
			  struct vmcb_seg **seg,
			  int *addr_override)
{
	u8 inst[MAX_INST_SIZE];
	unsigned ins_length;
	gva_t rip;
	int i;

	rip =  vcpu->svm->vmcb->save.rip;
	ins_length = vcpu->svm->next_rip - rip;
	rip += vcpu->svm->vmcb->save.cs.base;

	if (ins_length > MAX_INST_SIZE)
		printk(KERN_DEBUG
		       "%s: inst length err, cs base 0x%llx rip 0x%llx "
		       "next rip 0x%llx ins_length %u\n",
		       __FUNCTION__,
		       vcpu->svm->vmcb->save.cs.base,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->svm->vmcb->control.exit_info_2,
		       ins_length);

	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
		/* #PF */
		return 0;

	*addr_override = 0;
	*seg = NULL;
	for (i = 0; i < ins_length; i++)
		switch (inst[i]) {
		case 0xf0:	/* lock */
		case 0xf2:	/* repnz */
		case 0xf3:	/* repz */
		case 0x66:	/* operand-size override */
			continue;
		case 0x67:	/* address-size override */
			*addr_override = 1;
			continue;
		case 0x2e:	/* cs override */
			*seg = &vcpu->svm->vmcb->save.cs;
			continue;
		case 0x36:	/* ss override */
			*seg = &vcpu->svm->vmcb->save.ss;
			continue;
		case 0x3e:	/* ds override */
			*seg = &vcpu->svm->vmcb->save.ds;
			continue;
		case 0x26:	/* es override */
			*seg = &vcpu->svm->vmcb->save.es;
			continue;
		case 0x64:	/* fs override */
			*seg = &vcpu->svm->vmcb->save.fs;
			continue;
		case 0x65:	/* gs override */
			*seg = &vcpu->svm->vmcb->save.gs;
			continue;
		default:	/* first non-prefix byte: decoding done */
			return 1;
		}
	printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__);
	return 0;
}
1022
1023static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
1024{
1025	unsigned long addr_mask;
1026	unsigned long *reg;
1027	struct vmcb_seg *seg;
1028	int addr_override;
1029	struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
1030	u16 cs_attrib = save_area->cs.attrib;
1031	unsigned addr_size = get_addr_size(vcpu);
1032
1033	if (!io_get_override(vcpu, &seg, &addr_override))
1034		return 0;
1035
1036	if (addr_override)
1037		addr_size = (addr_size == 2) ? 4: (addr_size >> 1);
1038
1039	if (ins) {
1040		reg = &vcpu->regs[VCPU_REGS_RDI];
1041		seg = &vcpu->svm->vmcb->save.es;
1042	} else {
1043		reg = &vcpu->regs[VCPU_REGS_RSI];
1044		seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
1045	}
1046
1047	addr_mask = ~0ULL >> (64 - (addr_size * 8));
1048
1049	if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
1050	    !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
1051		*address = (*reg & addr_mask);
1052		return addr_mask;
1053	}
1054
1055	if (!(seg->attrib & SVM_SELECTOR_P_SHIFT)) {
1056		svm_inject_gp(vcpu, 0);
1057		return 0;
1058	}
1059
1060	*address = (*reg & addr_mask) + seg->base;
1061	return addr_mask;
1062}
1063
/*
 * IOIO intercept: decode port, size, direction and string/rep flags
 * from exit_info_1 and hand the access to the generic PIO emulation.
 * Returns 1 to resume the guest, 0 to exit to userspace.
 */
static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 io_info = vcpu->svm->vmcb->control.exit_info_1; /* XXX: address size bug? */
	int size, down, in, string, rep;
	unsigned port;
	unsigned long count;
	gva_t address = 0;

	++vcpu->stat.io_exits;

	/* exit_info_2 holds the rip of the instruction after the I/O op. */
	vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	count = 1;
	down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	if (string) {
		unsigned addr_mask;

		addr_mask = io_adress(vcpu, in, &address);
		if (!addr_mask) {
			printk(KERN_DEBUG "%s: get io address failed\n",
			       __FUNCTION__);
			return 1;
		}

		/* REP count comes from rcx, truncated to the address size. */
		if (rep)
			count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
	}
	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
			     address, rep, port);
}
1100
/* Handler for intercepts that need no action: just resume the guest. */
static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}
1105
1106static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1107{
1108	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
1109	skip_emulated_instruction(vcpu);
1110	if (vcpu->irq_summary)
1111		return 1;
1112
1113	kvm_run->exit_reason = KVM_EXIT_HLT;
1114	++vcpu->stat.halt_exits;
1115	return 0;
1116}
1117
/* VMMCALL intercept: a 3-byte hypercall instruction from the guest. */
static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3;
	skip_emulated_instruction(vcpu);
	return kvm_hypercall(vcpu, kvm_run);
}
1124
/* Inject #UD for intercepted instructions we do not allow the guest. */
static int invalid_op_interception(struct kvm_vcpu *vcpu,
				   struct kvm_run *kvm_run)
{
	inject_ud(vcpu);
	return 1;
}
1130
1131static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1132{
1133	printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__);
1134	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
1135	return 0;
1136}
1137
/* CPUID intercept: cpuid is a 2-byte instruction; emulate and advance. */
static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	kvm_emulate_cpuid(vcpu);
	return 1;
}
1144
/* Punt the intercepted instruction to the x86 emulator. */
static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
	return 1;
}
1151
/*
 * Read an MSR on behalf of the guest, mostly straight out of the VMCB
 * save area.  Returns 0 on success; non-zero means the caller should
 * inject #GP.  Unknown MSRs fall through to the common code.
 */
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		/* Guest TSC = host TSC + VMCB offset. */
		rdtscll(tsc);
		*data = vcpu->svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = vcpu->svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = vcpu->svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = vcpu->svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = vcpu->svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = vcpu->svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = vcpu->svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = vcpu->svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = vcpu->svm->vmcb->save.sysenter_esp;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}
1193
static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	/* Unhandled MSR reads inject #GP, per the RDMSR architecture. */
	if (svm_get_msr(vcpu, ecx, &data))
		svm_inject_gp(vcpu, 0);
	else {
		/* RDMSR returns the value split as EDX:EAX. */
		vcpu->svm->vmcb->save.rax = data & 0xffffffff;
		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
		/* RDMSR is a two-byte opcode. */
		vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
		skip_emulated_instruction(vcpu);
	}
	return 1;
}
1209
/*
 * Write a guest MSR.  Mirror of svm_get_msr(): VMCB-shadowed MSRs are
 * written to the save area, the rest is delegated to common code.
 * Returns 0 on success, non-zero on failure (the caller injects #GP).
 */
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		/*
		 * Store the delta between the requested guest TSC and the
		 * current host TSC; the hardware adds this offset on VMRUN.
		 */
		rdtscll(tsc);
		vcpu->svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		vcpu->svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	/* 64-bit-only syscall/fast-path MSRs shadowed by the VMCB. */
	case MSR_LSTAR:
		vcpu->svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		vcpu->svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		vcpu->svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		vcpu->svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vcpu->svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		vcpu->svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		vcpu->svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}
1251
1252static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1253{
1254	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1255	u64 data = (vcpu->svm->vmcb->save.rax & -1u)
1256		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
1257	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
1258	if (svm_set_msr(vcpu, ecx, data))
1259		svm_inject_gp(vcpu, 0);
1260	else
1261		skip_emulated_instruction(vcpu);
1262	return 1;
1263}
1264
1265static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1266{
1267	if (vcpu->svm->vmcb->control.exit_info_1)
1268		return wrmsr_interception(vcpu, kvm_run);
1269	else
1270		return rdmsr_interception(vcpu, kvm_run);
1271}
1272
/*
 * Handle a virtual-interrupt-window exit (SVM_EXIT_VINTR): the guest
 * has just become able to accept interrupts.
 */
static int interrupt_window_interception(struct kvm_vcpu *vcpu,
				   struct kvm_run *kvm_run)
{
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (kvm_run->request_interrupt_window &&
	    !vcpu->irq_summary) {
		++vcpu->stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	/* Otherwise stay in the kernel and keep running the guest. */
	return 1;
}
1289
/*
 * Dispatch table indexed by the VMCB exit code.  Unlisted codes are
 * NULL and are reported to userspace as KVM_EXIT_UNKNOWN by
 * handle_exit().  A handler returns 1 to keep running the guest, 0 to
 * exit to userspace.
 */
static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
				      struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]           		= emulate_on_interception,
	[SVM_EXIT_READ_CR3]           		= emulate_on_interception,
	[SVM_EXIT_READ_CR4]           		= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]          		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]          		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]          		= emulate_on_interception,
	[SVM_EXIT_READ_DR0] 			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR] 	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR] 	= nm_interception,
	[SVM_EXIT_INTR] 			= nop_on_interception,
	[SVM_EXIT_NMI]				= nop_on_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= emulate_on_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO] 		  	= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= invalid_op_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
	[SVM_EXIT_STGI]				= invalid_op_interception,
	[SVM_EXIT_CLGI]				= invalid_op_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
};
1335
1336
1337static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1338{
1339	u32 exit_code = vcpu->svm->vmcb->control.exit_code;
1340
1341	if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
1342	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
1343		printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
1344		       "exit_code 0x%x\n",
1345		       __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
1346		       exit_code);
1347
1348	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
1349	    || svm_exit_handlers[exit_code] == 0) {
1350		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
1351		kvm_run->hw.hardware_exit_reason = exit_code;
1352		return 0;
1353	}
1354
1355	return svm_exit_handlers[exit_code](vcpu, kvm_run);
1356}
1357
/*
 * VMSAVE/VMLOAD leave the host TR descriptor marked busy; mark it
 * available again and reload TR so the host TSS is usable.
 */
static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}
1366
/*
 * Per-entry housekeeping before VMRUN: make sure this vcpu holds a
 * valid ASID on the current physical cpu.
 */
static void pre_svm_run(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	/*
	 * A migration to another cpu, or a stale ASID generation on this
	 * cpu, both invalidate our ASID; allocate a fresh one.
	 */
	if (vcpu->cpu != cpu ||
	    vcpu->svm->asid_generation != svm_data->asid_generation)
		new_asid(vcpu, svm_data);
}
1378
1379
/*
 * Pop the next pending irq and program it into the VMCB as a virtual
 * interrupt (V_IRQ) for injection on the next VMRUN.
 */
static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control;

	control = &vcpu->svm->vmcb->control;
	control->int_vector = pop_irq(vcpu);
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	/*
	 * Priority is pinned at 0xf rather than derived from the vector
	 * (the derivation is left commented out below).
	 */
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}
1390
/*
 * After a guest exit, if a virtual interrupt was programmed but not
 * yet taken by the guest, push it back onto the pending-irq state so
 * it is not lost; also recompute the interrupt-window status from the
 * interrupt shadow.
 */
static void kvm_reput_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;

	if (control->int_ctl & V_IRQ_MASK) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(vcpu, control->int_vector);
	}

	vcpu->interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}
1403
/*
 * Before entering the guest: inject a pending irq if the guest can
 * take one now, otherwise arm the VINTR intercept so we get an exit
 * as soon as the interrupt window opens.
 */
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				       struct kvm_run *kvm_run)
{
	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;

	/* Window is open when not in an interrupt shadow and EFLAGS.IF=1. */
	vcpu->interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (vcpu->interrupt_window_open && vcpu->irq_summary)
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		kvm_do_inject_irq(vcpu);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!vcpu->interrupt_window_open &&
	    (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
		control->intercept |= 1ULL << INTERCEPT_VINTR;
	} else
		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}
1428
/*
 * Copy guest state that userspace wants on every return from
 * KVM_RUN into the shared kvm_run structure.
 */
static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
						  vcpu->irq_summary == 0);
	kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = vcpu->cr8;
	kvm_run->apic_base = vcpu->apic_base;
}
1438
1439/*
1440 * Check if userspace requested an interrupt window, and that the
1441 * interrupt window is open.
1442 *
1443 * No need to exit to userspace if we already have an interrupt queued.
1444 */
1445static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1446					  struct kvm_run *kvm_run)
1447{
1448	return (!vcpu->irq_summary &&
1449		kvm_run->request_interrupt_window &&
1450		vcpu->interrupt_window_open &&
1451		(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
1452}
1453
/* Save the four hardware breakpoint address registers DR0-DR3. */
static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}
1461
/* Restore the four hardware breakpoint address registers DR0-DR3. */
static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}
1469
/*
 * Main vcpu run loop: save host state, enter the guest with VMRUN,
 * restore host state and dispatch the resulting #VMEXIT.  Loops
 * ("again:") until a handler requests a return to userspace, a signal
 * is pending, or userspace's interrupt-window request is satisfied.
 */
static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;
	int r;

again:
	/*
	 * Skip irq injection while an MMIO read emulation is still in
	 * flight; the pending instruction must complete first.
	 */
	if (!vcpu->mmio_read_completed)
		do_interrupt_requests(vcpu, kvm_run);

	/* Block global interrupts until after VMRUN/state restore. */
	clgi();

	pre_svm_run(vcpu);

	/* Save host state that VMRUN/VMSAVE does not preserve for us. */
	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	vcpu->svm->host_cr2 = kvm_read_cr2();
	vcpu->svm->host_dr6 = read_dr6();
	vcpu->svm->host_dr7 = read_dr7();
	vcpu->svm->vmcb->save.cr2 = vcpu->cr2;

	/* Swap in guest debug registers only if the guest enabled any. */
	if (vcpu->svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(vcpu->svm->host_db_regs);
		load_db_regs(vcpu->svm->db_regs);
	}

	/* Lazy FPU: swap in the guest FPU image if the guest owns the FPU. */
	if (vcpu->fpu_active) {
		fx_save(vcpu->host_fx_image);
		fx_restore(vcpu->guest_fx_image);
	}

	/*
	 * World switch: load guest GPRs from vcpu->regs, run the guest
	 * (VMLOAD/VMRUN/VMSAVE on the vmcb physical address), then save
	 * guest GPRs back.  rax is handled by the VMCB itself.
	 */
	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
		"mov %c[rbx](%[vcpu]), %%rbx \n\t"
		"mov %c[rcx](%[vcpu]), %%rcx \n\t"
		"mov %c[rdx](%[vcpu]), %%rdx \n\t"
		"mov %c[rsi](%[vcpu]), %%rsi \n\t"
		"mov %c[rdi](%[vcpu]), %%rdi \n\t"
		"mov %c[rbp](%[vcpu]), %%rbp \n\t"
		"mov %c[r8](%[vcpu]),  %%r8  \n\t"
		"mov %c[r9](%[vcpu]),  %%r9  \n\t"
		"mov %c[r10](%[vcpu]), %%r10 \n\t"
		"mov %c[r11](%[vcpu]), %%r11 \n\t"
		"mov %c[r12](%[vcpu]), %%r12 \n\t"
		"mov %c[r13](%[vcpu]), %%r13 \n\t"
		"mov %c[r14](%[vcpu]), %%r14 \n\t"
		"mov %c[r15](%[vcpu]), %%r15 \n\t"
#else
		"mov %c[rbx](%[vcpu]), %%ebx \n\t"
		"mov %c[rcx](%[vcpu]), %%ecx \n\t"
		"mov %c[rdx](%[vcpu]), %%edx \n\t"
		"mov %c[rsi](%[vcpu]), %%esi \n\t"
		"mov %c[rdi](%[vcpu]), %%edi \n\t"
		"mov %c[rbp](%[vcpu]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[svm](%[vcpu]), %%rax \n\t"
		"mov %c[vmcb](%%rax), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[svm](%[vcpu]), %%eax \n\t"
		"mov %c[vmcb](%%eax), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[vcpu]) \n\t"
		"mov %%rcx, %c[rcx](%[vcpu]) \n\t"
		"mov %%rdx, %c[rdx](%[vcpu]) \n\t"
		"mov %%rsi, %c[rsi](%[vcpu]) \n\t"
		"mov %%rdi, %c[rdi](%[vcpu]) \n\t"
		"mov %%rbp, %c[rbp](%[vcpu]) \n\t"
		"mov %%r8,  %c[r8](%[vcpu]) \n\t"
		"mov %%r9,  %c[r9](%[vcpu]) \n\t"
		"mov %%r10, %c[r10](%[vcpu]) \n\t"
		"mov %%r11, %c[r11](%[vcpu]) \n\t"
		"mov %%r12, %c[r12](%[vcpu]) \n\t"
		"mov %%r13, %c[r13](%[vcpu]) \n\t"
		"mov %%r14, %c[r14](%[vcpu]) \n\t"
		"mov %%r15, %c[r15](%[vcpu]) \n\t"

		"pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
		"pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
		"pop  %%rbp; pop  %%rdi; pop  %%rsi;"
		"pop  %%rdx; pop  %%rcx; pop  %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[vcpu]) \n\t"
		"mov %%ecx, %c[rcx](%[vcpu]) \n\t"
		"mov %%edx, %c[rdx](%[vcpu]) \n\t"
		"mov %%esi, %c[rsi](%[vcpu]) \n\t"
		"mov %%edi, %c[rdi](%[vcpu]) \n\t"
		"mov %%ebp, %c[rbp](%[vcpu]) \n\t"

		"pop  %%ebp; pop  %%edi; pop  %%esi;"
		"pop  %%edx; pop  %%ecx; pop  %%ebx; \n\t"
#endif
		:
		: [vcpu]"a"(vcpu),
		  [svm]"i"(offsetof(struct kvm_vcpu, svm)),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
		  [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
		  [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory" );

	/* Lazy FPU: give the FPU back to the host. */
	if (vcpu->fpu_active) {
		fx_save(vcpu->guest_fx_image);
		fx_restore(vcpu->host_fx_image);
	}

	if ((vcpu->svm->vmcb->save.dr7 & 0xff))
		load_db_regs(vcpu->svm->host_db_regs);

	vcpu->cr2 = vcpu->svm->vmcb->save.cr2;

	/* Restore the host state saved above. */
	write_dr6(vcpu->svm->host_dr6);
	write_dr7(vcpu->svm->host_dr7);
	kvm_write_cr2(vcpu->svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING))
		profile_hit(KVM_PROFILING,
			(void *)(unsigned long)vcpu->svm->vmcb->save.rip);

	/* Re-enable global interrupts. */
	stgi();

	kvm_reput_irq(vcpu);

	vcpu->svm->next_rip = 0;

	/* VMRUN itself failed: report the entry failure to userspace. */
	if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= vcpu->svm->vmcb->control.exit_code;
		post_kvm_run_save(vcpu, kvm_run);
		return 0;
	}

	r = handle_exit(vcpu, kvm_run);
	if (r > 0) {
		/* Return to userspace on a pending signal... */
		if (signal_pending(current)) {
			++vcpu->stat.signal_exits;
			post_kvm_run_save(vcpu, kvm_run);
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}

		/* ...or when userspace's irq-injection window is open. */
		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			++vcpu->stat.request_irq_exits;
			post_kvm_run_save(vcpu, kvm_run);
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}
		kvm_resched(vcpu);
		goto again;
	}
	post_kvm_run_save(vcpu, kvm_run);
	return r;
}
1677
/* Flush the guest TLB by forcing allocation of a new ASID. */
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}
1682
/*
 * Point the guest at a new page-table root and invalidate its TLB
 * mappings via a fresh ASID.
 */
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	vcpu->svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	/*
	 * Lazy FPU switching: re-arm the #NM intercept and set CR0.TS so
	 * the next guest FPU use traps and reloads the guest FPU state.
	 */
	if (vcpu->fpu_active) {
		vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK;
		vcpu->fpu_active = 0;
	}
}
1694
/*
 * Inject a page fault into the guest.  If the exit already carries a
 * pending page fault (fault while delivering a fault), escalate to a
 * double fault (#DF) instead, as the architecture requires.
 */
static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long  addr,
				  uint32_t err_code)
{
	uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info;

	++vcpu->stat.pf_guest;

	if (is_page_fault(exit_int_info)) {

		vcpu->svm->vmcb->control.event_inj_err = 0;
		vcpu->svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
							SVM_EVTINJ_VALID_ERR |
							SVM_EVTINJ_TYPE_EXEPT |
							DF_VECTOR;
		return;
	}
	/* Normal case: set the fault address in CR2 and inject #PF. */
	vcpu->cr2 = addr;
	vcpu->svm->vmcb->save.cr2 = addr;
	vcpu->svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
						SVM_EVTINJ_VALID_ERR |
						SVM_EVTINJ_TYPE_EXEPT |
						PF_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = err_code;
}
1720
1721
/*
 * .disabled_by_bios callback: always reports SVM as not disabled.
 * (No BIOS-disable check is performed here.)
 */
static int is_disabled(void)
{
	return 0;
}
1726
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 * 0f 01 d9 = vmmcall, c3 = ret.
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
	hypercall[3] = 0xc3;
}
1738
/* Architecture-ops vtable registering the SVM backend with kvm core. */
static struct kvm_arch_ops svm_arch_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,

	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_decache = svm_vcpu_decache,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.invlpg = svm_invlpg,
	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.vcpu_setup = svm_vcpu_setup,
	.patch_hypercall = svm_patch_hypercall,
};
1788
/* Module entry point: register the SVM backend with the kvm core. */
static int __init svm_init(void)
{
	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
}
1793
/* Module exit point: unregister from the kvm core. */
static void __exit svm_exit(void)
{
	kvm_exit_arch();
}
1798
/* Standard module registration. */
module_init(svm_init)
module_exit(svm_exit)
1801