/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

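/*
 * A vcpu is runnable unless the guest has entered a wait state
 * (MSR[WE] set) and no exceptions are pending for it.
 */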
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
}

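/*
 * Emulate the instruction that caused the exit.  EMULATE_DONE means
 * emulation completed in the kernel and we can resume the guest;
 * EMULATE_DO_MMIO means userspace must complete the access, so we
 * bounce out with exit_reason set to KVM_EXIT_MMIO.
 */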
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

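/*
 * Allocate and zero the generic kvm structure; no additional per-VM
 * arch setup is needed at this point.
 */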
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
}

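/*
 * Free every vcpu, then clear the vcpu array and reset the online
 * count under kvm->lock so no stale pointers remain.
 */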
static void kvmppc_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

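/*
 * Report which optional KVM capabilities this architecture supports.
 * KVM_CAP_COALESCED_MMIO returns the page offset of the coalesced
 * MMIO ring rather than a plain boolean.
 */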
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_PPC_OSI:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu))
		kvmppc_create_vcpu_debugfs(vcpu, id);
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

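/*
 * Tasklet body for decrementer expiry: queue a decrementer exception
 * for the vcpu and wake it up if it is sleeping in halted state.
 */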
static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

/*
 * Low-level hrtimer wake routine.  Because this runs in hardirq
 * context, we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

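/*
 * Set up decrementer emulation for this vcpu: the hrtimer fires in
 * hardirq context and hands off to the tasklet above, which injects
 * the exception.
 */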
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

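/*
 * Complete an MMIO load after userspace has filled in run->mmio.data:
 * pick up the value at the guest's endianness, sign-extend it if the
 * emulated instruction asked for that, and write it to the register
 * (GPR, FPR or QPR) recorded in vcpu->arch.io_gpr.
 */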
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
	case KVM_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
	case KVM_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

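/*
 * Set up a guest load that must be satisfied by userspace: describe
 * the access in run->mmio, remember the destination register, and
 * return EMULATE_DO_MMIO so the exit bounces out to userspace.
 */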
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

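/*
 * Set up a guest store for userspace to complete: the value is copied
 * into run->mmio.data in the guest's byte order before exiting.
 */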
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

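/*
 * Main vcpu run entry: first finish any MMIO, DCR or OSI operation
 * that userspace just completed, then deliver pending interrupts and
 * enter the guest with host interrupts disabled.
 */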
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

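/*
 * Queue (or, for KVM_INTERRUPT_UNSET, dequeue) an external interrupt
 * for the vcpu and wake it if it is sleeping in halted state.
 */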
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET)
		kvmppc_core_dequeue_external(vcpu, irq);
	else
		kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

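/*
 * Enable an optional, vcpu-scoped capability; currently only
 * KVM_CAP_PPC_OSI can be turned on this way.
 */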
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}