/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/virt/kvm/
/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include "irq.h"

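/*
 * Look up an assigned device on the per-VM list by its userspace-chosen
 * assigned_dev_id; returns NULL if no entry matches.
 */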
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

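/*
 * Map a host MSI-X vector back to its index in host_msix_entries, so the
 * common interrupt handler can tell which MSI-X entry fired.
 */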
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0)
		printk(KERN_WARNING "Failed to find matching MSI-X entry!\n");

	return index;
}

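/*
 * Deferred-injection work handler: runs in process context and raises the
 * guest interrupt(s) that kvm_assigned_dev_intr() recorded as pending,
 * under assigned_dev_lock.
 */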
static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;
	int i;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);

	spin_lock_irq(&assigned_dev->assigned_dev_lock);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		struct kvm_guest_msix_entry *guest_entries =
			assigned_dev->guest_msix_entries;
		for (i = 0; i < assigned_dev->entries_nr; i++) {
			if (!(guest_entries[i].flags &
					KVM_ASSIGNED_MSIX_PENDING))
				continue;
			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id,
				    guest_entries[i].vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
}

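/*
 * Host interrupt handler for the assigned device.  It only marks the
 * interrupt pending and schedules the work item above; for INTx the host
 * line is additionally masked until the guest acks, since the
 * level-triggered line would otherwise keep firing.
 */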
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	unsigned long flags;
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int index = find_index_from_host_irq(assigned_dev, irq);
		if (index < 0)
			goto out;
		assigned_dev->guest_msix_entries[index].flags |=
			KVM_ASSIGNED_MSIX_PENDING;
	}

	schedule_work(&assigned_dev->interrupt_work);

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
	}

out:
	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;
	unsigned long flags;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}

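/*
 * Tear down the guest side of an IRQ assignment: unregister the ack
 * notifier and release the IRQ source id.
 */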
static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	assigned_dev->ack_notifier.gsi = -1;

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * After cancel_work_sync() returns, one of two things has happened
	 * to a previously scheduled work item:
	 * 1. it was cancelled before running, or
	 * 2. its callback has finished executing.
	 *
	 * The first case guarantees that the irq is disabled and no more
	 * events will arrive.  In the second case the irq may have been
	 * re-enabled (e.g. for MSI), so disable it here to prevent further
	 * events.
	 *
	 * Note that this may result in a nested disable if the interrupt
	 * type is INTx, but that is fine since we are about to free it.
	 *
	 * If this function runs as part of VM destruction, the caller must
	 * ensure the kvm state is still valid at this point, since we may
	 * also have to wait for interrupt_work to complete.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq_nosync(assigned_dev->
					   host_msix_entries[i].vector);

		cancel_work_sync(&assigned_dev->interrupt_work);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 (void *)assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq_nosync(assigned_dev->host_irq);
		cancel_work_sync(&assigned_dev->interrupt_work);

		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

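/*
 * Deassign the host and/or guest halves of an IRQ assignment, as selected
 * by the mask bits in irq_requested_type.
 */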
static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

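/* Wire the device's host INTx line to the common handler above. */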
static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
			0, "kvm_assigned_intx_device", (void *)dev))
		return -EIO;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
			"kvm_assigned_msi_device", (void *)dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif

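/*
 * Enable MSI-X on the device and request every host vector that userspace
 * configured via KVM_ASSIGN_SET_MSIX_NR/KVM_ASSIGN_SET_MSIX_ENTRY; on
 * failure, unwind the vectors requested so far.
 */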
#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_irq(dev->host_msix_entries[i].vector,
				kvm_assigned_dev_intr, 0,
				"kvm_assigned_msix_device",
				(void *)dev);
		if (r)
			goto err;
	}

	return 0;
err:
	for (i -= 1; i >= 0; i--)
		free_irq(dev->host_msix_entries[i].vector, (void *)dev);
	pci_disable_msix(dev->dev);
	return r;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

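/*
 * Pick the host-side setup routine for the requested interrupt type.
 * Only one host type may be active at a time.
 */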
static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}

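/*
 * Guest-side counterpart of assign_host_irq(): allocate an IRQ source id,
 * record the guest GSI and register the ack notifier.
 */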
static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}

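/*
 * KVM_ASSIGN_DEV_IRQ handler: validates that at most one host type and one
 * guest type are requested, then performs the host and/or guest halves of
 * the assignment under kvm->lock.
 */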
/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

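/*
 * KVM_ASSIGN_PCI_DEVICE handler: claim the host PCI device (enable it and
 * reserve its regions), add it to the VM's assigned-device list and, if
 * requested, attach it to the IOMMU domain.
 */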
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0, idx;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	mutex_lock(&kvm->lock);
	idx = srcu_read_lock(&kvm->srcu);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
				   assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_segnr = assigned_dev->segnr;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->assigned_dev_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
	INIT_WORK(&match->interrupt_work,
		  kvm_assigned_dev_interrupt_work_handler);

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
		struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		  "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}

#ifdef __KVM_HAVE_MSIX
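/*
 * KVM_ASSIGN_SET_MSIX_NR handler: fix the number of MSI-X entries for a
 * device (once only) and allocate the host and guest entry arrays.
 */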
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						entry_nr->entry_nr,
						GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries = kzalloc(
				sizeof(struct kvm_guest_msix_entry) *
				entry_nr->entry_nr, GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Setting the MSI-X entry count twice is not allowed */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}

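/*
 * KVM_ASSIGN_SET_MSIX_ENTRY handler: record the guest GSI for one MSI-X
 * table entry in the first free (or already matching) slot.
 */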
static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif

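/*
 * Top-level dispatcher for the assigned-device VM ioctls; each case copies
 * the argument struct from userspace and hands it to the helper above.
 */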
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int r = -ENOTTY;

	switch (ioctl) {
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
	out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	}
out:
	return r;
}
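
/*
 * Illustrative userspace sketch: a VMM might drive the ioctls above
 * roughly as follows.  vm_fd is assumed to be an open KVM VM file
 * descriptor; the device id, BDF and guest GSI values below are
 * hypothetical.
 *
 *	struct kvm_assigned_pci_dev dev = {
 *		.assigned_dev_id = 42,		// caller-chosen handle
 *		.segnr = 0,			// PCI domain 0000
 *		.busnr = 1, .devfn = 0,		// host device 01:00.0
 *		.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU,
 *	};
 *	ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);
 *
 *	struct kvm_assigned_irq irq = {
 *		.assigned_dev_id = 42,
 *		.guest_irq = 10,		// guest GSI to inject
 *		.flags = KVM_DEV_IRQ_HOST_INTX | KVM_DEV_IRQ_GUEST_INTX,
 *	};
 *	ioctl(vm_fd, KVM_ASSIGN_DEV_IRQ, &irq);
 */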