Source tree: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/virt/kvm/

Lines Matching defs:kvm (each fragment below carries its line number in eventfd.c)

2  * kvm eventfd support - use eventfd objects to signal various KVM events
25 #include <linux/kvm.h>
47 struct kvm *kvm;
63 struct kvm *kvm = irqfd->kvm;
65 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
66 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
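Lines 65-66 are the core of the irqfd mechanism: when the bound eventfd is signalled, kvm_set_irq() raises the GSI and immediately lowers it, delivering an edge-triggered pulse attributed to KVM_USERSPACE_IRQ_SOURCE_ID. Userspace creates the binding with the KVM_IRQFD ioctl. A minimal sketch, assuming vm_fd is a VM fd with an in-kernel irqchip and gsi is a valid interrupt line (error handling abbreviated):

    /* Sketch: bind an eventfd to a guest GSI via KVM_IRQFD.  Every
     * write() to the returned eventfd then pulses the GSI, i.e. the
     * kvm_set_irq(..., 1) / kvm_set_irq(..., 0) pair at lines 65-66. */
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>
    #include <stdint.h>

    static int wire_irqfd(int vm_fd, uint32_t gsi)
    {
            struct kvm_irqfd irqfd = { 0 };
            int efd = eventfd(0, 0);

            if (efd < 0)
                    return -1;

            irqfd.fd  = efd;
            irqfd.gsi = gsi;
            if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
                    return -1;

            return efd;    /* caller signals the IRQ with write(efd, ...) */
    }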
98 /* assumes kvm->irqfds.lock is held */
108 * assumes kvm->irqfds.lock is held
135 struct kvm *kvm = irqfd->kvm;
138 spin_lock_irqsave(&kvm->irqfds.lock, flags);
152 spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
167 kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
179 irqfd->kvm = kvm;
206 spin_lock_irq(&kvm->irqfds.lock);
209 list_for_each_entry(tmp, &kvm->irqfds.items, list) {
214 spin_unlock_irq(&kvm->irqfds.lock);
220 list_add_tail(&irqfd->list, &kvm->irqfds.items);
229 spin_unlock_irq(&kvm->irqfds.lock);
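Lines 206-229 give the shape of kvm_irqfd_assign()'s list handling: take kvm->irqfds.lock, walk irqfds.items to reject a second binding of the same eventfd, then append the new entry. A condensed kernel-style sketch; the comparison inside the loop is not visible in this listing, so the eventfd-context test shown here follows mainline eventfd.c and should be treated as an assumption:

    /* Condensed from lines 206-229: duplicate scan and insert, both
     * under the irqfds spinlock. */
    spin_lock_irq(&kvm->irqfds.lock);

    list_for_each_entry(tmp, &kvm->irqfds.items, list) {
            if (irqfd->eventfd != tmp->eventfd)
                    continue;
            /* This eventfd is already bound to an irq. */
            spin_unlock_irq(&kvm->irqfds.lock);
            return -EBUSY;          /* abbreviated error path */
    }

    list_add_tail(&irqfd->list, &kvm->irqfds.items);
    spin_unlock_irq(&kvm->irqfds.lock);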
251 kvm_eventfd_init(struct kvm *kvm)
253 spin_lock_init(&kvm->irqfds.lock);
254 INIT_LIST_HEAD(&kvm->irqfds.items);
255 INIT_LIST_HEAD(&kvm->ioeventfds);
262 kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
271 spin_lock_irq(&kvm->irqfds.lock);
273 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
278 spin_unlock_irq(&kvm->irqfds.lock);
292 kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
295 return kvm_irqfd_deassign(kvm, fd, gsi);
297 return kvm_irqfd_assign(kvm, fd, gsi);
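kvm_irqfd() at line 292 is the ioctl-level dispatcher: a flags bit chooses between the deassign path (line 295) and the assign path (line 297). In the mainline ABI that bit is KVM_IRQFD_FLAG_DEASSIGN; a sketch of undoing the binding created by wire_irqfd() above, using the same fd/gsi pair:

    /* Sketch: tear down an irqfd binding.  fd and gsi must match the
     * original assignment so kvm_irqfd_deassign() can find the entry. */
    static int unwire_irqfd(int vm_fd, int efd, uint32_t gsi)
    {
            struct kvm_irqfd irqfd = { 0 };

            irqfd.fd    = efd;
            irqfd.gsi   = gsi;
            irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
            return ioctl(vm_fd, KVM_IRQFD, &irqfd);
    }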
301 * This function is called as the kvm VM fd is being released. Shutdown all
305 kvm_irqfd_release(struct kvm *kvm)
309 spin_lock_irq(&kvm->irqfds.lock);
311 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
314 spin_unlock_irq(&kvm->irqfds.lock);
318 * since we do not take a kvm* reference.
331 irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
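Lines 301-331 cover VM teardown. kvm_irqfd_release() runs as the VM fd is released and deactivates every irqfd on kvm->irqfds.items; because irqfds take no kvm reference (line 318), the per-irqfd shutdown is deferred to the dedicated "kvm-irqfd-cleanup" workqueue created at line 331, which is flushed before the kvm structure can go away. A condensed kernel-style sketch; the shutdown work member and the irqfd_deactivate() helper follow mainline eventfd.c and are assumptions here:

    /* Condensed from lines 305-314 plus the mainline helpers: unhook
     * each irqfd under the lock, defer the real teardown, then wait. */
    static void irqfd_deactivate(struct _irqfd *irqfd)
    {
            list_del_init(&irqfd->list);            /* caller holds lock */
            queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
    }

    void kvm_irqfd_release(struct kvm *kvm)
    {
            struct _irqfd *irqfd, *tmp;

            spin_lock_irq(&kvm->irqfds.lock);
            list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
                    irqfd_deactivate(irqfd);
            spin_unlock_irq(&kvm->irqfds.lock);

            /* No irqfd work may run after kvm is freed (line 318). */
            flush_workqueue(irqfd_cleanup_wq);
    }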
447 /* assumes kvm->slots_lock held */
449 ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
453 list_for_each_entry(_p, &kvm->ioeventfds, list)
463 kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
511 mutex_lock(&kvm->slots_lock);
514 if (ioeventfd_check_collision(kvm, p)) {
521 ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
525 list_add_tail(&p->list, &kvm->ioeventfds);
527 mutex_unlock(&kvm->slots_lock);
532 mutex_unlock(&kvm->slots_lock);
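Lines 449-532 show the ioeventfd registration path: under kvm->slots_lock, kvm_assign_ioeventfd() rejects a colliding registration (ioeventfd_check_collision, called at line 514), registers a device on the chosen I/O bus with kvm_io_bus_register_dev() (line 521), and queues the entry on kvm->ioeventfds. Userspace drives this with the KVM_IOEVENTFD ioctl; a minimal sketch for a 4-byte PIO doorbell, reusing the headers from wire_irqfd() above (the address is a placeholder):

    /* Sketch: register an ioeventfd so a 4-byte guest write to a PIO
     * address signals efd instead of exiting to userspace. */
    static int wire_ioeventfd(int vm_fd, uint64_t addr)
    {
            struct kvm_ioeventfd ioevent = { 0 };
            int efd = eventfd(0, 0);

            if (efd < 0)
                    return -1;

            ioevent.addr  = addr;
            ioevent.len   = 4;
            ioevent.fd    = efd;
            ioevent.flags = KVM_IOEVENTFD_FLAG_PIO;   /* PIO, not MMIO */
            if (ioctl(vm_fd, KVM_IOEVENTFD, &ioevent) < 0)
                    return -1;

            return efd;   /* poll/read efd to observe guest doorbells */
    }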
542 kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
554 mutex_lock(&kvm->slots_lock);
556 list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
568 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
574 mutex_unlock(&kvm->slots_lock);
582 kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
585 return kvm_deassign_ioeventfd(kvm, args);
587 return kvm_assign_ioeventfd(kvm, args);
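kvm_ioeventfd() at line 582 mirrors kvm_irqfd(): a deassign flag routes to kvm_deassign_ioeventfd() (line 585), whose list walk at line 556 must find an entry matching the original registration before unregistering the bus device at line 568. A sketch of the teardown, reusing the values from wire_ioeventfd() above:

    /* Sketch: remove the registration made by wire_ioeventfd().  The
     * addr/len/flags tuple must match the original assignment. */
    static int unwire_ioeventfd(int vm_fd, int efd, uint64_t addr)
    {
            struct kvm_ioeventfd ioevent = { 0 };

            ioevent.addr  = addr;
            ioevent.len   = 4;
            ioevent.fd    = efd;
            ioevent.flags = KVM_IOEVENTFD_FLAG_PIO | KVM_IOEVENTFD_FLAG_DEASSIGN;
            return ioctl(vm_fd, KVM_IOEVENTFD, &ioevent);
    }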