Lines Matching defs:data

1026 /* This does not remove the slot from struct kvm_memslots data structures */
1564 * slot, kvm_copy_memslot() deliberately does not touch node data.
1770 * data, may be reused by @new.
1847 * old slot as the arch data could have changed between releasing
1850 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
2617 * xarray tracks data using "unsigned long", and as a result so does
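
The matches above (lines 1026-2617) are comment fragments from kvm_main.c's memslot bookkeeping; the last one notes that an xarray is indexed by "unsigned long". A minimal, generic sketch of that constraint using the stock xarray API; example_xa and example_store are invented for illustration and are not KVM code:

    #include <linux/xarray.h>

    static DEFINE_XARRAY(example_xa);

    /* An xarray index is an unsigned long, so a wider id (e.g. a u64
     * on a 32-bit build) must be range-checked before it can be used
     * as an index. */
    static int example_store(u64 id, void *entry)
    {
            if (id > ULONG_MAX)
                    return -EINVAL;
            return xa_err(xa_store(&example_xa, (unsigned long)id,
                                   entry, GFP_KERNEL));
    }
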
3311 void *data, int offset, int len)
3319 r = __copy_from_user(data, (void __user *)addr + offset, len);
3325 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
3330 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3334 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
3339 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3343 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3351 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3356 data += seg;
3363 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3371 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3376 data += seg;
3384 void *data, int offset, unsigned long len)
3393 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3401 void *data, unsigned long len)
3407 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
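
Lines 3311-3407 are the guest-read family: the page-level workers __kvm_read_guest_page() and __kvm_read_guest_atomic() resolve the memslot and do the __copy_from_user(), while kvm_read_guest() and kvm_vcpu_read_guest() walk a gpa range page by page. The loop below is reconstructed from the visible fragments (gfn, offset, seg, data += seg) and next_segment(), kvm_main.c's internal page-clipping helper; read it as a sketch of the chunking, not a verified copy of the upstream body:

    int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
    {
            gfn_t gfn = gpa >> PAGE_SHIFT;
            int offset = offset_in_page(gpa);
            int seg;
            int ret;

            /* Clip each chunk to the end of the current guest page. */
            while ((seg = next_segment(len, offset)) != 0) {
                    ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                    if (ret < 0)
                            return ret;
                    offset = 0;     /* later pages are read from their start */
                    len -= seg;
                    data += seg;
                    ++gfn;
            }
            return 0;
    }
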
3413 const void *data, int offset, int len)
3421 r = __copy_to_user((void __user *)addr + offset, data, len);
3429 const void *data, int offset, int len)
3433 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3438 const void *data, int offset, int len)
3442 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3446 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3455 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3460 data += seg;
3467 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3476 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3481 data += seg;
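
Lines 3413-3481 are the mirror-image write family, with the same page-by-page structure: kvm_write_guest() loops over kvm_write_guest_page(), and the vCPU-flavored variants differ only in which memslot set they consult. A hypothetical caller, to show the contract; struct my_record and push_record are invented:

    struct my_record {
            u64 seq;
            u64 payload;
    };

    /* kvm_write_guest() splits a page-crossing write into per-page
     * kvm_write_guest_page() calls; returns 0 on success or a
     * negative error if any page's lookup or copy fails. */
    static int push_record(struct kvm *kvm, gpa_t gpa,
                           const struct my_record *rec)
    {
            return kvm_write_guest(kvm, gpa, rec, sizeof(*rec));
    }
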
3538 void *data, unsigned int offset,
3557 return kvm_write_guest(kvm, gpa, data, len);
3559 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3569 void *data, unsigned long len)
3571 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3576 void *data, unsigned int offset,
3595 return kvm_read_guest(kvm, gpa, data, len);
3597 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3606 void *data, unsigned long len)
3608 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
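
Lines 3538-3608 are the cached accessors: they go through a struct gfn_to_hva_cache primed once by kvm_gfn_to_hva_cache_init(), and, per the fragments above, fall back to plain kvm_write_guest()/kvm_read_guest() when the cache cannot serve the request. A hedged usage sketch; the two wrappers are invented names:

    static struct gfn_to_hva_cache ghc;

    /* Prime the cache once for a fixed guest buffer. */
    static int shared_buf_init(struct kvm *kvm, gpa_t gpa, unsigned long len)
    {
            return kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, len);
    }

    /* Subsequent writes reuse the cached hva instead of re-resolving
     * the memslot; the uncached fallback happens inside KVM. */
    static int shared_buf_update(struct kvm *kvm, void *val, unsigned long len)
    {
            return kvm_write_guest_cached(kvm, &ghc, val, len);
    }
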
4004 * Since the algorithm is based on heuristics, accessing another VCPU's data without
4211 static int vcpu_get_pid(void *data, u64 *val)
4213 struct kvm_vcpu *vcpu = data;
5212 struct kvm_irqfd data;
5215 if (copy_from_user(&data, argp, sizeof(data)))
5217 r = kvm_irqfd(kvm, &data);
5221 struct kvm_ioeventfd data;
5224 if (copy_from_user(&data, argp, sizeof(data)))
5226 r = kvm_ioeventfd(kvm, &data);
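
Lines 5212-5226 come from the kvm_vm_ioctl() dispatcher: each ioctl copies its argument struct from userspace onto the stack before handing it to the handler. The surrounding case block likely reads approximately as below; the -EFAULT/goto framing is the usual kvm_main.c pattern, reconstructed from memory rather than quoted:

            case KVM_IRQFD: {
                    struct kvm_irqfd data;

                    r = -EFAULT;
                    if (copy_from_user(&data, argp, sizeof(data)))
                            goto out;
                    r = kvm_irqfd(kvm, &data);
                    break;
            }
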
5511 r += PAGE_SIZE; /* pio data page */
6103 static int kvm_stat_data_get(void *data, u64 *val)
6106 struct kvm_stat_data *stat_data = data;
6122 static int kvm_stat_data_clear(void *data, u64 val)
6125 struct kvm_stat_data *stat_data = data;
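
vcpu_get_pid() (line 4211) and the kvm_stat_data_get()/kvm_stat_data_clear() pair (lines 6103-6125) all follow the debugfs simple-attribute convention: the opaque void *data cookie registered with the file is cast back to the object it describes, and the clear handler rejects writes of anything but 0. A generic sketch of that convention; example_counter and the fops name are invented:

    #include <linux/debugfs.h>
    #include <linux/fs.h>

    struct example_counter {
            u64 value;
    };

    static int example_stat_get(void *data, u64 *val)
    {
            struct example_counter *c = data;   /* cookie -> object */

            *val = c->value;
            return 0;
    }

    static int example_stat_clear(void *data, u64 val)
    {
            struct example_counter *c = data;

            if (val)
                    return -EINVAL;     /* only "echo 0" resets */
            c->value = 0;
            return 0;
    }

    DEFINE_SIMPLE_ATTRIBUTE(example_stat_fops, example_stat_get,
                            example_stat_clear, "%llu\n");

The cookie comes back because it is what gets registered: debugfs_create_file(name, 0644, parent, c, &example_stat_fops) passes c through as the data argument of every get/clear call.
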
6541 uintptr_t data;
6555 uintptr_t data = init_context->data;
6585 err = thread_fn(kvm, data);
6610 uintptr_t data, const char *name,
6620 init_context.data = data;
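
The final cluster (lines 6541-6620) is the VM worker-thread plumbing: the creator stashes an opaque uintptr_t in an on-stack init context, and the spawned kthread copies it back out (line 6555) before invoking thread_fn(kvm, data) (line 6585). A hypothetical caller; the creator's signature is inferred from the fragment at line 6610 and may differ in detail upstream:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* data is the same uintptr_t cookie given at creation time, so a
     * small scalar can ride in it directly; anything larger would be
     * passed as a cast pointer instead. */
    static int example_worker(struct kvm *kvm, uintptr_t data)
    {
            unsigned long interval = data;

            while (!kthread_should_stop())
                    schedule_timeout_interruptible(interval);
            return 0;
    }

    static int example_start(struct kvm *kvm, struct task_struct **task)
    {
            return kvm_vm_create_worker_thread(kvm, example_worker,
                                               (uintptr_t)HZ,
                                               "kvm-example-worker", task);
    }
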