Lines Matching refs:len

3259 static int next_segment(unsigned long len, int offset)
3261 if (len > PAGE_SIZE - offset)
3264 return len;
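
The fragments at 3259-3264 are next_segment(), the helper every multi-page copy below leans on: it clamps a remaining length so one copy never crosses a page boundary. The identifiers in this listing match KVM's guest-memory accessors in mainline virt/kvm/kvm_main.c; all sketches below assume kernel-internal context (linux/kvm_host.h). A minimal reconstruction, with the body of the taken branch assumed from the condition on 3261:

/*
 * Clamp @len so that a copy starting at @offset within a page
 * stops at the page boundary (taken-branch body is assumed).
 */
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}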
3267 /* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
3269 void *data, int offset, int len)
3277 r = __copy_from_user(data, (void __user *)addr + offset, len);
3284 int len)
3288 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3293 int offset, int len)
3297 return __kvm_read_guest_page(slot, gfn, data, offset, len);
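
Lines 3267-3297 outline the single-page read path: __kvm_read_guest_page() does the actual __copy_from_user() from the host virtual address backing the gfn, and kvm_read_guest_page()/kvm_vcpu_read_guest_page() are thin wrappers that resolve the memslot first. A sketch of the core helper; only the __copy_from_user() line appears in the listing, so the hva lookup and error handling are assumptions:

static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
				 void *data, int offset, int len)
{
	unsigned long addr;
	int r;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);	/* assumed lookup */
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}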
3301 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3308 while ((seg = next_segment(len, offset)) != 0) {
3313 len -= seg;
3321 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3328 while ((seg = next_segment(len, offset)) != 0) {
3333 len -= seg;
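
kvm_read_guest() (3301) and kvm_vcpu_read_guest() (3321) share the same walk: split [gpa, gpa + len) into per-page segments with next_segment() and read each one. A sketch of the pattern; the pointer and gfn updates other than the len -= seg on 3313/3333 are assumed:

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);
	int seg, ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;	/* every page after the first starts at 0 */
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}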
3342 void *data, int offset, unsigned long len)
3351 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3359 void *data, unsigned long len)
3365 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
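
The atomic variant at 3342-3365 uses __copy_from_user_inatomic() (3351), which must not sleep, so the fault path has to be fenced off. A sketch of how that likely looks around the copy; the pagefault_disable()/pagefault_enable() pair is an assumption, standard practice around _inatomic copies:

	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;	/* caller falls back to a sleeping read */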
3369 /* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
3372 const void *data, int offset, int len)
3380 r = __copy_to_user((void __user *)addr + offset, data, len);
3388 const void *data, int offset, int len)
3392 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3397 const void *data, int offset, int len)
3401 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
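
Lines 3369-3401 mirror the read side for writes, with one difference worth showing: after a successful __copy_to_user() the page must be flagged for dirty logging. A sketch of __kvm_write_guest_page(); the mark_page_dirty_in_slot() call is an assumption based on how KVM tracks dirty pages:

static int __kvm_write_guest_page(struct kvm *kvm,
				  struct kvm_memory_slot *memslot, gfn_t gfn,
				  const void *data, int offset, int len)
{
	unsigned long addr;
	int r;

	addr = gfn_to_hva_memslot(memslot, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, memslot, gfn);	/* dirty logging */
	return 0;
}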
3406 unsigned long len)
3413 while ((seg = next_segment(len, offset)) != 0) {
3418 len -= seg;
3427 unsigned long len)
3434 while ((seg = next_segment(len, offset)) != 0) {
3439 len -= seg;
3449 gpa_t gpa, unsigned long len)
3453 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3484 ghc->len = len;
3489 gpa_t gpa, unsigned long len)
3492 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
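
Lines 3449-3492 initialize a gfn_to_hva_cache, which lets hot paths skip the gfn-to-hva translation on every access. From 3453 and 3484 the cache records the gpa, the resolved hva, and the validated length; the generation stamp and the multi-page fallback are assumptions inferred from how the cached accessors below behave:

static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
				       struct gfn_to_hva_cache *ghc,
				       gpa_t gpa, unsigned long len)
{
	int offset = offset_in_page(gpa);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;

	/* Remember which memslot layout this translation belongs to. */
	ghc->generation = slots->generation;

	/* Resolve and validate the range (per-gfn walk elided here). */
	ghc->memslot = __gfn_to_memslot(slots, start_gfn);
	ghc->hva = gfn_to_hva_memslot(ghc->memslot, start_gfn);
	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	/*
	 * Single-page ranges get a direct hva; cross-page ranges take
	 * the slow path later (signalled by a NULL memslot).
	 */
	if (start_gfn == end_gfn)
		ghc->hva += offset;
	else
		ghc->memslot = NULL;

	ghc->gpa = gpa;
	ghc->len = len;
	return 0;
}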
3498 unsigned long len)
3504 if (WARN_ON_ONCE(len + offset > ghc->len))
3508 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3516 return kvm_write_guest(kvm, gpa, data, len);
3518 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3528 void *data, unsigned long len)
3530 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
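
kvm_write_guest_offset_cached() (3498-3518) shows the cache in use: reject out-of-bounds accesses (3504), re-resolve if the memslot generation moved underneath it (3508), fall back to the uncached kvm_write_guest() for ranges the cache cannot represent (3516), and otherwise copy straight to the cached hva (3518). A sketch assembling those fragments; the dirty-page marking is an assumption:

int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	gpa_t gpa = ghc->gpa + offset;
	int r;

	if (WARN_ON_ONCE(len + offset > ghc->len))
		return -EINVAL;

	/* Memslots changed since the cache was built: rebuild it. */
	if (slots->generation != ghc->generation &&
	    __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
		return -EFAULT;

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	/* Cross-page ranges were not cached; take the slow path. */
	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, gpa, data, len);

	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
	return 0;
}

The read-side pair at 3536-3567 (kvm_read_guest_offset_cached() and kvm_read_guest_cached()) follows the same shape with __copy_from_user() and the kvm_read_guest() fallback.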
3536 unsigned long len)
3542 if (WARN_ON_ONCE(len + offset > ghc->len))
3546 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3554 return kvm_read_guest(kvm, gpa, data, len);
3556 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3565 void *data, unsigned long len)
3567 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3571 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3579 while ((seg = next_segment(len, offset)) != 0) {
3580 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3584 len -= seg;
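
kvm_clear_guest() (3571-3584) zeroes a guest range by repeatedly writing the kernel's shared zero page through kvm_write_guest_page(). The per-iteration copy length must be the clamped segment seg, never the full remaining len, because each call handles exactly one page. A sketch; the zero_page derivation is an assumption based on the usual ZERO_PAGE idiom:

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	const void *zero_page = (const void *)__va(page_to_phys(ZERO_PAGE(0)));
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);
	int seg, ret;

	while ((seg = next_segment(len, offset)) != 0) {
		/* seg never exceeds one page, so the page write is valid */
		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}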
4538 if (kvm_sigmask.len != sizeof(sigset))
4610 if (kvm_sigmask.len != sizeof(compat_sigset_t))
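
Lines 4538 and 4610 come from the KVM_SET_SIGNAL_MASK ioctl paths (native and compat): the user-supplied kvm_sigmask.len must exactly match the sigset size of the ABI in use before anything further is copied in. A sketch of the native-side check; the surrounding ioctl plumbing is assumed:

	struct kvm_signal_mask kvm_sigmask;
	sigset_t sigset;

	if (copy_from_user(&kvm_sigmask, argp, sizeof(kvm_sigmask)))
		return -EFAULT;
	if (kvm_sigmask.len != sizeof(sigset))	/* reject any other length */
		return -EINVAL;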
5718 /* If r2->len == 0, match the exact address. If r2->len != 0,
5723 if (r2->len) {
5724 addr1 += r1->len;
5725 addr2 += r2->len;
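
Lines 5718-5725 are the comparator that keeps each kvm_io_bus sorted and matches accesses against registered ranges: a zero r2->len means match the exact address, while a nonzero length accepts any overlap. Reconstructed from those fragments; the truncated comment on 5718 and the tail comparisons are filled in as assumptions:

static int kvm_io_bus_cmp(const struct kvm_io_range *r1,
			  const struct kvm_io_range *r2)
{
	gpa_t addr1 = r1->addr;
	gpa_t addr2 = r2->addr;

	if (addr1 < addr2)
		return -1;

	/* If r2->len == 0, match the exact address.  If r2->len != 0,
	 * treat any overlap of the two ranges as a match. */
	if (r2->len) {
		addr1 += r1->len;
		addr2 += r2->len;
	}

	if (addr1 > addr2)
		return 1;

	return 0;
}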
5740 gpa_t addr, int len)
5747 .len = len,
5768 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5775 range->len, val))
5785 int len, const void *val)
5793 .len = len,
5806 gpa_t addr, int len, const void *val, long cookie)
5813 .len = len,
5823 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5839 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5846 range->len, val))
5856 int len, void *val)
5864 .len = len,
5875 int len, struct kvm_io_device *dev)
5898 .len = len,
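
The remaining lines (5740-5898) build a kvm_io_range {addr, len} for each MMIO/PIO access, binary-search the sorted bus for the first candidate device (5768, 5839), and then try every device whose range still compares equal. A sketch of the write-side walk; the error codes and exact iteration bound are assumptions:

static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			      struct kvm_io_range *range, const void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	/* Try every registered device that overlaps this access. */
	while (idx < bus->dev_count &&
	       kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
					range->len, val))
			return idx;	/* device accepted the write */
		idx++;
	}

	return -EOPNOTSUPP;
}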