Lines matching defs:range (KVM, virt/kvm/kvm_main.c)

364 	 * Fall back to flushing entire TLBs if the architecture's range-based
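Note: this fragment (from kvm_flush_remote_tlbs_range upstream) describes falling back to a full flush when range-based invalidation fails. A minimal sketch of that pattern, assuming the upstream helpers kvm_arch_flush_remote_tlbs_range() and kvm_flush_remote_tlbs():

/* Sketch: range-based flush with whole-TLB fallback (per the comment above). */
void flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
	/* Try precise, range-based invalidation first... */
	if (kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
		/* ...and flush everything if the arch can't (or won't). */
		kvm_flush_remote_tlbs(kvm);
}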
545 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
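Note: a handler matching this typedef receives the VM and a per-memslot gfn range, and its return value tells the caller whether a TLB flush is needed. A minimal sketch (handler body hypothetical):

/* Sketch: a handler conforming to gfn_handler_t (body hypothetical). */
static bool sketch_gfn_handler(struct kvm *kvm, struct kvm_gfn_range *range)
{
	/* Operate on gfns [range->start, range->end) within range->slot. */
	return false;	/* true would request a TLB flush from the caller */
}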
589 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
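Note: upstream this comment sits on kvm_for_each_memslot_in_hva_range(), which is built on the kernel's interval tree. A simplified sketch of the inclusive-range iteration pattern it wraps (the real macro also resolves each node to its memslot):

/* Sketch: iterate interval-tree nodes overlapping [start, last], inclusive. */
#define for_each_node_in_range(node, root, start, last)			\
	for ((node) = interval_tree_iter_first((root), (start), (last));	\
	     (node);								\
	     (node) = interval_tree_iter_next((node), (start), (last)))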
596 const struct kvm_mmu_notifier_range *range)
607 if (WARN_ON_ONCE(range->end <= range->start))
611 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
612 IS_KVM_NULL_FN(range->handler)))
622 range->start, range->end - 1) {
626 hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
627 hva_end = min_t(unsigned long, range->end,
632 * range is covered by zero or one memslots, don't
636 gfn_range.arg = range->arg;
637 gfn_range.may_block = range->may_block;
650 if (!IS_KVM_NULL_FN(range->on_lock))
651 range->on_lock(kvm);
653 if (IS_KVM_NULL_FN(range->handler))
656 r.ret |= range->handler(kvm, &gfn_range);
660 if (range->flush_on_ret && r.ret)
677 const struct kvm_mmu_notifier_range range = {
686 return __kvm_handle_hva_range(kvm, &range).ret;
695 const struct kvm_mmu_notifier_range range = {
704 return __kvm_handle_hva_range(kvm, &range).ret;
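Note: both wrappers have the same shape: build a kvm_mmu_notifier_range on the stack and hand it to __kvm_handle_hva_range(); flush_on_ret is what distinguishes them. A condensed sketch of that shape, assuming the field set visible in the lines above:

/* Sketch: how the hva wrappers package their arguments (condensed). */
static int handle_hva_range(struct kvm *kvm, unsigned long start,
			    unsigned long end, gfn_handler_t handler,
			    bool flush_on_ret)
{
	const struct kvm_mmu_notifier_range range = {
		.start		= start,
		.end		= end,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,	/* no lock callback */
		.flush_on_ret	= flush_on_ret,		/* the only difference */
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range).ret;
}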
735 * returns. Keep things simple and just find the minimal range
737 * enough information to subtract a range after its invalidate
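Note: "find the minimal range" means keeping a running union of all in-flight invalidation endpoints. A sketch of the idea, simplified from upstream kvm_mmu_invalidate_range_add():

/* Sketch: widen the tracked invalidation window to also cover [start, end). */
static void invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
{
	if (kvm->mmu_invalidate_range_start == INVALID_GPA) {
		/* first range of this invalidation window */
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}
}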
749 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
751 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
752 return kvm_unmap_gfn_range(kvm, range);
756 const struct mmu_notifier_range *range)
760 .start = range->start,
761 .end = range->end,
765 .may_block = mmu_notifier_range_blockable(range),
768 trace_kvm_unmap_hva_range(range->start, range->end);
792 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
825 * Assert that at least one range was added between start() and end().
826 * Not adding a range isn't fatal, but it is a KVM bug.
832 const struct mmu_notifier_range *range)
836 .start = range->start,
837 .end = range->end,
841 .may_block = mmu_notifier_range_blockable(range),
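Note: the start()/end() pair brackets page-fault handlers through a sequence count: faults sample the count and retry if an invalidation is in flight or completed in between, which is why at least one range must be added between the two. A simplified sketch of the check (field names as in upstream struct kvm, memory barriers elided):

/* Sketch: the invalidation sequence check used by fault handlers. */
static bool mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (kvm->mmu_invalidate_in_progress)
		return true;	/* an invalidation is running right now */
	if (kvm->mmu_invalidate_seq != mmu_seq)
		return true;	/* one completed after mmu_seq was sampled */
	return false;
}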
1499 	 * If @new is non-NULL, its hva_node[slots_idx] range has to be set
1524 * Initialize @new's hva range. Do this even when replacing an @old
2396 * Returns true if _all_ gfns in the range [@start, @end) have attributes
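Note: per-gfn attributes live in an xarray (kvm->mem_attr_array upstream), so semantically this check is a scan. Sketched here in its naive form (upstream walks the xarray far more efficiently):

/* Sketch: do all gfns in [start, end) carry exactly @attrs? (naive form) */
static bool range_has_attrs(struct kvm *kvm, gfn_t start, gfn_t end,
			    unsigned long attrs)
{
	gfn_t gfn;

	for (gfn = start; gfn < end; gfn++)
		if (xa_to_value(xa_load(&kvm->mem_attr_array, gfn)) != attrs)
			return false;
	return true;
}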
2440 struct kvm_mmu_notifier_range *range)
2450 gfn_range.arg = range->arg;
2451 gfn_range.may_block = range->may_block;
2456 kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2460 gfn_range.start = max(range->start, slot->base_gfn);
2461 gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2468 if (!IS_KVM_NULL_FN(range->on_lock))
2469 range->on_lock(kvm);
2472 ret |= range->handler(kvm, &gfn_range);
2476 if (range->flush_on_ret && ret)
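Note: lines 2460-2461 clamp the caller's range to each slot, i.e. the handler sees the intersection [max(start, base_gfn), min(end, base_gfn + npages)). For example, a request for gfns [0x100, 0x300) against a slot covering [0x200, 0x280):

/* Worked example of the clamping on lines 2460-2461 (values illustrative). */
gfn_range.start = max(0x100, 0x200);		/* -> 0x200 */
gfn_range.end   = min(0x300, 0x200 + 0x80);	/* -> 0x280 */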
2484 struct kvm_gfn_range *range)
2487 * Unconditionally add the range to the invalidation set, regardless of
2491 * adding the range allows KVM to require that MMU invalidations add at
2492 * least one range between begin() and end(), e.g. allows KVM to detect
2497 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2499 return kvm_arch_pre_set_memory_attributes(kvm, range);
2502 /* Set @attributes for the gfn range [@start, @end). */
2530 	/* Nothing to do if the entire range already has the desired attributes. */
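Note: the early-out at 2530 guards the heavier path. Heavily condensed, the set-attributes flow looks like this sketch (locking per upstream's slots_lock; error handling and the pre-/post-set handler invocations elided; range_has_attrs is the sketch from above):

/* Sketch: the overall set-attributes flow (heavily condensed). */
static int set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
			      unsigned long attrs)
{
	gfn_t gfn;

	mutex_lock(&kvm->slots_lock);

	/* nothing to do if the entire range already has @attrs */
	if (range_has_attrs(kvm, start, end, attrs))
		goto out;

	/* zap/prepare via the pre-set handler, then commit per gfn */
	for (gfn = start; gfn < end; gfn++)
		xa_store(&kvm->mem_attr_array, gfn, xa_mk_value(attrs),
			 GFP_KERNEL_ACCOUNT);
out:
	mutex_unlock(&kvm->slots_lock);
	return 0;
}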
5702 struct kvm_io_device *pos = bus->range[i].dev;
5742 struct kvm_io_range *range, key;
5750 range = bsearch(&key, bus->range, bus->dev_count,
5752 if (range == NULL)
5755 off = range - bus->range;
5757 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
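Note: bsearch() returns an arbitrary element when several compare equal, which is why line 5757 walks backwards to the first device registered for the address. The same technique, self-contained in userspace C:

/* Self-contained demo: bsearch() plus walk-back to the FIRST equal key. */
#include <stdio.h>
#include <stdlib.h>

static int cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	return (x > y) - (x < y);
}

int main(void)
{
	int v[] = { 1, 3, 3, 3, 7 };	/* sorted, with duplicate keys */
	int key = 3;
	int *hit = bsearch(&key, v, 5, sizeof(*v), cmp_int);

	if (hit) {
		size_t off = hit - v;

		/* bsearch() may land on any of the equal 3s; back up. */
		while (off > 0 && v[off - 1] == key)
			off--;
		printf("first match at index %zu\n", off);	/* prints 1 */
	}
	return 0;
}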
5764 struct kvm_io_range *range, const void *val)
5768 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5773 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5774 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5775 range->len, val))
5788 struct kvm_io_range range;
5791 range = (struct kvm_io_range) {
5799 r = __kvm_io_bus_write(vcpu, bus, &range, val);
5809 struct kvm_io_range range;
5811 range = (struct kvm_io_range) {
5822 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5823 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5831 return __kvm_io_bus_write(vcpu, bus, &range, val);
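Note: the cookie is a cached array index from a previous lookup: if the cached slot still compares equal (line 5822) the device is called directly, otherwise the code falls back to the full ordered search. Condensed from the lines above:

/* Sketch: cookie fast path, then fall back to the ordered search. */
if ((cookie >= 0) && (cookie < bus->dev_count) &&
    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
	if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, val))
		return cookie;	/* hit: reuse the cached slot */

return __kvm_io_bus_write(vcpu, bus, &range, val);	/* miss */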
5835 struct kvm_io_range *range, void *val)
5839 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5844 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5845 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5846 range->len, val))
5859 struct kvm_io_range range;
5862 range = (struct kvm_io_range) {
5870 r = __kvm_io_bus_read(vcpu, bus, &range, val);
5879 struct kvm_io_range range;
5891 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5896 range = (struct kvm_io_range) {
5903 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5908 new_bus->range[i] = range;
5909 memcpy(new_bus->range + i + 1, bus->range + i,
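Note: registration keeps bus->range sorted by address: allocate room for one more element, find the insertion point, then splice prefix + new element + suffix into the new array. Condensed from the lines above (error paths trimmed):

/* Sketch: sorted insert into the flexible bus->range array (condensed). */
new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
		  GFP_KERNEL_ACCOUNT);
if (!new_bus)
	return -ENOMEM;

for (i = 0; i < bus->dev_count; i++)
	if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
		break;		/* first element sorting after the new one */

memcpy(new_bus, bus, struct_size(bus, range, i));	/* header + prefix */
new_bus->range[i] = range;				/* new element */
memcpy(new_bus->range + i + 1, bus->range + i,		/* shifted suffix */
       flex_array_size(bus, range, bus->dev_count - i));
new_bus->dev_count = bus->dev_count + 1;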
5931 if (bus->range[i].dev == dev) {
5939 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
5942 memcpy(new_bus, bus, struct_size(bus, range, i));
5944 memcpy(new_bus->range + i, bus->range + i + 1,
5945 flex_array_size(new_bus, range, new_bus->dev_count - i));
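Note: struct_size() sizes the header plus N flexible-array elements, while flex_array_size() sizes N elements alone; together they express "copy everything before index i, then everything after it". A self-contained userspace illustration of the sizing arithmetic only (the real kernel helpers also saturate on overflow):

/* Userspace stand-ins showing the sizing arithmetic only. */
#include <stdio.h>
#include <stddef.h>

struct bus {
	int dev_count;
	int range[];			/* flexible array member */
};

#define struct_size(p, m, n)	 (sizeof(*(p)) + sizeof(*(p)->m) * (n))
#define flex_array_size(p, m, n) (sizeof(*(p)->m) * (n))

int main(void)
{
	struct bus *b = NULL;		/* sizeof() never dereferences it */

	printf("header + 3 elements: %zu bytes\n", struct_size(b, range, 3));
	printf("3 elements alone:    %zu bytes\n", flex_array_size(b, range, 3));
	return 0;
}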
5983 iodev = bus->range[dev_idx].dev;