/linux-master/drivers/gpu/drm/amd/amdgpu/

amdgpu_fdinfo.c
  60: struct amdgpu_vm *vm = &fpriv->vm;  [local]
  69: ret = amdgpu_bo_reserve(vm->root.bo, false);
  73: amdgpu_vm_get_memory(vm, &stats);
  74: amdgpu_bo_unreserve(vm->root.bo);
  84: drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);

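The amdgpu_fdinfo.c hits above trace how per-client memory statistics are gathered for the DRM fdinfo interface: the VM's root page-table BO is reserved, the VM's buffer objects are accumulated into a stats structure, the BO is unreserved, and the PASID is printed. A minimal sketch of that reserve/query/unreserve order follows; the struct amdgpu_mem_stats type and the helper name are assumptions taken from context, not the file's actual function.

/* Sketch only: gather per-VM memory stats for fdinfo, following the
 * reserve -> amdgpu_vm_get_memory() -> unreserve order shown above. */
static int example_vm_mem_stats(struct drm_printer *p,
				struct amdgpu_fpriv *fpriv)
{
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_mem_stats stats = {};	/* assumed stats type */
	int ret;

	ret = amdgpu_bo_reserve(vm->root.bo, false);	/* lock the root page-table BO */
	if (ret)
		return ret;
	amdgpu_vm_get_memory(vm, &stats);		/* walk the VM's BOs */
	amdgpu_bo_unreserve(vm->root.bo);

	drm_printf(p, "pasid:\t%u\n", vm->pasid);
	return 0;
}
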
amdgpu_csa.h
  34: int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  37: int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,

/linux-master/drivers/video/

of_videomode.c
  18: * @vm: set to return value
  25: * specified by index into *vm. This function should only be used, if
  30: int of_get_videomode(struct device_node *np, struct videomode *vm,  [argument]
  45: ret = videomode_from_timings(disp, vm, index);

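The of_videomode.c hits include the kernel-doc for of_get_videomode(), which parses a display-timings node from the device tree and converts the timing selected by index into a struct videomode. A hedged usage sketch, assuming the header names below and that OF_USE_NATIVE_MODE selects the timing marked as the panel's native mode:

#include <linux/of.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/videomode.h>

/* Sketch: read the panel's native mode once at probe time. of_get_videomode()
 * re-parses the node on every call, so callers that need the timing repeatedly
 * should cache the result (per the "should only be used, if" note above). */
static int example_read_native_mode(struct device_node *np, struct videomode *vm)
{
	int ret = of_get_videomode(np, vm, OF_USE_NATIVE_MODE);

	if (ret < 0)
		pr_err("failed to parse videomode: %d\n", ret);
	return ret;
}
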
/linux-master/drivers/gpu/drm/i915/

i915_gem_gtt.c
  73: * @vm: the &struct i915_address_space
  97: int i915_gem_gtt_reserve(struct i915_address_space *vm,  [argument]
  108: GEM_BUG_ON(range_overflows(offset, size, vm->total));
  109: GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
  116: err = drm_mm_reserve_node(&vm->mm, node);
  123: err = i915_gem_evict_for_node(vm, ww, node, flags);
  125: err = drm_mm_reserve_node(&vm->mm, node);
  157: * @vm
  190: i915_gem_gtt_insert(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww, struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color, u64 start, u64 end, unsigned int flags)  [argument]
  [all...]

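The i915_gem_gtt.c hits at lines 116-125 outline the reservation strategy used by i915_gem_gtt_reserve(): try to claim the exact drm_mm range, and if that fails, evict whatever overlaps the node and retry. A sketch of that control flow, with locking, the no-evict flag check and the GEM_BUG_ON asserts elided:

/* Illustrative only: reserve an exact GTT range, evicting on first failure. */
static int example_reserve_with_evict(struct i915_address_space *vm,
				      struct i915_gem_ww_ctx *ww,
				      struct drm_mm_node *node,
				      unsigned int flags)
{
	int err;

	err = drm_mm_reserve_node(&vm->mm, node);	/* claim the range */
	if (err != -ENOSPC)
		return err;

	err = i915_gem_evict_for_node(vm, ww, node, flags); /* make room */
	if (err)
		return err;

	return drm_mm_reserve_node(&vm->mm, node);	/* retry once */
}
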
i915_vma_resource.h
  40: * op, coalescing with other arguments like vm, stash, cache_level
  69: * @rb: Rb node for the vm's pending unbind interval tree.
  72: * @vm: non-refcounted pointer to the vm. This is for internal use only and
  93: * @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
  109: struct i915_address_space *vm;  [member in struct:i915_vma_resource]
  117: * coalescing with other arguments like vm, stash, cache_level and flags
  181: * @vm: Pointer to the vm.
  202: struct i915_address_space *vm,
  201: i915_vma_resource_init(struct i915_vma_resource *vma_res, struct i915_address_space *vm, struct sg_table *pages, const struct i915_page_sizes *page_sizes, struct i915_refct_sgt *pages_rsgt, bool readonly, bool lmem, struct intel_memory_region *mr, const struct i915_vma_ops *ops, void *private, u64 start, u64 node_size, u64 size, u32 guard)  [argument]
  [all...]

i915_vma.c
  51: * We may be forced to unbind when the vm is dead, to clean it up.
  55: if (kref_read(&vma->vm->ref))
  124: intel_gt_pm_get_untracked(vma->vm->gt);
  139: intel_gt_pm_put_async_untracked(vma->vm->gt);
  147: struct i915_address_space *vm,
  156: GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
  162: vma->ops = &vm->vma_ops;
  199: if (unlikely(vma->size > vm
  146: vma_create(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_gtt_view *view)  [argument]
  285: i915_vma_lookup(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_gtt_view *view)  [argument]
  323: i915_vma_instance(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_gtt_view *view)  [argument]
  346: struct i915_address_space *vm;  [member in struct:i915_vma_work]
  1373: vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)  [argument]
  1624: struct i915_address_space *vm = vma->vm;  [local]
  1846: struct i915_address_space *vm = vma->vm;  [local]
  1865: struct i915_address_space *vm = vma->vm;  [local]
  2161: struct i915_address_space *vm = vma->vm;  [local]
  2200: struct i915_address_space *vm = vma->vm;  [local]
  [all...]

/linux-master/arch/arm/mm/

ioremap.c
  53: struct vm_struct *vm;  [local]
  56: vm = &svm->vm;
  57: if (!(vm->flags & VM_ARM_STATIC_MAPPING))
  59: if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
  62: if (vm->phys_addr > paddr ||
  63: paddr + size - 1 > vm->phys_addr + vm->size - 1)
  75: struct vm_struct *vm;  [local]
  78: vm
  94: struct vm_struct *vm;  [local]
  433: struct vm_struct *vm;  [local]
  [all...]

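The ioremap.c hits show how ARM static mappings are matched: the vm_struct must carry VM_ARM_STATIC_MAPPING, its memory type must match, and the requested physical range must be fully contained in the existing mapping (lines 62-63). The containment test, written as a standalone helper purely for illustration:

#include <linux/types.h>

/* True when [paddr, paddr + size) lies entirely inside the static mapping
 * [phys_addr, phys_addr + map_size); this is the negation of the rejection
 * test at lines 62-63 above. */
static bool example_covers_request(phys_addr_t phys_addr, size_t map_size,
				   phys_addr_t paddr, size_t size)
{
	return phys_addr <= paddr &&
	       paddr + size - 1 <= phys_addr + map_size - 1;
}
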
/linux-master/tools/testing/selftests/kvm/x86_64/

smm_test.c
  135: struct kvm_vm *vm;  [local]
  142: vm = vm_create_with_one_vcpu(&vcpu, guest_code);
  144: vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
  146: TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
  149: memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
  150: memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
  157: vcpu_alloc_svm(vm, &nested_gva);
  159: vcpu_alloc_vmx(vm, &nested_gva);
  201: kvm_vm_release(vm);
  203: vcpu = vm_recreate_with_one_vcpu(vm);
  [all...]

sev_migrate_tests.c
  25: struct kvm_vm *vm;  [local]
  28: vm = vm_create_barebones();
  30: sev_vm_init(vm);
  32: sev_es_vm_init(vm);
  35: __vm_vcpu_add(vm, i);
  37: sev_vm_launch(vm, es ? SEV_POLICY_ES : 0);
  40: vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
  41: return vm;
  46: struct kvm_vm *vm;  [local]
  49: vm
  101: struct kvm_vm *vm;  [member in struct:locking_thread_input]
  209: verify_mirror_allowed_cmds(struct kvm_vm *vm)  [argument]
  [all...]

hyperv_evmcs.c
  206: static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,  [argument]
  216: kvm_vm_release(vm);
  219: vcpu = vm_recreate_with_one_vcpu(vm);
  239: struct kvm_vm *vm;  [local]
  248: vm = vm_create_with_one_vcpu(&vcpu, guest_code);
  250: hcall_page = vm_vaddr_alloc_pages(vm, 1);
  251: memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
  256: vcpu_alloc_vmx(vm, &vmx_pages_gva);
  257: vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
  258: vcpu_args_set(vcpu, 3, vmx_pages_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_pag
  [all...]

hyperv_cpuid.c
  116: void test_hv_cpuid_e2big(struct kvm_vm *vm, struct kvm_vcpu *vcpu)  [argument]
  124: ret = __kvm_ioctl(vm->kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
  133: struct kvm_vm *vm;  [local]
  139: vm = vm_create_with_one_vcpu(&vcpu, guest_code);
  142: test_hv_cpuid_e2big(vm, vcpu);
  165: test_hv_cpuid_e2big(vm, NULL);
  171: kvm_vm_free(vm);

fix_hypercall_test.c
  111: struct kvm_vm *vm = vcpu->vm;  [local]
  113: vm_init_descriptor_tables(vm);
  115: vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
  118: vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
  122: sync_global_to_guest(vm, quirk_disabled);
  124: virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

tsc_scaling_sync.c
  18: static struct kvm_vm *vm;  [variable in typeref:struct:kvm_vm]
  53: vcpu = vm_vcpu_add(vm, vcpu_id, guest_code);
  90: vm = vm_create(NR_TEST_VCPUS);
  91: vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ);
  108: kvm_vm_free(vm);

monitor_mwait_test.c
  74: struct kvm_vm *vm;  [local]
  80: vm = vm_create_with_one_vcpu(&vcpu, guest_code);
  83: vm_init_descriptor_tables(vm);
  109: vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, disabled_quirks);
  129: kvm_vm_free(vm);

/linux-master/tools/testing/selftests/kvm/lib/aarch64/

vgic.c
  20: * vm - KVM VM
  33: int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,  [argument]
  47: list_for_each(iter, &vm->vcpus)
  54: gic_fd = __kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
  65: nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE);
  66: virt_map(vm, gicd_base_gpa, gicd_base_gpa, nr_gic_pages);
  72: nr_gic_pages = vm_calc_num_guest_pages(vm->mode,
  74: virt_map(vm, gicr_base_gpa, gicr_base_gpa, nr_gic_pages);
  108: int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)  [argument]
  120: return _kvm_irq_line(vm, ir
  123: kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)  [argument]
  [all...]

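The vgic.c hits describe vgic_v3_setup(): given a selftest VM, it creates the in-kernel GICv3 device, sizes the distributor and redistributor regions from the VM's page size, and identity-maps them into the guest. A hedged call sketch mirroring the arch_setup_vm() hit in dirty_log_perf_test.c further down; GICD_BASE_GPA and GICR_BASE_GPA stand for the test's own guest-physical base addresses and are not fixed constants:

/* Sketch: wire up a GICv3 for an aarch64 KVM selftest. Returns the device fd,
 * or a negative value when the host lacks an in-kernel vGICv3. */
static int example_setup_gic(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	return vgic_v3_setup(vm, nr_vcpus, 64 /* nr_irqs */,
			     GICD_BASE_GPA, GICR_BASE_GPA);
}
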
/linux-master/tools/testing/selftests/kvm/

dirty_log_perf_test.c
  30: static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)  [argument]
  36: gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
  39: static void arch_cleanup_vm(struct kvm_vm *vm)  [argument]
  47: static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)  [argument]
  51: static void arch_cleanup_vm(struct kvm_vm *vm)  [argument]
  142: struct kvm_vm *vm;  [local]
  155: vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
  160: memstress_set_random_seed(vm, p->random_seed);
  161: memstress_set_write_percent(vm, p->write_percent);
  163: guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm
  [all...]

kvm_create_max_vcpus.c
  25: struct kvm_vm *vm;  [local]
  31: vm = vm_create_barebones();
  35: __vm_vcpu_add(vm, i);
  37: kvm_vm_free(vm);

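kvm_create_max_vcpus.c exercises the simplest VM lifecycle visible in these listings: create a barebones VM (just the VM fd, no memory or guest code), add vCPUs by id, and free it. A minimal sketch of that loop, assuming the selftest kvm_util.h helpers shown in the hits; num_vcpus is a placeholder for the probed KVM_MAX_VCPUS limit the test actually uses:

static void example_create_vcpus(uint32_t num_vcpus)
{
	struct kvm_vm *vm = vm_create_barebones();
	uint32_t i;

	for (i = 0; i < num_vcpus; i++)
		__vm_vcpu_add(vm, i);	/* add vCPU i without any guest setup */

	kvm_vm_free(vm);
}
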
max_guest_memory_test.c
  66: struct kvm_vm *vm = vcpu->vm;  [local]
  69: vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);
  89: static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,  [argument]
  104: ~((uint64_t)vm->page_size - 1);
  172: struct kvm_vm *vm;  [local]
  214: vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
  216: max_gpa = vm->max_gfn << vm->page_shift;
  226: for (i = 0; i < slot_size; i += vm
  [all...]

dirty_log_test.c
  220: static void clear_log_create_vm_done(struct kvm_vm *vm)  [argument]
  228: vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
  235: kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
  242: kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
  243: kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
  277: static void dirty_ring_create_vm_done(struct kvm_vm *vm)  [argument]
  287: pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
  288: pages = vm_adjust_num_guest_pages(vm->mode, pages);
  289: if (vm->page_size < getpagesize())
  290: pages = vm_num_host_pages(vm
  484: log_mode_create_vm_done(struct kvm_vm *vm)  [argument]
  523: struct kvm_vm *vm = vcpu->vm;  [local]
  685: struct kvm_vm *vm;  [local]
  706: struct kvm_vm *vm;  [local]
  [all...]

/linux-master/drivers/gpu/drm/lima/

lima_mmu.h
  16: void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);

/linux-master/drivers/gpu/drm/radeon/

radeon_ib.c
  50: * @vm: requested vm
  59: struct radeon_ib *ib, struct radeon_vm *vm,
  75: ib->vm = vm;
  76: if (vm) {
  144: /* grab a vm id if necessary */
  145: if (ib->vm) {
  147: vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
  159: if (ib->vm)
  58: radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib *ib, struct radeon_vm *vm, unsigned size)  [argument]
  [all...]

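The radeon_ib.c hits document that radeon_ib_get() records the requested vm on the IB (line 75) and that, at schedule time, a VM id is grabbed only if ib->vm is set (lines 144-147). A fragment sketching that conditional; tying the returned fence to the IB via radeon_sync_fence() is an assumption, not something shown in the hits:

	/* grab a vm id if necessary (sketch of the path around line 145) */
	if (ib->vm) {
		struct radeon_fence *vm_id_fence;

		vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
		radeon_sync_fence(&ib->sync, vm_id_fence);	/* assumed helper */
	}
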
/linux-master/drivers/gpu/drm/i915/gem/selftests/

mock_context.c
  45: ctx->vm = &ppgtt->vm;
  60: if (ctx->vm)
  61: i915_vm_put(ctx->vm);
  150: struct i915_address_space *vm)
  159: if (vm) {
  160: if (pc->vm)
  161: i915_vm_put(pc->vm);
  162: pc->vm = i915_vm_get(vm);
  149: kernel_context(struct drm_i915_private *i915, struct i915_address_space *vm)  [argument]
  [all...]

/linux-master/drivers/gpu/drm/i915/gt/

intel_migrate.c
  38: static void xehpsdv_toggle_pdes(struct i915_address_space *vm,  [argument]
  48: vm->insert_page(vm, 0, d->offset,
  49: i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
  55: static void xehpsdv_insert_pte(struct i915_address_space *vm,  [argument]
  68: vm->insert_page(vm, px_dma(pt), d->offset,
  69: i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
  74: static void insert_pte(struct i915_address_space *vm,  [argument]
  80: vm
  89: struct i915_ppgtt *vm;  [local]
  249: struct i915_address_space *vm;  [local]
  [all...]

/linux-master/tools/testing/selftests/kvm/aarch64/

page_fault_test.c
  61: void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
  364: static void setup_uffd(struct kvm_vm *vm, struct test_params *p,  [argument]
  370: setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_PT), &pt_args);
  371: setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_TEST_DATA), &data_args);
  407: static bool punch_hole_in_backing_store(struct kvm_vm *vm,  [argument]
  426: static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)  [argument]
  431: region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
  440: static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)  [argument]
  451: static bool check_write_in_dirty_log(struct kvm_vm *vm,  [argument]
  459: /* getpage_size() is not always equal to vm
  468: handle_cmd(struct kvm_vm *vm, int cmd)  [argument]
  527: load_exec_code_for_test(struct kvm_vm *vm)  [argument]
  541: setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu, struct test_desc *test)  [argument]
  553: setup_gva_maps(struct kvm_vm *vm)  [argument]
  576: setup_memslots(struct kvm_vm *vm, struct test_params *p)  [argument]
  615: setup_ucall(struct kvm_vm *vm)  [argument]
  657: vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu, struct test_desc *test)  [argument]
  702: struct kvm_vm *vm;  [local]
  [all...]

/linux-master/drivers/gpu/drm/xe/

xe_gt_pagefault.c
  84: static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)  [argument]
  88: if (vm->usm.last_fault_vma) { /* Fast lookup */
  89: if (vma_matches(vm->usm.last_fault_vma, page_addr))
  90: vma = vm->usm.last_fault_vma;
  93: vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
  102: struct xe_vm *vm = xe_vma_vm(vma);  [local]
  109: if (atomic && IS_DGFX(vm->xe)) {
  121: err = xe_bo_validate(bo, vm, true);
  134: struct xe_vm *vm;  [local]
  147: vm
  499: get_acc_vma(struct xe_vm *vm, struct acc *acc)  [argument]
  512: struct xe_vm *vm;  [local]
  [all...]

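The xe_gt_pagefault.c hits at lines 88-93 show lookup_vma()'s two-step search: consult the cached last-faulted VMA first, then fall back to an interval-tree lookup of the 4K page containing the fault address. A sketch of that pattern, based only on the lines listed above:

/* Illustrative only: fast-path the repeated-fault case before searching. */
static struct xe_vma *example_lookup_vma(struct xe_vm *vm, u64 page_addr)
{
	struct xe_vma *vma = NULL;

	if (vm->usm.last_fault_vma &&
	    vma_matches(vm->usm.last_fault_vma, page_addr))
		vma = vm->usm.last_fault_vma;	/* fast lookup: same VMA as last fault */
	if (!vma)
		vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);

	return vma;
}
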