/linux-master/drivers/virt/acrn/

ioeventfd.c
      43  static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p)    argument
      45      lockdep_assert_held(&vm->ioeventfds_lock);
      52  static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm,    argument
      57      lockdep_assert_held(&vm->ioeventfds_lock);
      60      list_for_each_entry(p, &vm->ioeventfds, list)
      76  static int acrn_ioeventfd_assign(struct acrn_vm *vm,    argument
     121      mutex_lock(&vm->ioeventfds_lock);
     123      if (hsm_ioeventfd_is_conflict(vm, p)) {
     129      ret = acrn_ioreq_range_add(vm->ioeventfd_client, p->type,
     134      list_add_tail(&p->list, &vm->ioeventfds);
     147  acrn_ioeventfd_deassign(struct acrn_vm *vm, struct acrn_ioeventfd *args)    argument
     173  hsm_ioeventfd_match(struct acrn_vm *vm, u64 addr, u64 data, int len, int type)    argument
     232  acrn_ioeventfd_config(struct acrn_vm *vm, struct acrn_ioeventfd *args)    argument
     244  acrn_ioeventfd_init(struct acrn_vm *vm)    argument
     263  acrn_ioeventfd_deinit(struct acrn_vm *vm)    argument
     [all...]

acrn_drv.h
     109   * @vm: The VM that the client belongs to
     123      struct acrn_vm *vm;    member in struct:acrn_ioreq_client
     191  struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
     193  int acrn_vm_destroy(struct acrn_vm *vm);
     194  int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
     196  int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size);
     197  int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
     198  int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
     199  int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
     200  void acrn_vm_all_ram_unmap(struct acrn_vm *vm);
     [all...]
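The ioeventfd.c hits above trace a common registration shape: take the per-VM mutex, scan the existing list for a conflicting entry, and publish the new entry only if the scan comes up empty. A minimal user-space sketch of that check-then-insert pattern (plain C with pthreads; the struct, field names, and overlap rule are illustrative stand-ins, not the ACRN definitions):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct ioeventfd {
        unsigned long addr;
        int len;
        struct ioeventfd *next;
    };

    struct vm {
        pthread_mutex_t lock;     /* plays the role of ioeventfds_lock */
        struct ioeventfd *head;   /* plays the role of vm->ioeventfds */
    };

    /* Conflict when an existing entry overlaps the requested range. */
    static bool is_conflict(const struct vm *vm, const struct ioeventfd *n)
    {
        for (const struct ioeventfd *p = vm->head; p; p = p->next)
            if (p->addr < n->addr + n->len && n->addr < p->addr + p->len)
                return true;
        return false;
    }

    static int ioeventfd_assign(struct vm *vm, unsigned long addr, int len)
    {
        struct ioeventfd *n = calloc(1, sizeof(*n));

        if (!n)
            return -1;
        n->addr = addr;
        n->len = len;

        pthread_mutex_lock(&vm->lock);
        if (is_conflict(vm, n)) {         /* check before publishing */
            pthread_mutex_unlock(&vm->lock);
            free(n);
            return -1;
        }
        n->next = vm->head;               /* publish while still locked */
        vm->head = n;
        pthread_mutex_unlock(&vm->lock);
        return 0;
    }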
/linux-master/drivers/gpu/drm/i915/display/

intel_dpt.c
      17      struct i915_address_space vm;    member in struct:i915_dpt
      24  #define i915_is_dpt(vm) ((vm)->is_dpt)
      27  i915_vm_to_dpt(struct i915_address_space *vm)    argument
      29      BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
      30      drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm));
      31      return container_of(vm, struct i915_dpt, vm);
      34  #define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
      41  dpt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, unsigned int pat_index, u32 flags)    argument
      54  dpt_insert_entries(struct i915_address_space *vm, struct i915_vma_resource *vma_res, unsigned int pat_index, u32 flags)    argument
      76  dpt_clear_range(struct i915_address_space *vm, u64 start, u64 length)    argument
      81  dpt_bind_vma(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, struct i915_vma_resource *vma_res, unsigned int pat_index, u32 flags)    argument
     111  dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma_resource *vma_res)    argument
     117  dpt_cleanup(struct i915_address_space *vm)    argument
     124  intel_dpt_pin(struct i915_address_space *vm)    argument
     175  intel_dpt_unpin(struct i915_address_space *vm)    argument
     247      struct i915_address_space *vm;    local
     311  intel_dpt_destroy(struct i915_address_space *vm)    argument
     [all...]
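The i915_vm_to_dpt() hit is the kernel's container_of upcast: given a pointer to the embedded i915_address_space, recover the enclosing i915_dpt, with a build-time check that the member sits at offset zero. A self-contained sketch of the same idiom (simplified types; only container_of/offsetof match the kernel):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Recover the enclosing struct from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct address_space { int is_dpt; };

    struct dpt {
        struct address_space vm;   /* embedded base object, first member */
        int width;
    };

    static struct dpt *vm_to_dpt(struct address_space *vm)
    {
        /* The kernel's BUILD_BUG_ON(offsetof(struct i915_dpt, vm)) does
         * this check at compile time; with offset zero the conversion is
         * a plain cast at runtime. */
        assert(offsetof(struct dpt, vm) == 0);
        assert(vm->is_dpt);        /* mirrors the drm_WARN_ON() sanity check */
        return container_of(vm, struct dpt, vm);
    }

    int main(void)
    {
        struct dpt d = { .vm = { .is_dpt = 1 }, .width = 1024 };

        printf("%d\n", vm_to_dpt(&d.vm)->width);   /* prints 1024 */
        return 0;
    }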
/linux-master/tools/testing/selftests/kvm/lib/

kvm_util.c
     179  void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)    argument
     181      if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
     182          vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
     184          vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
     185      vm->dirty_ring_size = ring_size;
     188  static void vm_open(struct kvm_vm *vm)    argument
     190      vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
     194      vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
     256  vm_vaddr_populate_bitmap(struct kvm_vm *vm)    argument
     267      struct kvm_vm *vm;    local
     413      struct kvm_vm *vm;    local
     464      struct kvm_vm *vm;    local
     483      struct kvm_vm *vm;    local
     526  vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)    argument
     532  vm_recreate_with_one_vcpu(struct kvm_vm *vm)    argument
     629  userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)    argument
     668  vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)    argument
     705  __vm_mem_region_delete(struct kvm_vm *vm, struct userspace_mem_region *region, bool unlink)    argument
     811  kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)    argument
     910  __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva)    argument
     924  vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva)    argument
     933  __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva, uint32_t guest_memfd, uint64_t guest_memfd_offset)    argument
     950  vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva, uint32_t guest_memfd, uint64_t guest_memfd_offset)    argument
     963  vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, uint64_t guest_paddr, uint32_t slot, uint64_t npages, uint32_t flags, int guest_memfd, uint64_t guest_memfd_offset)    argument
    1140  vm_userspace_mem_region_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, uint64_t guest_paddr, uint32_t slot, uint64_t npages, uint32_t flags)    argument
    1164  memslot2region(struct kvm_vm *vm, uint32_t memslot)    argument
    1195  vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)    argument
    1225  vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)    argument
    1254  vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)    argument
    1259  vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, bool punch_hole)    argument
    1303  vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)    argument
    1319  __vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)    argument
    1369  vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)    argument
    1435  ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, enum kvm_mem_region_type type, bool protected)    argument
    1465  __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, enum kvm_mem_region_type type)    argument
    1472  vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, enum kvm_mem_region_type type)    argument
    1498  vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)    argument
    1517  vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)    argument
    1522  __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)    argument
    1541  vm_vaddr_alloc_page(struct kvm_vm *vm)    argument
    1562  virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, unsigned int npages)    argument
    1597  addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)    argument
    1630  addr_hva2gpa(struct kvm_vm *vm, void *hva)    argument
    1673  addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)    argument
    1690  vm_create_irqchip(struct kvm_vm *vm)    argument
    1797  __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)    argument
    1807  __kvm_create_device(struct kvm_vm *vm, uint64_t type)    argument
    1849  _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)    argument
    1859  kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)    argument
    1897  _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)    argument
    1908  kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)    argument
    1931  vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)    argument
    2066  __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, vm_paddr_t paddr_min, uint32_t memslot, bool protected)    argument
    2112  vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, uint32_t memslot)    argument
    2118  vm_alloc_page_table(struct kvm_vm *vm)    argument
    2136  addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)    argument
    2141  vm_compute_max_gfn(struct kvm_vm *vm)    argument
    2267  __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data, size_t max_elements)    argument
    2296  kvm_arch_vm_post_create(struct kvm_vm *vm)    argument
    2312  vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)    argument
    [all...]
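Taken together, these helpers form the selftest library's VM lifecycle: create a VM, allocate and map guest memory, translate between GVA/GPA/HVA, run, free. A minimal harness sketch built only from functions visible in the hits above (it assumes the in-tree selftest headers, including the ucall GUEST_DONE()/vcpu_run() plumbing that real tests use):

    #include <string.h>

    #include "kvm_util.h"
    #include "ucall_common.h"

    static void guest_code(void)
    {
        GUEST_DONE();              /* report back to the host and stop */
    }

    int main(void)
    {
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
        vm_vaddr_t gva;

        /* One VM, one vCPU, entry point guest_code. */
        vm = vm_create_with_one_vcpu(&vcpu, guest_code);

        /* Allocate a guest virtual page and zero it from the host side
         * through the host virtual alias of the backing memory. */
        gva = vm_vaddr_alloc_page(vm);
        memset(addr_gva2hva(vm, gva), 0, vm->page_size);

        vcpu_run(vcpu);            /* enter the guest until it exits */
        kvm_vm_free(vm);
        return 0;
    }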
/linux-master/tools/testing/selftests/kvm/include/

memstress.h
      33      struct kvm_vm *vm;    member in struct:memstress_args
      62  void memstress_destroy_vm(struct kvm_vm *vm);
      64  void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
      65  void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
      66  void memstress_set_random_access(struct kvm_vm *vm, bool random_access);
      73  void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
      75  void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots);
      76  void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots);
      77  void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots);
      78  void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned …
      [all...]

kvm_util_base.h
      69      struct kvm_vm *vm;    member in struct:kvm_vcpu
     157  #define kvm_for_each_vcpu(vm, i, vcpu) \
     158      for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
     159          if (!((vcpu) = vm->vcpus[i])) \
     164  memslot2region(struct kvm_vm *vm, uint32_t memslot);
     166  static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,    argument
     170      return memslot2region(vm, vm->memslots[type]);
     313  static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }    argument
     315  #define __vm_ioctl(vm, cmd, …
     376  vm_check_cap(struct kvm_vm *vm, long cap)    argument
     384  __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)    argument
     390  vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)    argument
     397  vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, uint64_t size, uint64_t attributes)    argument
     418  vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa, uint64_t size)    argument
     424  vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa, uint64_t size)    argument
     433  vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa, uint64_t size)    argument
     439  vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size)    argument
     458  kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)    argument
     465  kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, uint64_t first_page, uint32_t num_pages)    argument
     478  kvm_vm_reset_dirty_ring(struct kvm_vm *vm)    argument
     483  vm_get_stats_fd(struct kvm_vm *vm)    argument
     533  vm_get_stat(struct kvm_vm *vm, const char *stat_name)    argument
     543  __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, uint64_t flags)    argument
     554  vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, uint64_t flags)    argument
     583  vm_arch_has_protected_memory(struct kvm_vm *vm)    argument
     614  vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)    argument
     817  kvm_create_device(struct kvm_vm *vm, uint64_t type)    argument
     866  vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, vm_paddr_t paddr_min, uint32_t memslot)    argument
    1020  vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, void *guest_code)    argument
    1033  vm_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)    argument
    1043  virt_pgd_alloc(struct kvm_vm *vm)    argument
    1066  virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)    argument
    1089  addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)    argument
    1111  virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)    argument
    1117  __vm_disable_nx_huge_pages(struct kvm_vm *vm)    argument
    [all...]
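kvm_for_each_vcpu from the header above walks the vm->vcpus[] table up to last_vcpu_id, skipping slots that were never populated. A short sketch of a helper driving it together with the capability wrappers from kvm_util.c (selftest headers assumed; the id/fd fields printed here are the ones struct kvm_vcpu carries in this library):

    #include <stdio.h>

    #include "kvm_util.h"

    /* Enable the dirty ring VM-wide, then walk every created vCPU. */
    static void show_vcpus_with_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
    {
        struct kvm_vcpu *vcpu;
        int i;

        vm_enable_dirty_ring(vm, ring_size);

        kvm_for_each_vcpu(vm, i, vcpu)
            printf("vcpu %u: fd %d\n", vcpu->id, vcpu->fd);
    }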
/linux-master/tools/testing/selftests/kvm/x86_64/

smaller_maxphyaddr_emulation_test.c
      51      struct kvm_vm *vm;    local
      60      vm = vm_create_with_one_vcpu(&vcpu, guest_code);
      63      vm_init_descriptor_tables(vm);
      70      vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
      72      vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
      75      gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
      78      virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
      79      hva = addr_gpa2hva(vm, MEM_REGION_GPA);
      82      pte = vm_get_page_table_entry(vm, MEM_REGION_GVA);
     108      kvm_vm_free(vm);
     [all...]

svm_nested_shutdown_test.c
      46      struct kvm_vm *vm;    local
      50      vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
      51      vm_init_descriptor_tables(vm);
      54      vcpu_alloc_svm(vm, &svm_gva);
      56      vcpu_args_set(vcpu, 2, svm_gva, vm->idt);
      61      kvm_vm_free(vm);

set_boot_cpu_id.c
      39      int r = __vm_ioctl(vcpu->vm, KVM_SET_BOOT_CPU_ID,
      60      test_set_bsp_busy(vcpu, "while running vm");
      79      struct kvm_vm *vm;    local
      82      vm = vm_create(nr_vcpus);
      84      vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)(unsigned long)bsp_vcpu_id);
      87      vcpus[i] = vm_vcpu_add(vm, i, i == bsp_vcpu_id ? guest_bsp_vcpu :
      89      return vm;
      95      struct kvm_vm *vm;    local
      97      vm = create_vm(ARRAY_SIZE(vcpus), bsp_vcpu_id, vcpus);
     102      kvm_vm_free(vm);
     108      struct kvm_vm *vm;    local
     [all...]

hyperv_extended_hypercalls.c
      42      struct kvm_vm *vm;    local
      55      vm = vm_create_with_one_vcpu(&vcpu, guest_code);
      60      hcall_in_page = vm_vaddr_alloc_pages(vm, 1);
      61      memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size);
      64      hcall_out_page = vm_vaddr_alloc_pages(vm, 1);
      65      memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size);
      67      vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page),
      68                    addr_gva2gpa(vm, hcall_out_page), …
      [all...]
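The hyperv test above shows the standard way a host stages inputs for guest code: allocate guest pages, zero them through the HVA alias, then hand their guest physical addresses to the guest as arguments with vcpu_args_set(). A stripped-down sketch of the same flow (selftest headers assumed; guest_code here is a stand-in entry point, not the Hyper-V one):

    #include "kvm_util.h"
    #include "ucall_common.h"

    /* Receives the two GPAs staged by the host via vcpu_args_set(). */
    static void guest_code(uint64_t in_gpa, uint64_t out_gpa)
    {
        GUEST_DONE();
    }

    static void run_once(void)
    {
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
        vm_vaddr_t in_page = vm_vaddr_alloc_pages(vm, 1);
        vm_vaddr_t out_page = vm_vaddr_alloc_pages(vm, 1);

        /* Translate the guest virtual allocations to guest physical
         * addresses; the guest sees them as ordinary arguments. */
        vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, in_page),
                      addr_gva2gpa(vm, out_page));

        vcpu_run(vcpu);
        kvm_vm_free(vm);
    }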
/linux-master/tools/testing/selftests/kvm/aarch64/

smccc_filter.c
      40  static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,    argument
      49      return __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL,
      53  static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,    argument
      56      int ret = __set_smccc_filter(vm, start, nr_functions, action);
      64      struct kvm_vm *vm;    local
      66      vm = vm_create(1);
      67      vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
      75      *vcpu = aarch64_vcpu_add(vm, 0, &init, guest_main);
      76      return vm;
      82      struct kvm_vm *vm;    local
     101      struct kvm_vm *vm = setup_vm(&vcpu);    local
     123      struct kvm_vm *vm = setup_vm(&vcpu);    local
     136      struct kvm_vm *vm = setup_vm(&vcpu);    local
     149      struct kvm_vm *vm = setup_vm(&vcpu);    local
     164      struct kvm_vm *vm = setup_vm(&vcpu);    local
     192      struct kvm_vm *vm;    local
     230      struct kvm_vm *vm;    local
     247      struct kvm_vm *vm = vm_create_barebones();    local
     [all...]
/linux-master/drivers/gpu/drm/xe/

xe_exec.c
     103      struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);    local
     106      return xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
     123      struct xe_vm *vm;    local
     157      vm = q->vm;
     162      (xe_vm_in_lr_mode(vm) ? …
     186      if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
     187          err = down_write_killable(&vm->lock);
     [all...]
/linux-master/drivers/gpu/drm/i915/gt/

intel_gtt.c
      41  struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)    argument
      57      obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
      58                                                  vm->lmem_pt_obj_flags);
      60       * Ensure all paging structures for this vm share the same dma-resv
      65      obj->base.resv = i915_vm_resv_get(vm);
      66      obj->shares_resv_from = vm;
      68      if (vm->fpriv)
      69          i915_drm_client_add_object(vm->fpriv->client, obj);
      75  struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)    argument
      79      if (I915_SELFTEST_ONLY(should_fail(&vm…
      99  map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)    argument
     123  map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)    argument
     183  __i915_vm_close(struct i915_address_space *vm)    argument
     198  i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)    argument
     211  i915_address_space_fini(struct i915_address_space *vm)    argument
     226      struct i915_address_space *vm = …    local
     237      struct i915_address_space *vm = …    local
     253      struct i915_address_space *vm = …    local
     262  i915_address_space_init(struct i915_address_space *vm, int subclass)    argument
     360  setup_scratch_page(struct i915_address_space *vm)    argument
     424  free_scratch(struct i915_address_space *vm)    argument
     696  __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)    argument
     717  __vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)    argument
     [all...]

intel_gtt.h
      64  #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
     233      void (*bind_vma)(struct i915_address_space *vm,
     242      void (*unbind_vma)(struct i915_address_space *vm,
     301      /* Flags used when creating page-table objects for this vm */
     308      (*alloc_pt_dma)(struct i915_address_space *vm, int sz);
     310      (*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
     318      void (*allocate_va_range)(struct i915_address_space *vm,
     321      void (*clear_range)(struct i915_address_space *vm,
     323      void (*scratch_range)(struct i915_address_space *vm,
     325      void (*insert_page)(struct i915_address_space *vm,
     367      struct i915_address_space vm;    member in struct:i915_ggtt
     410      struct i915_address_space vm;    member in struct:i915_ppgtt
     425  i915_vm_is_4lvl(const struct i915_address_space *vm)    argument
     431  i915_vm_has_scratch_64K(struct i915_address_space *vm)    argument
     436  i915_vm_min_alignment(struct i915_address_space *vm, enum intel_memory_type type)    argument
     446  i915_vm_obj_min_alignment(struct i915_address_space *vm, struct drm_i915_gem_object *obj)    argument
     456  i915_vm_has_cache_coloring(struct i915_address_space *vm)    argument
     462  i915_vm_to_ggtt(struct i915_address_space *vm)    argument
     470  i915_vm_to_ppgtt(struct i915_address_space *vm)    argument
     478  i915_vm_get(struct i915_address_space *vm)    argument
     485  i915_vm_tryget(struct i915_address_space *vm)    argument
     490  assert_vm_alive(struct i915_address_space *vm)    argument
     501  i915_vm_resv_get(struct i915_address_space *vm)    argument
     511  i915_vm_put(struct i915_address_space *vm)    argument
     520  i915_vm_resv_put(struct i915_address_space *vm)    argument
     [all...]
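i915_vm_get/i915_vm_tryget/i915_vm_put above are the address space's reference-counting surface; tryget is the variant that refuses to resurrect an object whose count has already reached zero. A standalone sketch of that get/tryget/put shape using C11 atomics (the real code is built on kref plus a separate open count, so this shows the idiom, not the implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct address_space {
        atomic_int ref;            /* stands in for the kref */
    };

    static struct address_space *vm_get(struct address_space *vm)
    {
        atomic_fetch_add(&vm->ref, 1);
        return vm;
    }

    /* Succeed only while at least one reference is still live; never
     * bump a count that has already dropped to zero. */
    static bool vm_tryget(struct address_space *vm)
    {
        int old = atomic_load(&vm->ref);

        while (old > 0)
            if (atomic_compare_exchange_weak(&vm->ref, &old, old + 1))
                return true;
        return false;
    }

    static void vm_put(struct address_space *vm)
    {
        if (atomic_fetch_sub(&vm->ref, 1) == 1)
            free(vm);              /* last reference releases the object */
    }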
gen6_ppgtt.c
      23      dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
      74  static void gen6_ppgtt_clear_range(struct i915_address_space *vm,    argument
      77      struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
      79      const gen6_pte_t scratch_pte = vm->scratch[0]->encode;
     110  static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,    argument
     115      struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
     120      const u32 pte_encode = vm->pte_encode(0, pat_index, flags);
     166      gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
     172  static void gen6_alloc_va_range(struct i915_address_space *vm,    argument
     176      struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
     221      struct i915_address_space * const vm = &ppgtt->base.vm;    local
     267  gen6_ppgtt_cleanup(struct i915_address_space *vm)    argument
     280  pd_vma_bind(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, struct i915_vma_resource *vma_res, unsigned int pat_index, u32 unused)    argument
     296  pd_vma_unbind(struct i915_address_space *vm, struct i915_vma_resource *vma_res)    argument
     [all...]

intel_ggtt_gmch.c
      18  static void gmch_ggtt_insert_page(struct i915_address_space *vm,    argument
      30  static void gmch_ggtt_insert_entries(struct i915_address_space *vm,    argument
      47  static void gmch_ggtt_clear_range(struct i915_address_space *vm,    argument
      53  static void gmch_ggtt_remove(struct i915_address_space *vm)    argument
      79      struct drm_i915_private *i915 = ggtt->vm.i915;
      89      intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
      93      ggtt->vm.alloc_pt_dma = alloc_pt_dma;
      94      ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
     102      ggtt->vm.insert_page = gmch_ggtt_insert_page;
     103      ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
     [all...]
/linux-master/drivers/gpu/drm/i915/

i915_gem_evict.h
      16  int __must_check i915_gem_evict_something(struct i915_address_space *vm,
      22  int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
      26  int i915_gem_evict_vm(struct i915_address_space *vm,
/linux-master/tools/testing/selftests/kvm/include/s390x/

ucall.h
       9  static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)    argument
/linux-master/tools/testing/selftests/kvm/

set_memory_region_test.c
     114      struct kvm_vm *vm;    local
     118      vm = vm_create_with_one_vcpu(vcpu, guest_code);
     120      vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
     128      gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
     131      virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);
     134      hva = addr_gpa2hva(vm, MEM_REGION_GPA);
     142      return vm;
     183      struct kvm_vm *vm;    local
     186      vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);
     188      hva = addr_gpa2hva(vm, MEM_REGION_GPA);
     264      struct kvm_vm *vm;    local
     314      struct kvm_vm *vm;    local
     333      struct kvm_vm *vm;    local
     395      struct kvm_vm *vm;    local
     449  test_invalid_guest_memfd(struct kvm_vm *vm, int memfd, size_t offset, const char *msg)    argument
     460      struct kvm_vm *vm, *vm2;    local
     496      struct kvm_vm *vm;    local
     [all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/

amdgpu_vm.c
     119   * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
     121      struct amdgpu_vm *vm;    member in struct:amdgpu_vm_tlb_seq_struct
     130   * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
     133   * @vm: amdgpu_vm pointer
     140  int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,    argument
     145      if (vm->pasid == pasid)
     148      if (vm->pasid) {
     149          r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
     153          vm->pasid = 0;
     157          r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
     179      struct amdgpu_vm *vm = vm_bo->vm;    local
     293  amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)    argument
     323  amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, struct amdgpu_vm *vm, struct amdgpu_bo *bo)    argument
     368  amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, unsigned int num_fences)    argument
     385  amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm)    argument
     394  amdgpu_vm_init_entities(struct amdgpu_device *adev, struct amdgpu_vm *vm)    argument
     415  amdgpu_vm_fini_entities(struct amdgpu_vm *vm)    argument
     430  amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)    argument
     461  amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket, int (*validate)(void *p, struct amdgpu_bo *bo), void *param)    argument
     554  amdgpu_vm_ready(struct amdgpu_vm *vm)    argument
     763  amdgpu_vm_bo_find(struct amdgpu_vm *vm, struct amdgpu_bo *bo)    argument
     816  amdgpu_vm_update_pdes(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate)    argument
     912  amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate, bool unlocked, bool flush_tlb, bool allow_override, struct dma_resv *resv, uint64_t start, uint64_t last, uint64_t flags, uint64_t offset, uint64_t vram_base, struct ttm_resource *res, dma_addr_t *pages_addr, struct dma_fence **fence)    argument
    1060      struct amdgpu_vm *vm = bo_va->base.vm;    local
    1079  amdgpu_vm_get_memory(struct amdgpu_vm *vm, struct amdgpu_mem_stats *stats)    argument
    1121      struct amdgpu_vm *vm = bo_va->base.vm;    local
    1333  amdgpu_vm_free_mapping(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, struct dma_fence *fence)    argument
    1351  amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)    argument
    1379  amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence)    argument
    1430  amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)    argument
    1508  amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint32_t flush_type, uint32_t xcc_mask)    argument
    1555  amdgpu_vm_bo_add(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *bo)    argument
    1599      struct amdgpu_vm *vm = bo_va->base.vm;    local
    1671      struct amdgpu_vm *vm = bo_va->base.vm;    local
    1780      struct amdgpu_vm *vm = bo_va->base.vm;    local
    1829  amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size)    argument
    1948  amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, uint64_t addr)    argument
    1962  amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)    argument
    1999      struct amdgpu_vm *vm = bo_va->base.vm;    local
    2098      struct amdgpu_vm *vm = bo_base->vm;    local
    2239  amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)    argument
    2260      struct amdgpu_vm *vm;    local
    2291  amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)    argument
    2319  amdgpu_vm_create_task_info(struct amdgpu_vm *vm)    argument
    2334  amdgpu_vm_set_task_info(struct amdgpu_vm *vm)    argument
    2364  amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)    argument
    2478  amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)    argument
    2533  amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)    argument
    2548  amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)    argument
    2740      struct amdgpu_vm *vm;    local
    2830  amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)    argument
    2933      struct amdgpu_vm *vm;    local
    [all...]
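amdgpu_vm_set_pasid() keeps a PASID-to-VM lookup structure (an XArray in adev->vm_manager.pasids) consistent with the vm->pasid back-pointer: drop the old entry if one exists, then install the new one, treating "same value" as a no-op and pasid == 0 as a clear. A standalone sketch of that replace-or-clear logic over a trivial table (the kernel uses xa_erase_irq()/xa_store_irq() and propagates xa_err(); everything here is a simplified stand-in):

    #include <stddef.h>

    #define MAX_PASID 64

    struct vm { unsigned int pasid; };

    static struct vm *pasid_table[MAX_PASID];   /* stands in for the XArray */

    /* Re-point the lookup table: drop the VM's old PASID entry (if any),
     * install the new one, and keep vm->pasid as the cached inverse. */
    static int vm_set_pasid(struct vm *vm, unsigned int pasid)
    {
        if (vm->pasid == pasid)
            return 0;               /* already mapped as requested */

        if (vm->pasid) {
            pasid_table[vm->pasid] = NULL;
            vm->pasid = 0;
        }
        if (pasid) {
            if (pasid >= MAX_PASID || pasid_table[pasid])
                return -1;          /* slot taken: real code returns xa_err() */
            pasid_table[pasid] = vm;
            vm->pasid = pasid;
        }
        return 0;
    }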
/linux-master/drivers/video/fbdev/omap2/omapfb/dss/

display.c
     259  void videomode_to_omap_video_timings(const struct videomode *vm,    argument
     264      ovt->pixelclock = vm->pixelclock;
     265      ovt->x_res = vm->hactive;
     266      ovt->hbp = vm->hback_porch;
     267      ovt->hfp = vm->hfront_porch;
     268      ovt->hsw = vm->hsync_len;
     269      ovt->y_res = vm->vactive;
     270      ovt->vbp = vm->vback_porch;
     271      ovt->vfp = vm->vfront_porch;
     272      ovt->vsw = vm->vsync_len;
     291  omap_video_timings_to_videomode(const struct omap_video_timings *ovt, struct videomode *vm)    argument
     [all...]
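videomode_to_omap_video_timings() is a pure field-for-field translation between the generic struct videomode and the driver's struct omap_video_timings. A compilable miniature with simplified struct definitions (the real ones live in <video/videomode.h> and the omapfb headers; the sample numbers are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified copies of the two timing representations. */
    struct videomode {
        unsigned long pixelclock;
        uint32_t hactive, hfront_porch, hback_porch, hsync_len;
        uint32_t vactive, vfront_porch, vback_porch, vsync_len;
    };

    struct omap_video_timings {
        unsigned long pixelclock;
        uint16_t x_res, hfp, hbp, hsw;
        uint16_t y_res, vfp, vbp, vsw;
    };

    /* Field-for-field translation, mirroring the driver function above. */
    static void vm_to_ovt(const struct videomode *vm,
                          struct omap_video_timings *ovt)
    {
        ovt->pixelclock = vm->pixelclock;
        ovt->x_res = vm->hactive;
        ovt->hfp = vm->hfront_porch;
        ovt->hbp = vm->hback_porch;
        ovt->hsw = vm->hsync_len;
        ovt->y_res = vm->vactive;
        ovt->vfp = vm->vfront_porch;
        ovt->vbp = vm->vback_porch;
        ovt->vsw = vm->vsync_len;
    }

    int main(void)
    {
        /* 1024x768-style timings, purely illustrative numbers. */
        struct videomode vm = { 65000000, 1024, 24, 160, 136, 768, 3, 29, 6 };
        struct omap_video_timings ovt;

        vm_to_ovt(&vm, &ovt);
        printf("%ux%u @ %lu Hz pixel clock\n",
               ovt.x_res, ovt.y_res, ovt.pixelclock);
        return 0;
    }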
/linux-master/drivers/gpu/drm/omapdrm/

omap_encoder.c
      44  static void omap_encoder_update_videomode_flags(struct videomode *vm,    argument
      47      if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW |
      50          vm->flags |= DISPLAY_FLAGS_DE_LOW;
      52          vm->flags |= DISPLAY_FLAGS_DE_HIGH;
      55      if (!(vm->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
      58          vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
      60          vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
      63      if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE |
      66          vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE;
      68          vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
      81      struct videomode vm = { 0 };    local
     [all...]
/linux-master/drivers/irqchip/

irq-gic-v4.c
     158  int its_alloc_vcpu_irqs(struct its_vm *vm)    argument
     162      vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
     164      if (!vm->fwnode)
     167      vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
     168                                               vm->fwnode, vpe_domain_ops,
     169                                               vm);
     170      if (!vm->domain)
     173      for (i = 0; i < vm->nr_vpes; i++) {
     174          vm->vpes[i]->its_vm = vm;
     202  its_free_sgi_irqs(struct its_vm *vm)    argument
     221  its_free_vcpu_irqs(struct its_vm *vm)    argument
     [all...]
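its_alloc_vcpu_irqs() follows the kernel's staged-allocation idiom: each step that can fail jumps to a label that unwinds exactly the steps that already succeeded, in reverse order. A compact standalone sketch of that shape (malloc stands in for the fwnode/domain/per-vPE IRQ allocations; the real function releases resources through its dedicated teardown helpers):

    #include <stdlib.h>

    struct its_vm {
        void *fwnode;   /* stands in for the fwnode handle */
        void *domain;   /* stands in for the IRQ domain */
        int nr_vpes;
        int *vpe_irq;   /* one IRQ number per vPE */
    };

    static int alloc_vcpu_irqs(struct its_vm *vm)
    {
        vm->fwnode = malloc(1);
        if (!vm->fwnode)
            goto err;

        vm->domain = malloc(1);
        if (!vm->domain)
            goto err_fwnode;

        vm->vpe_irq = calloc(vm->nr_vpes, sizeof(*vm->vpe_irq));
        if (!vm->vpe_irq)
            goto err_domain;

        return 0;

    err_domain:                     /* unwind in reverse order */
        free(vm->domain);
        vm->domain = NULL;
    err_fwnode:
        free(vm->fwnode);
        vm->fwnode = NULL;
    err:
        return -1;
    }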
/linux-master/tools/testing/selftests/kvm/lib/riscv/

processor.c
      28  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)    argument
      30      return (v + vm->page_size - 1) & ~(vm->page_size - 1);
      33  static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)    argument
      39  static uint64_t ptrs_per_pte(struct kvm_vm *vm)    argument
      58  static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)    argument
      62      TEST_ASSERT(level < vm->pgtable_levels,
      68  void virt_arch_pgd_alloc(struct kvm_vm *vm)    argument
      70      size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;
      81  virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)    argument
     125  addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)    argument
     154  pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)    argument
     176  virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)    argument
     197      struct kvm_vm *vm = vcpu->vm;    local
     297  vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)    argument
     433  vm_init_vector_tables(struct kvm_vm *vm)    argument
     441  vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)    argument
     449  vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler)    argument
     [all...]
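pte_index() above pulls one radix level's worth of index bits out of a guest virtual address, and page_align() is the usual round-up-to-page. A standalone sketch of the index math under Sv39-style assumptions (4 KiB pages, 9 index bits per level, three levels; the selftest derives these from vm->page_shift and vm->pgtable_levels instead of hard-coding them):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12   /* 4 KiB pages */
    #define BITS_PER_LEVEL   9   /* 512 PTEs per page-table page */

    /* Index into the page table at `level`: level 0 is the leaf level,
     * so its index bits sit directly above the page offset. */
    static uint64_t pte_index(uint64_t gva, int level)
    {
        assert(level >= 0 && level < 3);  /* Sv39 has three levels */
        return (gva >> (PAGE_SHIFT + BITS_PER_LEVEL * level)) &
               ((1ULL << BITS_PER_LEVEL) - 1);
    }

    int main(void)
    {
        uint64_t gva = 0x40201000ULL;

        for (int level = 2; level >= 0; level--)
            printf("level %d index: %llu\n", level,
                   (unsigned long long)pte_index(gva, level));
        return 0;
    }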
/linux-master/drivers/gpu/drm/radeon/

radeon_vm.c
      78   * radeon_vm_manager_init - init the vm manager
      82   * Init the vm manager (cayman+).
     100   * radeon_vm_manager_fini - tear down the vm manager
     120   * radeon_vm_get_bos - add the vm BOs to a validation list
     123   * @vm: vm providing the BOs
     129  radeon_vm_get_bos(struct radeon_device *rdev, struct radeon_vm *vm, struct list_head *head)    argument
     136      list = kvmalloc_array(vm->max_pde_used + 2,
     141      /* add the vm page table to the list */
     142      list[0].robj = vm->page_directory;
     178  radeon_vm_grab_id(struct radeon_device *rdev, struct radeon_vm *vm, int ring)    argument
     237  radeon_vm_flush(struct radeon_device *rdev, struct radeon_vm *vm, int ring, struct radeon_fence *updates)    argument
     269  radeon_vm_fence(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_fence *fence)    argument
     294  radeon_vm_bo_find(struct radeon_vm *vm, struct radeon_bo *bo)    argument
     320  radeon_vm_bo_add(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo)    argument
     453      struct radeon_vm *vm = bo_va->vm;    local
     640  radeon_vm_update_page_directory(struct radeon_device *rdev, struct radeon_vm *vm)    argument
     814  radeon_vm_update_ptes(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_ib *ib, uint64_t start, uint64_t end, uint64_t dst, uint32_t flags)    argument
     886  radeon_vm_fence_pts(struct radeon_vm *vm, uint64_t start, uint64_t end, struct radeon_fence *fence)    argument
     915      struct radeon_vm *vm = bo_va->vm;    local
    1047  radeon_vm_clear_freed(struct radeon_device *rdev, struct radeon_vm *vm)    argument
    1085  radeon_vm_clear_invalids(struct radeon_device *rdev, struct radeon_vm *vm)    argument
    1121      struct radeon_vm *vm = bo_va->vm;    local
    1173  radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)    argument
    1233  radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)    argument
    [all...]