Searched refs:vm (Results 126 - 150 of 497) sorted by relevance


/linux-master/drivers/gpu/drm/panthor/
panthor_gem.c
34 struct panthor_vm *vm; local
40 vm = bo->vm;
44 to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
47 ret = panthor_vm_unmap_range(vm, bo->va_node.start,
52 panthor_vm_free_va(vm, &bo->va_node);
56 panthor_vm_put(vm);
63 * @vm: VM to map the GEM to. If NULL, the kernel object is not GPU mapped.
75 panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, argument
84 if (drm_WARN_ON(&ptdev->base, !vm))
[all...]
panthor_heap.c
89 /** @vm: VM this pool is bound to. */
90 struct panthor_vm *vm; member in struct:panthor_heap_pool
121 static void panthor_free_heap_chunk(struct panthor_vm *vm, argument
135 struct panthor_vm *vm,
147 chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
194 static void panthor_free_heap_chunks(struct panthor_vm *vm, argument
200 panthor_free_heap_chunk(vm, heap, chunk);
204 struct panthor_vm *vm,
212 ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true);
229 panthor_free_heap_chunks(pool->vm, hea
134 panthor_alloc_heap_chunk(struct panthor_device *ptdev, struct panthor_vm *vm, struct panthor_heap *heap, bool initial_chunk) argument
203 panthor_alloc_heap_chunks(struct panthor_device *ptdev, struct panthor_vm *vm, struct panthor_heap *heap, u32 chunk_count) argument
277 struct panthor_vm *vm; local
529 panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm) argument
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm_pt.c
150 * @vm: amdgpu_vm structure
157 struct amdgpu_vm *vm, uint64_t start,
162 cursor->entry = &vm->root;
279 * @vm: amdgpu_vm structure
286 struct amdgpu_vm *vm,
293 amdgpu_vm_pt_start(adev, vm, 0, cursor);
340 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
341 for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
350 * @vm: VM to clear BO from
359 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, argument
156 amdgpu_vm_pt_start(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t start, struct amdgpu_vm_pt_cursor *cursor) argument
285 amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_vm_pt_cursor *start, struct amdgpu_vm_pt_cursor *cursor) argument
446 amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, int level, bool immediate, struct amdgpu_bo_vm **vmbo, int32_t xcp_id) argument
539 amdgpu_vm_pt_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_vm_pt_cursor *cursor, bool immediate) argument
606 struct amdgpu_vm *vm; local
636 struct amdgpu_vm *vm = params->vm; local
686 amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm) argument
710 struct amdgpu_vm *vm = params->vm; local
961 struct amdgpu_vm *vm = params->vm; local
1025 amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm) argument
[all...]
/linux-master/drivers/gpu/drm/xe/
xe_gt_pagefault.c
84 static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr) argument
88 if (vm->usm.last_fault_vma) { /* Fast lookup */
89 if (vma_matches(vm->usm.last_fault_vma, page_addr))
90 vma = vm->usm.last_fault_vma;
93 vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
102 struct xe_vm *vm = xe_vma_vm(vma); local
109 if (atomic && IS_DGFX(vm->xe)) {
121 err = xe_bo_validate(bo, vm, true);
134 struct xe_vm *vm; local
147 vm
499 get_acc_vma(struct xe_vm *vm, struct acc *acc) argument
512 struct xe_vm *vm; local
[all...]
xe_pt.h
29 struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
32 void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
xe_pt.c
51 static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, argument
58 if (!xe_vm_has_scratch(vm))
62 return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
65 return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
79 * @vm: The vm to create for.
92 struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, argument
110 bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4
145 xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm, struct xe_pt *pt) argument
253 struct xe_vm *vm; member in struct:xe_pt_stage_bind_walk
480 struct xe_vm *vm = xe_walk->vm; local
831 struct xe_vm *vm = xe_vma_vm(vma); local
931 struct xe_vm *vm = xe_vma_vm(&uvma->vma); local
1021 struct xe_vm *vm = xe_vma_vm(&uvma->vma); local
1221 struct xe_vm *vm = xe_vma_vm(vma); local
1583 struct xe_vm *vm = xe_vma_vm(vma); local
[all...]
xe_devcoredump_types.h
45 /** @vm: Snapshot of VM state */
46 struct xe_vm_snapshot *vm; member in struct:xe_devcoredump_snapshot
xe_sched_job.c
65 return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION);
92 /* only a kernel context can submit a vm-less job */
93 XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
97 lockdep_assert_held(&q->vm->lock);
98 if (!xe_vm_in_lr_mode(q->vm))
99 xe_vm_assert_held(q->vm);
257 struct xe_vm *vm = q->vm; local
259 if (vm
285 xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm) argument
[all...]
/linux-master/tools/testing/selftests/kvm/
dirty_log_test.c
217 static void clear_log_create_vm_done(struct kvm_vm *vm) argument
225 vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
232 kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
239 kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
240 kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
274 static void dirty_ring_create_vm_done(struct kvm_vm *vm) argument
284 pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
285 pages = vm_adjust_num_guest_pages(vm->mode, pages);
286 if (vm->page_size < getpagesize())
287 pages = vm_num_host_pages(vm
481 log_mode_create_vm_done(struct kvm_vm *vm) argument
669 struct kvm_vm *vm; local
690 struct kvm_vm *vm; local
[all...]
kvm_page_table_test.c
48 struct kvm_vm *vm; member in struct:test_args
247 struct kvm_vm *vm; local
255 vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus, guest_num_pages,
260 guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
270 test_args.vm = vm;
280 vm_userspace_mem_region_add(vm, src_type, guest_test_phys_mem,
284 virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
287 host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
290 sync_global_to_guest(vm, test_arg
344 struct kvm_vm *vm; local
[all...]
arch_timer.c
52 struct kvm_vm *vm = vcpu->vm; local
67 sync_global_from_guest(vm, *shared_data);
145 static void test_run(struct kvm_vm *vm) argument
244 struct kvm_vm *vm; local
252 vm = test_vm_create();
253 test_run(vm);
254 test_vm_cleanup(vm);
/linux-master/tools/testing/selftests/kvm/x86_64/
triple_fault_event_test.c
16 static struct kvm_vm *vm; variable in typeref:struct:kvm_vm
77 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx);
78 vcpu_alloc_vmx(vm, &vmx_pages_gva);
83 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm);
84 vcpu_alloc_svm(vm, &svm_gva);
88 vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
fix_hypercall_test.c
111 struct kvm_vm *vm = vcpu->vm; local
113 vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
116 vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
120 sync_global_to_guest(vm, quirk_disabled);
122 virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
amx_test.c
216 struct kvm_vm *vm; local
238 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
247 vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);
250 amx_cfg = vm_vaddr_alloc_page(vm);
251 memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize());
254 tiledata = vm_vaddr_alloc_pages(vm, 2);
255 memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize());
258 xstate = vm_vaddr_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
259 memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
292 void *tiles_data = (void *)addr_gva2hva(vm, tiledat
[all...]
hyperv_svm_test.c
155 struct kvm_vm *vm; local
163 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
165 vcpu_alloc_svm(vm, &nested_gva);
166 vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
168 hcall_page = vm_vaddr_alloc_pages(vm, 1);
169 memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
171 vcpu_args_set(vcpu, 3, nested_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
198 kvm_vm_free(vm);
monitor_mwait_test.c
74 struct kvm_vm *vm; local
81 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
107 vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, disabled_quirks);
127 kvm_vm_free(vm);
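
Note: the x86_64 selftest hits above (triple_fault_event_test.c, fix_hypercall_test.c, amx_test.c, hyperv_svm_test.c, monitor_mwait_test.c) share one harness flow: create a VM bound to a guest entry point with vm_create_with_one_vcpu(), configure it (vm_enable_cap(), guest allocations, exception handlers), run the vCPU, and tear down with kvm_vm_free(). A minimal sketch of that flow, assuming the selftest headers kvm_util.h and ucall_common.h from tools/testing/selftests/kvm/include are on the include path; the trivial guest payload is hypothetical:

#include "kvm_util.h"       /* vm_create_with_one_vcpu(), vcpu_run(), kvm_vm_free() */
#include "ucall_common.h"   /* GUEST_DONE(), get_ucall(), UCALL_DONE */

/* Hypothetical guest payload: runs inside the VM and signals completion. */
static void guest_code(void)
{
	GUEST_DONE();
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;

	/* One VM, one vCPU, entry point guest_code(). */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Run until the guest reports UCALL_DONE back to the host. */
	do {
		vcpu_run(vcpu);
	} while (get_ucall(vcpu, &uc) != UCALL_DONE);

	kvm_vm_free(vm);
	return 0;
}
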
/linux-master/tools/testing/selftests/kvm/lib/
elf.c
104 * vm - Pointer to opaque type that describes the VM.
109 * into the virtual address space of the VM pointed to by vm. On entry
114 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename) argument
160 vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
162 seg_vend |= vm->page_size - 1;
165 vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart,
173 memset(addr_gva2hva(vm, vaddr), 0, seg_size);
190 test_read(fd, addr_gva2hva(vm, phdr.p_vaddr),
/linux-master/tools/testing/selftests/kvm/s390x/
resets.c
207 struct kvm_vm *vm; local
209 vm = vm_create(1);
211 *vcpu = vm_vcpu_add(vm, ARBITRARY_NON_ZERO_VCPU_ID, guest_code_initial);
213 return vm;
219 struct kvm_vm *vm; local
222 vm = create_vm(&vcpu);
236 kvm_vm_free(vm);
242 struct kvm_vm *vm; local
245 vm = create_vm(&vcpu);
259 kvm_vm_free(vm);
265 struct kvm_vm *vm; local
[all...]
/linux-master/drivers/gpu/drm/imx/dcss/
dcss-dtg.c
187 void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm) argument
194 u32 pixclock = vm->pixelclock;
197 dtg_lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
198 vm->hactive - 1;
199 dtg_lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
200 vm
[all...]
/linux-master/sound/pci/ctxfi/
ctvmem.h
54 dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index);
58 void ct_vm_destroy(struct ct_vm *vm);
/linux-master/tools/testing/selftests/net/
test_vxlan_under_vrf.sh
13 # | vm-1 netns | | vm-2 netns |
43 # This tests both the connectivity between vm-1 and vm-2, and that the underlay
65 vm[1]=$vm_1
66 vm[2]=$vm_2
97 setup-vm() {
108 ip link set veth-hv netns ${vm[$id]}
109 ip -netns ${vm[$id]} addr add 10.0.0.$id/24 dev veth-hv
110 ip -netns ${vm[
[all...]
/linux-master/tools/testing/selftests/kvm/include/x86_64/
kvm_util_arch.h
29 #define vm_arch_has_protected_memory(vm) \
30 __vm_arch_has_protected_memory(&(vm)->arch)
/linux-master/drivers/gpu/drm/i915/gem/selftests/
mock_context.h
30 struct i915_address_space *vm);
/linux-master/tools/testing/selftests/kvm/include/
timer_test.h
43 void test_vm_cleanup(struct kvm_vm *vm);
/linux-master/drivers/gpu/drm/fsl-dcu/
fsl_dcu_drm_crtc.c
89 struct videomode vm; local
93 drm_display_mode_to_videomode(mode, &vm);
99 if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
102 if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
106 DCU_HSYN_PARA_BP(vm.hback_porch) |
107 DCU_HSYN_PARA_PW(vm.hsync_len) |
108 DCU_HSYN_PARA_FP(vm.hfront_porch));
110 DCU_VSYN_PARA_BP(vm.vback_porch) |
111 DCU_VSYN_PARA_PW(vm.vsync_len) |
112 DCU_VSYN_PARA_FP(vm
[all...]
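
Note: the two display-controller hits above (dcss-dtg.c and fsl_dcu_drm_crtc.c) both consume a struct videomode and fold its porch, sync, and active fields into register values, e.g. dtg_lrc_x = hfront_porch + hback_porch + hsync_len + hactive - 1 in dcss_dtg_sync_set(). A standalone userspace sketch of that timing arithmetic; the struct below is a simplified stand-in for the kernel's include/video/videomode.h, and the 1080p numbers are illustrative only:

#include <stdio.h>

/* Simplified stand-in for the kernel's struct videomode (include/video/videomode.h). */
struct videomode {
	unsigned long pixelclock;	/* Hz */
	unsigned int hactive, hfront_porch, hback_porch, hsync_len;
	unsigned int vactive, vfront_porch, vback_porch, vsync_len;
};

int main(void)
{
	/* Illustrative 1920x1080@60 timings. */
	struct videomode vm = {
		.pixelclock = 148500000,
		.hactive = 1920, .hfront_porch = 88, .hback_porch = 148, .hsync_len = 44,
		.vactive = 1080, .vfront_porch = 4, .vback_porch = 36, .vsync_len = 5,
	};

	/* Same combination dcss_dtg_sync_set() programs: index of the last pixel/line in a frame. */
	unsigned int dtg_lrc_x = vm.hfront_porch + vm.hback_porch + vm.hsync_len + vm.hactive - 1;
	unsigned int dtg_lrc_y = vm.vfront_porch + vm.vback_porch + vm.vsync_len + vm.vactive - 1;

	printf("last column index: %u, last line index: %u\n", dtg_lrc_x, dtg_lrc_y);
	return 0;
}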

