Searched refs:vm (Results 51 - 75 of 497) sorted by relevance


/linux-master/drivers/gpu/drm/panthor/
panthor_mmu.c
45 /** @vm: VM bound to this slot. NULL if no VM is bound. */
46 struct panthor_vm *vm; member in struct:panthor_as_slot
88 /** @vm: VMs management fields */
101 } vm; member in struct:panthor_mmu
349 /** @node: Used to insert the VM in the panthor_mmu::vm::list. */
404 /** @vm: VM targeted by the VM operation. */
405 struct panthor_vm *vm; member in struct:panthor_vm_bind_job
437 struct panthor_vm *vm = cookie; local
441 if (unlikely(!vm->root_page_table)) {
444 drm_WARN_ON(&vm
489 struct panthor_vm *vm = cookie; local
589 mmu_hw_do_operation(struct panthor_vm *vm, u64 iova, u64 size, u32 op) argument
660 panthor_vm_has_unhandled_faults(struct panthor_vm *vm) argument
671 panthor_vm_is_unusable(struct panthor_vm *vm) argument
676 panthor_vm_release_as_locked(struct panthor_vm *vm) argument
700 panthor_vm_active(struct panthor_vm *vm) argument
809 panthor_vm_idle(struct panthor_vm *vm) argument
823 panthor_vm_stop(struct panthor_vm *vm) argument
828 panthor_vm_start(struct panthor_vm *vm) argument
839 panthor_vm_as(struct panthor_vm *vm) argument
865 panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size) argument
885 panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) argument
914 panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, struct sg_table *sgt, u64 offset, u64 size) argument
1010 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size, struct drm_mm_node *va_node) argument
1043 panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node) argument
1053 struct drm_gpuvm *vm = vm_bo->vm; local
1086 panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx, struct panthor_vm *vm) argument
1172 panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, struct panthor_vm *vm, struct panthor_gem_object *bo, u64 offset, u64 size, u64 va, u32 flags) argument
1292 panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx, struct panthor_vm *vm, u64 va, u64 size) argument
1344 panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx, struct panthor_vm *vm) argument
1367 panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset) argument
1472 struct panthor_vm *vm; local
1497 panthor_vm_destroy(struct panthor_vm *vm) argument
1530 struct panthor_vm *vm; local
1549 struct panthor_vm *vm; local
1567 struct panthor_vm *vm; local
1709 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; local
1752 struct panthor_vm *vm; local
1772 struct panthor_vm *vm; local
1783 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; local
1804 struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base); local
1852 panthor_vm_put(struct panthor_vm *vm) argument
1863 panthor_vm_get(struct panthor_vm *vm) argument
1885 panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create) argument
1943 panthor_vma_link(struct panthor_vm *vm, struct panthor_vma *vma, struct drm_gpuvm_bo *vm_bo) argument
1955 panthor_vma_unlink(struct panthor_vm *vm, struct panthor_vma *vma) argument
1986 struct panthor_vm *vm = priv; local
2015 struct panthor_vm *vm = priv; local
2063 struct panthor_vm *vm = priv; local
2089 panthor_vm_resv(struct panthor_vm *vm) argument
2094 panthor_vm_root_gem(struct panthor_vm *vm) argument
2103 panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op, bool flag_vm_unusable_on_failure) argument
2237 struct panthor_vm *vm; local
2341 panthor_vm_bind_prepare_op_ctx(struct drm_file *file, struct panthor_vm *vm, const struct drm_panthor_vm_bind_op *op, struct panthor_vm_op_ctx *op_ctx) argument
2412 panthor_vm_bind_job_create(struct drm_file *file, struct panthor_vm *vm, const struct drm_panthor_vm_bind_op *op) argument
2499 panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec, struct dma_fence *fence, enum dma_resv_usage private_usage, enum dma_resv_usage extobj_usage) argument
2515 panthor_vm_bind_exec_sync_op(struct drm_file *file, struct panthor_vm *vm, struct drm_panthor_vm_bind_op *op) argument
2554 panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo, u64 offset, u64 size, u64 va, u32 flags) argument
2581 panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size) argument
2609 panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm, u32 slot_count) argument
2635 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; local
2705 show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m) argument
2722 struct panthor_vm *vm; local
[all...]
/linux-master/drivers/gpu/drm/xe/
xe_vm.c
43 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) argument
45 return vm->gpuvm.r_obj;
54 * without the vm->userptr.notifier_lock held. There is no guarantee that the
70 struct xe_vm *vm = xe_vma_vm(vma); local
71 struct xe_device *xe = vm->xe;
73 lockdep_assert_held(&vm->lock);
79 static bool preempt_fences_waiting(struct xe_vm *vm) argument
83 lockdep_assert_held(&vm->lock);
84 xe_vm_assert_held(vm);
86 list_for_each_entry(q, &vm
105 alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, unsigned int *count) argument
126 wait_for_existing_preempt_fences(struct xe_vm *vm) argument
146 xe_vm_is_idle(struct xe_vm *vm) argument
159 arm_preempt_fences(struct xe_vm *vm, struct list_head *list) argument
178 add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo) argument
206 resume_and_reinstall_preempt_fences(struct xe_vm *vm, struct drm_exec *exec) argument
222 xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) argument
281 xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) argument
308 __xe_vm_userptr_needs_repin(struct xe_vm *vm) argument
318 xe_vm_kill(struct xe_vm *vm) argument
370 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); local
402 xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec, unsigned int num_fences) argument
428 xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm, bool *done) argument
467 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work); local
576 struct xe_vm *vm = xe_vma_vm(vma); local
645 xe_vm_userptr_pin(struct xe_vm *vm) argument
705 xe_vm_userptr_check_repin(struct xe_vm *vm) argument
716 xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) argument
756 xe_vma_create(struct xe_vm *vm, struct xe_bo *bo, u64 bo_offset_or_userptr, u64 start, u64 end, u16 pat_index, unsigned int flags) argument
859 struct xe_vm *vm = xe_vma_vm(vma); local
908 struct xe_vm *vm = xe_vma_vm(vma); local
951 struct xe_vm *vm = xe_vma_vm(vma); local
983 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) argument
999 xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) argument
1014 xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) argument
1190 xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm) argument
1207 xe_vm_free_scratch(struct xe_vm *vm) argument
1230 struct xe_vm *vm; local
1378 xe_vm_close(struct xe_vm *vm) argument
1385 xe_vm_close_and_put(struct xe_vm *vm) argument
1494 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm); local
1519 struct xe_vm *vm; local
1530 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile) argument
1537 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) argument
1547 struct xe_vm *vm = xe_vma_vm(vma); local
1637 struct xe_vm *vm = xe_vma_vm(vma); local
1718 __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, bool last_op) argument
1759 xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, bool last_op) argument
1779 xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op) argument
1812 struct xe_vm *vm; local
1912 struct xe_vm *vm; local
1941 xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, u32 region, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op) argument
1978 prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, bool post_commit) argument
2043 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, u64 bo_offset_or_userptr, u64 addr, u64 range, u32 operation, u32 flags, u32 prefetch_region, u16 pat_index) argument
2118 new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, u16 pat_index, unsigned int flags) argument
2203 xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) argument
2267 vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, struct drm_gpuva_ops *ops, struct xe_sync_entry *syncs, u32 num_syncs, struct list_head *ops_list, bool last) argument
2427 op_execute(struct drm_exec *exec, struct xe_vm *vm, struct xe_vma *vma, struct xe_vma_op *op) argument
2515 __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma, struct xe_vma_op *op) argument
2555 xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) argument
2595 xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op) argument
2614 xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, bool post_commit, bool prev_post_commit, bool next_post_commit) argument
2669 vm_bind_ioctl_ops_unwind(struct xe_vm *vm, struct drm_gpuva_ops **ops, int num_ops_list) argument
2695 vm_bind_ioctl_ops_execute(struct xe_vm *vm, struct list_head *ops_list) argument
2842 vm_bind_ioctl_signal_fences(struct xe_vm *vm, struct xe_exec_queue *q, struct xe_sync_entry *syncs, int num_syncs) argument
2858 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm, local
2873 struct xe_vm *vm; local
3118 xe_vm_lock(struct xe_vm *vm, bool intr) argument
3132 xe_vm_unlock(struct xe_vm *vm) argument
3204 xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id) argument
3264 xe_vm_snapshot_capture(struct xe_vm *vm) argument
[all...]
/linux-master/tools/testing/selftests/kvm/x86_64/
svm_nested_shutdown_test.c
46 struct kvm_vm *vm; local
50 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
51 vcpu_alloc_svm(vm, &svm_gva);
53 vcpu_args_set(vcpu, 2, svm_gva, vm->arch.idt);
58 kvm_vm_free(vm);
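The KVM selftests in this directory all share one lifecycle, visible in the matches above: create a VM with a single vCPU, stage per-vCPU state in guest memory, run, then free. A minimal sketch of that skeleton, assuming the framework headers under tools/testing/selftests/kvm/include (the guest_code body here is hypothetical):

#include "kvm_util.h"
#include "processor.h"

static void guest_code(void)
{
	/* Hypothetical guest body: signal completion to the host. */
	GUEST_DONE();
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Create a VM with one vCPU entering at guest_code. */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Run the vCPU until the guest exits back to userspace. */
	vcpu_run(vcpu);

	kvm_vm_free(vm);
	return 0;
}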
private_mem_kvm_exits_test.c
49 struct kvm_vm *vm; local
55 vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu,
58 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
63 virt_map(vm, EXITS_TEST_GVA, EXITS_TEST_GPA, EXITS_TEST_NPAGES);
66 vm_mem_set_private(vm, EXITS_TEST_GPA, EXITS_TEST_SIZE);
72 vm_mem_region_delete(vm, EXITS_TEST_SLOT);
82 kvm_vm_free(vm);
87 struct kvm_vm *vm; local
91 vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu,
95 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOU
[all...]
svm_vmcall_test.c
40 struct kvm_vm *vm; local
44 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
46 vcpu_alloc_svm(vm, &svm_gva);
68 kvm_vm_free(vm);
vmx_dirty_log_test.c
88 struct kvm_vm *vm; local
95 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
96 vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
100 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
110 virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
123 prepare_eptp(vmx, vm, 0);
124 nested_map_memslot(vmx, vm, 0);
125 nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
126 nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
130 host_test_mem = addr_gpa2hva(vm, GUEST_TEST_ME
[all...]
/linux-master/tools/testing/selftests/memfd/
run_hugetlbfs_test.sh
26 nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
34 echo 3 > /proc/sys/vm/drop_caches
35 echo $(( $hpages_needed + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
50 echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
67 echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
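The script's pattern is: snapshot /proc/sys/vm/nr_hugepages, grow the pool by the pages the test needs, and restore the saved value on every exit path. The same save/bump/restore idea as a hedged C sketch (the `needed` count is a placeholder, and error handling is trimmed):

#include <stdio.h>

/* Read the current hugepage pool size, mirroring the script's
 * `cat /proc/sys/vm/nr_hugepages`. */
static long read_nr_hugepages(void)
{
	FILE *f = fopen("/proc/sys/vm/nr_hugepages", "r");
	long val = -1;

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

static int write_nr_hugepages(long val)
{
	FILE *f = fopen("/proc/sys/vm/nr_hugepages", "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", val);
	return fclose(f);
}

int main(void)
{
	long saved = read_nr_hugepages();
	long needed = 8;	/* hypothetical: pages the test requires */

	if (saved < 0 || write_nr_hugepages(saved + needed))
		return 1;

	/* ... run the hugetlbfs test here ... */

	write_nr_hugepages(saved);	/* restore the original pool size */
	return 0;
}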
/linux-master/drivers/gpu/drm/i915/gt/
gen8_ppgtt.c
90 struct drm_i915_private *i915 = ppgtt->vm.i915;
91 struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
102 if (i915_vm_is_4lvl(&ppgtt->vm)) {
180 static unsigned int gen8_pd_top_count(const struct i915_address_space *vm) argument
182 unsigned int shift = __gen8_pte_shift(vm->top);
184 return (vm->total + (1ull << shift) - 1) >> shift;
188 gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx) argument
190 struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
192 if (vm->top == 2)
195 return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm
199 gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr) argument
204 __gen8_ppgtt_cleanup(struct i915_address_space *vm, struct i915_page_directory *pd, int count, int lvl) argument
222 gen8_ppgtt_cleanup(struct i915_address_space *vm) argument
239 __gen8_ppgtt_clear(struct i915_address_space * const vm, struct i915_page_directory * const pd, u64 start, const u64 end, int lvl) argument
307 gen8_ppgtt_clear(struct i915_address_space *vm, u64 start, u64 length) argument
322 __gen8_ppgtt_alloc(struct i915_address_space * const vm, struct i915_vm_pt_stash *stash, struct i915_page_directory * const pd, u64 * const start, const u64 end, int lvl) argument
390 gen8_ppgtt_alloc(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, u64 start, u64 length) argument
406 __gen8_ppgtt_foreach(struct i915_address_space *vm, struct i915_page_directory *pd, u64 *start, u64 end, int lvl, void (*fn)(struct i915_address_space *vm, struct i915_page_table *pt, void *data), void *data) argument
439 gen8_ppgtt_foreach(struct i915_address_space *vm, u64 start, u64 length, void (*fn)(struct i915_address_space *vm, struct i915_page_table *pt, void *data), void *data) argument
503 xehp_ppgtt_insert_huge(struct i915_address_space *vm, struct i915_vma_resource *vma_res, struct sgt_dma *iter, unsigned int pat_index, u32 flags) argument
611 gen8_ppgtt_insert_huge(struct i915_address_space *vm, struct i915_vma_resource *vma_res, struct sgt_dma *iter, unsigned int pat_index, u32 flags) argument
735 gen8_ppgtt_insert(struct i915_address_space *vm, struct i915_vma_resource *vma_res, unsigned int pat_index, u32 flags) argument
763 gen8_ppgtt_insert_entry(struct i915_address_space *vm, dma_addr_t addr, u64 offset, unsigned int pat_index, u32 flags) argument
784 xehp_ppgtt_insert_entry_lm(struct i915_address_space *vm, dma_addr_t addr, u64 offset, unsigned int pat_index, u32 flags) argument
813 xehp_ppgtt_insert_entry(struct i915_address_space *vm, dma_addr_t addr, u64 offset, unsigned int pat_index, u32 flags) argument
826 gen8_init_scratch(struct i915_address_space *vm) argument
894 struct i915_address_space *vm = &ppgtt->vm; local
925 gen8_alloc_top_pd(struct i915_address_space *vm) argument
957 gen8_init_rsvd(struct i915_address_space *vm) argument
[all...]
intel_ppgtt.c
16 struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz) argument
24 pt->base = vm->alloc_pt_dma(vm, sz);
53 struct i915_page_directory *alloc_pd(struct i915_address_space *vm) argument
61 pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
71 void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl) argument
177 trace_i915_ppgtt_create(&ppgtt->vm);
182 void ppgtt_bind_vma(struct i915_address_space *vm, argument
191 vm
207 ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma_resource *vma_res) argument
223 i915_vm_alloc_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, u64 size) argument
276 i915_vm_map_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash) argument
293 i915_vm_free_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash) argument
[all...]
/linux-master/drivers/gpu/drm/i915/
i915_vma_resource.c
20 * We use a per-vm interval tree to keep track of vma_resources
22 * the vm mutex, and nodes are removed just after the unbind fence signals.
23 * The removal takes the vm mutex from a kernel thread which we need to
105 struct i915_address_space *vm; local
112 vm = vma_res->vm;
114 intel_runtime_pm_put(&vm->i915->runtime_pm, vma_res->wakeref);
116 vma_res->vm = NULL;
118 mutex_lock(&vm->mutex);
119 vma_res_itree_remove(vma_res, &vm
177 struct i915_address_space *vm = vma_res->vm; local
233 struct i915_address_space *vm = vma_res->vm; local
272 i915_vma_resource_color_adjust_range(struct i915_address_space *vm, u64 *start, u64 *end) argument
295 i915_vma_resource_bind_dep_sync(struct i915_address_space *vm, u64 offset, u64 size, bool intr) argument
330 i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm) argument
379 i915_vma_resource_bind_dep_await(struct i915_address_space *vm, struct i915_sw_fence *sw_fence, u64 offset, u64 size, bool intr, gfp_t gfp) argument
[all...]
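The comment at the top of i915_vma_resource.c explains the data structure behind these matches: a per-vm interval tree of vma_resources scheduled for unbind but not yet unbound, protected by the vm mutex, with nodes removed once the unbind fence signals. A sketch of how such a tree is built with the kernel's generic interval-tree macro, using a hypothetical range_node in place of struct i915_vma_resource:

#include <linux/interval_tree_generic.h>
#include <linux/rbtree.h>
#include <linux/types.h>

/* Hypothetical node type standing in for i915_vma_resource: an address
 * range [start, start + size) scheduled for unbind. */
struct range_node {
	struct rb_node rb;
	u64 start;
	u64 size;
	u64 __subtree_last;	/* maintained by the interval tree */
};

#define NODE_START(n)	((n)->start)
#define NODE_LAST(n)	((n)->start + (n)->size - 1)

/* Generates range_it_insert(), range_it_remove(), range_it_iter_first()
 * and range_it_iter_next() over a struct rb_root_cached. */
INTERVAL_TREE_DEFINE(struct range_node, rb, u64, __subtree_last,
		     NODE_START, NODE_LAST, static, range_it)

/* Visit every node overlapping [start, last] - the same lookup shape
 * a bind would use to find pending unbinds it must wait on. */
static void for_each_overlap(struct rb_root_cached *root, u64 start, u64 last)
{
	struct range_node *n;

	for (n = range_it_iter_first(root, start, last); n;
	     n = range_it_iter_next(n, start, last)) {
		/* ... wait on / collect n ... */
	}
}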
/linux-master/drivers/gpu/drm/imx/dcss/
dcss-ss.c
120 void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm, argument
129 lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
130 vm->hactive - 1;
131 lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
132 vm->vactive - 1;
136 hsync_start = vm
[all...]
/linux-master/drivers/virt/acrn/
ioreq.c
39 static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu, argument
64 ret = hcall_notify_req_finish(vm->vmid, vcpu);
79 if (vcpu >= client->vm->vcpu_num)
84 acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
88 ret = ioreq_complete_request(client->vm, vcpu, acrn_req);
93 int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu) argument
97 spin_lock_bh(&vm->ioreq_clients_lock);
98 if (vm->default_client)
99 ret = acrn_ioreq_complete_request(vm->default_client,
101 spin_unlock_bh(&vm
212 acrn_ioreq_request_clear(struct acrn_vm *vm) argument
312 handle_cf8cfc(struct acrn_vm *vm, struct acrn_io_request *req, u16 vcpu) argument
381 find_ioreq_client(struct acrn_vm *vm, struct acrn_io_request *req) argument
415 acrn_ioreq_client_create(struct acrn_vm *vm, ioreq_handler_t handler, void *priv, bool is_default, const char *name) argument
468 struct acrn_vm *vm = client->vm; local
497 acrn_ioreq_dispatch(struct acrn_vm *vm) argument
546 struct acrn_vm *vm; local
596 acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma) argument
636 acrn_ioreq_deinit(struct acrn_vm *vm) argument
[all...]
hsm.c
31 struct acrn_vm *vm; local
33 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
34 if (!vm)
37 vm->vmid = ACRN_INVALID_VMID;
38 filp->private_data = vm;
110 struct acrn_vm *vm = filp->private_data; local
126 if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {
144 vm = acrn_vm_create(vm, vm_para
427 struct acrn_vm *vm = filp->private_data; local
[all...]
mm.c
19 static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region) argument
28 regions->vmid = vm->vmid;
35 "Failed to set memory region for VM[%u]!\n", vm->vmid);
43 * @vm: User VM.
52 int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa, argument
68 ret = modify_region(vm, region);
79 * @vm: User VM.
85 int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size) argument
100 ret = modify_region(vm, region);
108 int acrn_vm_memseg_map(struct acrn_vm *vm, struc argument
131 acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap) argument
156 acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap) argument
352 acrn_vm_all_ram_unmap(struct acrn_vm *vm) argument
[all...]
Makefile
3 acrn-y := hsm.o vm.o mm.o ioreq.o ioeventfd.o irqfd.o
/linux-master/drivers/gpu/drm/omapdrm/dss/
hdmi_wp.c
144 const struct videomode *vm)
150 vsync_inv = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
151 hsync_inv = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
158 r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 3, 3);
164 const struct videomode *vm)
181 timing_h |= FLD_VAL(vm->hback_porch, 31, 20);
182 timing_h |= FLD_VAL(vm->hfront_porch, 19, 8);
183 timing_h |= FLD_VAL(vm->hsync_len - hsync_len_offset, 7, 0);
186 timing_v |= FLD_VAL(vm->vback_porch, 31, 20);
187 timing_v |= FLD_VAL(vm
143 hdmi_wp_video_config_interface(struct hdmi_wp_data *wp, const struct videomode *vm) argument
163 hdmi_wp_video_config_timing(struct hdmi_wp_data *wp, const struct videomode *vm) argument
192 hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt, struct videomode *vm, const struct hdmi_config *param) argument
[all...]
/linux-master/tools/testing/selftests/kvm/riscv/
arch_timer.c
82 struct kvm_vm *vm; local
85 vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
89 vm_init_vector_tables(vm);
90 vm_install_interrupt_handler(vm, guest_irq_handler);
97 sync_global_to_guest(vm, timer_freq);
101 sync_global_to_guest(vm, test_args);
103 return vm;
106 void test_vm_cleanup(struct kvm_vm *vm) argument
108 kvm_vm_free(vm);
/linux-master/drivers/gpu/drm/i915/selftests/
i915_gem_gtt.c
170 if (!ppgtt->vm.allocate_va_range)
181 limit = min(ppgtt->vm.total, limit);
185 err = i915_vm_lock_objects(&ppgtt->vm, &ww);
193 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
197 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
199 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
203 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
206 ppgtt->vm.clear_range(&ppgtt->vm,
244 lowlevel_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
390 close_object_list(struct list_head *objects, struct i915_address_space *vm) argument
408 fill_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
633 walk_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
716 pot_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
791 drunk_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
901 __shrink_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
979 shrink_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
1001 shrink_boom(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
1079 misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr, u64 addr, u64 size, unsigned long flags) argument
1144 misaligned_pin(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
1190 exercise_ppgtt(struct drm_i915_private *dev_priv, int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) argument
1276 exercise_ggtt(struct drm_i915_private *i915, int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) argument
1453 exercise_mock(struct drm_i915_private *i915, int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) argument
1459 struct i915_address_space *vm; local
1506 struct i915_address_space *vm = vma->vm; local
1687 struct i915_address_space *vm = vma->vm; local
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.h
171 struct amdgpu_vm *vm; member in struct:amdgpu_vm_bo_base
228 * @vm: optional amdgpu_vm we do this update for
230 struct amdgpu_vm *vm; member in struct:amdgpu_vm_update_params
301 * use vm_eviction_lock/unlock(vm)
307 /* Lock to protect vm_bo add/del/move on all lists of vm */
411 /* vm pte handling */
447 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
450 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
451 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
452 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
571 amdgpu_vm_tlb_seq(struct amdgpu_vm *vm) argument
596 amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) argument
602 amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) argument
611 amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) argument
[all...]
/linux-master/arch/mips/math-emu/
ieee754int.h
54 #define EXPLODESP(v, vc, vs, ve, vm) \
58 vm = SPMANT(v); \
60 if (vm == 0) \
62 else if (ieee754_csr.nan2008 ^ !(vm & SP_MBIT(SP_FBITS - 1))) \
67 if (vm) { \
74 vm |= SP_HIDDEN_BIT; \
92 #define EXPLODEDP(v, vc, vs, ve, vm) \
94 vm = DPMANT(v); \
98 if (vm == 0) \
100 else if (ieee754_csr.nan2008 ^ !(vm
[all...]
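EXPLODESP unpacks an IEEE-754 binary32 value into sign, biased exponent, and mantissa, classifies it (zero, denormal, infinity, NaN), and restores the hidden bit for normal numbers. A standalone userspace sketch of the same decomposition (field layout is the architectural one: 1 sign bit, 8 exponent bits, 23 fraction bits):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decompose an IEEE-754 binary32 value the way EXPLODESP does. */
static void explode_sp(float f)
{
	uint32_t bits;

	memcpy(&bits, &f, sizeof(bits));	/* type-pun safely */

	uint32_t sign = bits >> 31;
	int32_t  exp  = (bits >> 23) & 0xff;	/* biased exponent */
	uint32_t mant = bits & 0x7fffff;	/* 23 fraction bits */

	if (exp == 0xff)
		printf("%s\n", mant ? "NaN" : "Inf");
	else if (exp == 0 && mant == 0)
		printf("zero, sign=%u\n", sign);
	else if (exp == 0)
		printf("denormal, mant=0x%06x\n", mant);	/* no hidden bit */
	else
		printf("normal, sign=%u exp=%d mant=0x%06x\n",
		       sign, exp - 127, mant | (1u << 23));	/* hidden bit */
}

int main(void)
{
	explode_sp(1.5f);	/* normal: sign=0 exp=0 mant=0xc00000 */
	return 0;
}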
/linux-master/tools/testing/selftests/kvm/include/aarch64/
vgic.h
19 int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
26 void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
27 int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
35 int vgic_its_setup(struct kvm_vm *vm);
/linux-master/tools/testing/selftests/kvm/include/riscv/
ucall.h
10 static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) argument
/linux-master/tools/testing/selftests/kvm/
memslot_modification_stress_test.c
57 struct kvm_vm *vm; member in struct:memslot_antagonist_args
62 static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, argument
65 uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;
73 gpa = memstress_args.gpa - pages * vm->page_size;
77 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
80 vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
93 struct kvm_vm *vm; local
95 vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
105 add_remove_memslot(vm,
[all...]
/linux-master/tools/testing/selftests/kvm/lib/x86_64/
vmx.c
66 * vm - The VM to allocate guest-virtual addresses in.
75 vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) argument
77 vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
78 struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
81 vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
82 vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
83 vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
86 vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
87 vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
88 vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_
367 nested_create_pte(struct kvm_vm *vm, struct eptPageTableEntry *pte, uint64_t nested_paddr, uint64_t paddr, int current_level, int target_level) argument
399 __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, int target_level) argument
450 nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr) argument
473 __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size, int level) argument
490 nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size) argument
499 nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t memslot) argument
521 nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t addr, uint64_t size) argument
539 prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t eptp_memslot) argument
549 prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm) argument
[all...]
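vcpu_alloc_vmx() shows the allocation idiom the selftest library uses throughout: reserve a page of guest-virtual address space, then derive its host-virtual and guest-physical views so the host can initialize what hardware will later consume. A minimal sketch with the same kvm_util helpers (alloc_scratch_page is a hypothetical name):

#include <string.h>

#include "kvm_util.h"

/* Allocate one guest page and derive all three views of it - the same
 * pattern vcpu_alloc_vmx() uses for the vmxon and vmcs regions. */
static void alloc_scratch_page(struct kvm_vm *vm)
{
	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);	/* guest-virtual */
	void *hva = addr_gva2hva(vm, gva);		/* host-virtual */
	vm_paddr_t gpa = addr_gva2gpa(vm, gva);		/* guest-physical */

	/* The host initializes the page through hva before the guest
	 * touches it through gva; gpa is what hardware-facing fields
	 * (e.g. a VMCS pointer) expect. */
	memset(hva, 0, vm->page_size);
	(void)gpa;
}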
/linux-master/include/video/
videomode.h
37 * @vm: return value
43 struct videomode *vm);
48 * @vm: return value
55 struct videomode *vm, unsigned int index);
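struct videomode carries the active size plus the front porch, sync length, and back porch on each axis; the lrc_x/lrc_y sums in the dcss-ss.c and hdmi_wp.c matches above are exactly these totals minus one. A small sketch of the derived quantities (the helper names are hypothetical):

#include <video/videomode.h>

/* Total scanline length: active pixels plus front porch, sync, and
 * back porch. The lrc_x computation in dcss-ss.c is this minus one. */
static unsigned long vm_htotal(const struct videomode *vm)
{
	return vm->hactive + vm->hfront_porch + vm->hsync_len +
	       vm->hback_porch;
}

/* Total frame height, by the same construction vertically. */
static unsigned long vm_vtotal(const struct videomode *vm)
{
	return vm->vactive + vm->vfront_porch + vm->vsync_len +
	       vm->vback_porch;
}

/* Refresh rate in Hz follows directly from the pixel clock. */
static unsigned long vm_refresh(const struct videomode *vm)
{
	return vm->pixelclock / (vm_htotal(vm) * vm_vtotal(vm));
}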

Completed in 241 milliseconds
