Searched refs:vm (Results 51 - 75 of 485) sorted by relevance


/linux-master/tools/testing/selftests/kvm/include/x86_64/
kvm_util_arch.h
20 #define vm_arch_has_protected_memory(vm) \
21 __vm_arch_has_protected_memory(&(vm)->arch)
/linux-master/drivers/gpu/drm/xe/
xe_vm.c
42 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) argument
44 return vm->gpuvm.r_obj;
53 * without the vm->userptr.notifier_lock held. There is no guarantee that the
70 struct xe_vm *vm = xe_vma_vm(vma); local
71 struct xe_device *xe = vm->xe;
79 lockdep_assert_held(&vm->lock);
177 static bool preempt_fences_waiting(struct xe_vm *vm) argument
181 lockdep_assert_held(&vm->lock);
182 xe_vm_assert_held(vm);
184 list_for_each_entry(q, &vm
203 alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, unsigned int *count) argument
224 wait_for_existing_preempt_fences(struct xe_vm *vm) argument
244 xe_vm_is_idle(struct xe_vm *vm) argument
257 arm_preempt_fences(struct xe_vm *vm, struct list_head *list) argument
276 add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo) argument
304 resume_and_reinstall_preempt_fences(struct xe_vm *vm, struct drm_exec *exec) argument
320 xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) argument
379 xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) argument
406 __xe_vm_userptr_needs_repin(struct xe_vm *vm) argument
416 xe_vm_kill(struct xe_vm *vm) argument
468 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); local
500 xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec, unsigned int num_fences) argument
526 xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm, bool *done) argument
565 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work); local
674 struct xe_vm *vm = xe_vma_vm(vma); local
739 xe_vm_userptr_pin(struct xe_vm *vm) argument
799 xe_vm_userptr_check_repin(struct xe_vm *vm) argument
810 xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) argument
850 xe_vma_create(struct xe_vm *vm, struct xe_bo *bo, u64 bo_offset_or_userptr, u64 start, u64 end, u16 pat_index, unsigned int flags) argument
953 struct xe_vm *vm = xe_vma_vm(vma); local
1009 struct xe_vm *vm = xe_vma_vm(vma); local
1052 struct xe_vm *vm = xe_vma_vm(vma); local
1084 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) argument
1100 xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) argument
1115 xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) argument
1293 xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm) argument
1310 xe_vm_free_scratch(struct xe_vm *vm) argument
1333 struct xe_vm *vm; local
1484 xe_vm_close(struct xe_vm *vm) argument
1491 xe_vm_close_and_put(struct xe_vm *vm) argument
1600 struct xe_vm *vm = local
1626 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm); local
1634 struct xe_vm *vm; local
1645 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile) argument
1652 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) argument
1662 struct xe_vm *vm = xe_vma_vm(vma); local
1752 struct xe_vm *vm = xe_vma_vm(vma); local
1833 __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, bool last_op) argument
1874 xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, bool last_op) argument
1894 xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op) argument
1927 struct xe_vm *vm; local
2027 struct xe_vm *vm; local
2056 xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, u32 region, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op) argument
2093 prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, bool post_commit) argument
2158 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, u64 bo_offset_or_userptr, u64 addr, u64 range, u32 operation, u32 flags, u32 prefetch_region, u16 pat_index) argument
2229 new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, u16 pat_index, unsigned int flags) argument
2314 xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) argument
2378 vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, struct drm_gpuva_ops *ops, struct xe_sync_entry *syncs, u32 num_syncs, struct list_head *ops_list, bool last) argument
2536 op_execute(struct drm_exec *exec, struct xe_vm *vm, struct xe_vma *vma, struct xe_vma_op *op) argument
2624 __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma, struct xe_vma_op *op) argument
2664 xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) argument
2704 xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op) argument
2723 xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, bool post_commit, bool prev_post_commit, bool next_post_commit) argument
2778 vm_bind_ioctl_ops_unwind(struct xe_vm *vm, struct drm_gpuva_ops **ops, int num_ops_list) argument
2804 vm_bind_ioctl_ops_execute(struct xe_vm *vm, struct list_head *ops_list) argument
2948 vm_bind_ioctl_signal_fences(struct xe_vm *vm, struct xe_exec_queue *q, struct xe_sync_entry *syncs, int num_syncs) argument
2964 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm, local
2979 struct xe_vm *vm; local
3224 xe_vm_lock(struct xe_vm *vm, bool intr) argument
3238 xe_vm_unlock(struct xe_vm *vm) argument
3306 xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id) argument
3366 xe_vm_snapshot_capture(struct xe_vm *vm) argument
[all...]
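
The signatures collected above sketch the xe_vm locking surface (xe_vm_lock()/xe_vm_unlock(), plus the lockdep assertions on vm->lock). A minimal sketch of a hypothetical caller, inferred only from those signatures — the int return and -EINTR behaviour are assumptions, and this is xe-internal code that will not build standalone:

/* Hypothetical caller, sketched from the xe_vm_lock()/xe_vm_unlock()
 * signatures above; the real locking rules live in xe_vm.c. */
static int example_touch_vm(struct xe_vm *vm)
{
	int err;

	err = xe_vm_lock(vm, true);	/* intr == true: interruptible, as an ioctl path would be */
	if (err)
		return err;		/* e.g. -EINTR if a signal arrived (assumed) */

	/* ... operate on the VM while the lock is held ... */

	xe_vm_unlock(vm);
	return 0;
}
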
/linux-master/tools/testing/selftests/memfd/
run_hugetlbfs_test.sh
26 nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
34 echo 3 > /proc/sys/vm/drop_caches
35 echo $(( $hpages_needed + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
50 echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
67 echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
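
The script brackets the test with /proc/sys/vm/nr_hugepages: save the current count, raise it by what the test needs, and write the saved value back on every exit path. A standalone C rendition of that save/raise/restore pattern (needs root; `needed` stands in for the script's $hpages_needed):

/* Save, raise, and restore /proc/sys/vm/nr_hugepages, like the script above. */
#include <stdio.h>

#define NR_HUGEPAGES "/proc/sys/vm/nr_hugepages"

static long read_nr_hugepages(void)
{
	FILE *f = fopen(NR_HUGEPAGES, "r");
	long val = -1;

	if (f && fscanf(f, "%ld", &val) != 1)
		val = -1;
	if (f)
		fclose(f);
	return val;
}

static int write_nr_hugepages(long val)
{
	FILE *f = fopen(NR_HUGEPAGES, "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", val);
	return fclose(f);
}

int main(void)
{
	long saved = read_nr_hugepages();	/* like nr_hugepgs=`cat ...` */
	long needed = 2;			/* stand-in for $hpages_needed */

	if (saved < 0 || write_nr_hugepages(saved + needed))
		return 1;

	/* ... run the test that consumes the extra huge pages ... */

	return write_nr_hugepages(saved) ? 1 : 0;	/* restore on exit */
}
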
/linux-master/drivers/gpu/drm/i915/gt/
gen8_ppgtt.c
90 struct drm_i915_private *i915 = ppgtt->vm.i915;
91 struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
102 if (i915_vm_is_4lvl(&ppgtt->vm)) {
180 static unsigned int gen8_pd_top_count(const struct i915_address_space *vm) argument
182 unsigned int shift = __gen8_pte_shift(vm->top);
184 return (vm->total + (1ull << shift) - 1) >> shift;
188 gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx) argument
190 struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
192 if (vm->top == 2)
195 return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm
199 gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr) argument
204 __gen8_ppgtt_cleanup(struct i915_address_space *vm, struct i915_page_directory *pd, int count, int lvl) argument
222 gen8_ppgtt_cleanup(struct i915_address_space *vm) argument
239 __gen8_ppgtt_clear(struct i915_address_space * const vm, struct i915_page_directory * const pd, u64 start, const u64 end, int lvl) argument
307 gen8_ppgtt_clear(struct i915_address_space *vm, u64 start, u64 length) argument
322 __gen8_ppgtt_alloc(struct i915_address_space * const vm, struct i915_vm_pt_stash *stash, struct i915_page_directory * const pd, u64 * const start, const u64 end, int lvl) argument
390 gen8_ppgtt_alloc(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, u64 start, u64 length) argument
406 __gen8_ppgtt_foreach(struct i915_address_space *vm, struct i915_page_directory *pd, u64 *start, u64 end, int lvl, void (*fn)(struct i915_address_space *vm, struct i915_page_table *pt, void *data), void *data) argument
439 gen8_ppgtt_foreach(struct i915_address_space *vm, u64 start, u64 length, void (*fn)(struct i915_address_space *vm, struct i915_page_table *pt, void *data), void *data) argument
503 xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm, struct i915_vma_resource *vma_res, struct sgt_dma *iter, unsigned int pat_index, u32 flags) argument
611 gen8_ppgtt_insert_huge(struct i915_address_space *vm, struct i915_vma_resource *vma_res, struct sgt_dma *iter, unsigned int pat_index, u32 flags) argument
735 gen8_ppgtt_insert(struct i915_address_space *vm, struct i915_vma_resource *vma_res, unsigned int pat_index, u32 flags) argument
763 gen8_ppgtt_insert_entry(struct i915_address_space *vm, dma_addr_t addr, u64 offset, unsigned int pat_index, u32 flags) argument
784 __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm, dma_addr_t addr, u64 offset, unsigned int pat_index, u32 flags) argument
813 xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm, dma_addr_t addr, u64 offset, unsigned int pat_index, u32 flags) argument
826 gen8_init_scratch(struct i915_address_space *vm) argument
894 struct i915_address_space *vm = &ppgtt->vm; local
925 gen8_alloc_top_pd(struct i915_address_space *vm) argument
957 gen8_init_rsvd(struct i915_address_space *vm) argument
[all...]
intel_ppgtt.c
16 struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz) argument
24 pt->base = vm->alloc_pt_dma(vm, sz);
53 struct i915_page_directory *alloc_pd(struct i915_address_space *vm) argument
61 pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
71 void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl) argument
177 trace_i915_ppgtt_create(&ppgtt->vm);
182 void ppgtt_bind_vma(struct i915_address_space *vm, argument
191 vm
207 ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma_resource *vma_res) argument
223 i915_vm_alloc_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, u64 size) argument
276 i915_vm_map_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash) argument
293 i915_vm_free_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash) argument
[all...]
/linux-master/tools/testing/selftests/kvm/x86_64/
private_mem_kvm_exits_test.c
49 struct kvm_vm *vm; local
55 vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu,
58 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
63 virt_map(vm, EXITS_TEST_GVA, EXITS_TEST_GPA, EXITS_TEST_NPAGES);
66 vm_mem_set_private(vm, EXITS_TEST_GPA, EXITS_TEST_SIZE);
72 vm_mem_region_delete(vm, EXITS_TEST_SLOT);
82 kvm_vm_free(vm);
87 struct kvm_vm *vm; local
91 vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu,
95 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOU
[all...]
svm_vmcall_test.c
40 struct kvm_vm *vm; local
44 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
46 vcpu_alloc_svm(vm, &svm_gva);
68 kvm_vm_free(vm);
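
This result shows the canonical KVM selftest shape end to end: create a VM with one vCPU, place nested-SVM state in guest memory, run, free. A skeleton sketch assuming the selftest framework headers — it only builds inside tools/testing/selftests/kvm/, and guest_code plus the run loop are placeholders:

#include "kvm_util.h"
#include "svm_util.h"

static void guest_code(void)
{
	/* runs inside the guest; a real test would GUEST_DONE() etc. */
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t svm_gva;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);	/* VM + one vCPU */
	vcpu_alloc_svm(vm, &svm_gva);	/* guest pages for nested SVM state */

	/* ... vcpu_run(vcpu) and ucall checks would go here ... */

	kvm_vm_free(vm);
	return 0;
}
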
vmx_dirty_log_test.c
91 struct kvm_vm *vm; local
98 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
99 vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
103 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
113 virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
126 prepare_eptp(vmx, vm, 0);
127 nested_map_memslot(vmx, vm, 0);
128 nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
129 nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
133 host_test_mem = addr_gpa2hva(vm, GUEST_TEST_ME
[all...]
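
The prepare_eptp()/nested_map() calls above are the whole nested-EPT setup for this test. The same four calls again, annotated; the constants are the test's own, shown here only for shape:

prepare_eptp(vmx, vm, 0);		/* allocate the L2 EPT root */
nested_map_memslot(vmx, vm, 0);		/* identity-map everything in memslot 0 */
/* alias two L2 GPAs onto the same L1 test page: */
nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
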
/linux-master/drivers/gpu/drm/i915/
i915_vma_resource.c
20 * We use a per-vm interval tree to keep track of vma_resources
22 * the vm mutex, and nodes are removed just after the unbind fence signals.
23 * The removal takes the vm mutex from a kernel thread which we need to
105 struct i915_address_space *vm; local
112 vm = vma_res->vm;
114 intel_runtime_pm_put(&vm->i915->runtime_pm, vma_res->wakeref);
116 vma_res->vm = NULL;
118 mutex_lock(&vm->mutex);
119 vma_res_itree_remove(vma_res, &vm
177 struct i915_address_space *vm = vma_res->vm; local
233 struct i915_address_space *vm = vma_res->vm; local
272 i915_vma_resource_color_adjust_range(struct i915_address_space *vm, u64 *start, u64 *end) argument
295 i915_vma_resource_bind_dep_sync(struct i915_address_space *vm, u64 offset, u64 size, bool intr) argument
330 i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm) argument
379 i915_vma_resource_bind_dep_await(struct i915_address_space *vm, struct i915_sw_fence *sw_fence, u64 offset, u64 size, bool intr, gfp_t gfp) argument
[all...]
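
The header comment in this result explains the design: vma_resources sit in a per-vm interval tree, inserted with the vm mutex held and removed from a kernel thread once the unbind fence signals. A sketch of that removal path, reconstructed from the mutex_lock()/vma_res_itree_remove() lines above; the tree field name pending_unbind is an assumption (the matched line is truncated), and this is i915-internal code:

static void example_unbind_fence_signaled(struct i915_vma_resource *vma_res)
{
	struct i915_address_space *vm = vma_res->vm;

	vma_res->vm = NULL;
	mutex_lock(&vm->mutex);		/* taken from a kernel thread */
	vma_res_itree_remove(vma_res, &vm->pending_unbind);	/* field name assumed */
	mutex_unlock(&vm->mutex);
}
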
/linux-master/drivers/gpu/drm/imx/dcss/
dcss-ss.c
120 void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm, argument
129 lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
130 vm->hactive - 1;
131 lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
132 vm->vactive - 1;
136 hsync_start = vm
[all...]
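
lrc_x and lrc_y above are last-pixel indices: front porch + back porch + sync length + active, minus one. A standalone illustration with a reduced struct videomode and standard 1080p CEA timings:

#include <stdio.h>

struct videomode {	/* subset of include/video/videomode.h */
	unsigned int hactive, hfront_porch, hback_porch, hsync_len;
	unsigned int vactive, vfront_porch, vback_porch, vsync_len;
};

int main(void)
{
	struct videomode vm = {
		.hactive = 1920, .hfront_porch = 88, .hback_porch = 148, .hsync_len = 44,
		.vactive = 1080, .vfront_porch = 4,  .vback_porch = 36,  .vsync_len = 5,
	};
	unsigned int lrc_x = vm.hfront_porch + vm.hback_porch + vm.hsync_len +
			     vm.hactive - 1;
	unsigned int lrc_y = vm.vfront_porch + vm.vback_porch + vm.vsync_len +
			     vm.vactive - 1;

	printf("last column %u, last row %u\n", lrc_x, lrc_y);	/* 2199, 1124 */
	return 0;
}
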
/linux-master/drivers/virt/acrn/
ioreq.c
39 static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu, argument
64 ret = hcall_notify_req_finish(vm->vmid, vcpu);
79 if (vcpu >= client->vm->vcpu_num)
84 acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
88 ret = ioreq_complete_request(client->vm, vcpu, acrn_req);
93 int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu) argument
97 spin_lock_bh(&vm->ioreq_clients_lock);
98 if (vm->default_client)
99 ret = acrn_ioreq_complete_request(vm->default_client,
101 spin_unlock_bh(&vm
212 acrn_ioreq_request_clear(struct acrn_vm *vm) argument
312 handle_cf8cfc(struct acrn_vm *vm, struct acrn_io_request *req, u16 vcpu) argument
381 find_ioreq_client(struct acrn_vm *vm, struct acrn_io_request *req) argument
415 acrn_ioreq_client_create(struct acrn_vm *vm, ioreq_handler_t handler, void *priv, bool is_default, const char *name) argument
468 struct acrn_vm *vm = client->vm; local
497 acrn_ioreq_dispatch(struct acrn_vm *vm) argument
546 struct acrn_vm *vm; local
596 acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma) argument
636 acrn_ioreq_deinit(struct acrn_vm *vm) argument
[all...]
hsm.c
31 struct acrn_vm *vm; local
33 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
34 if (!vm)
37 vm->vmid = ACRN_INVALID_VMID;
38 filp->private_data = vm;
110 struct acrn_vm *vm = filp->private_data; local
126 if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {
144 vm = acrn_vm_create(vm, vm_para
427 struct acrn_vm *vm = filp->private_data; local
[all...]
mm.c
18 static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region) argument
27 regions->vmid = vm->vmid;
34 "Failed to set memory region for VM[%u]!\n", vm->vmid);
42 * @vm: User VM.
51 int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa, argument
67 ret = modify_region(vm, region);
78 * @vm: User VM.
84 int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size) argument
99 ret = modify_region(vm, region);
107 int acrn_vm_memseg_map(struct acrn_vm *vm, struc argument
130 acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap) argument
155 acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap) argument
315 acrn_vm_all_ram_unmap(struct acrn_vm *vm) argument
[all...]
Makefile
3 acrn-y := hsm.o vm.o mm.o ioreq.o ioeventfd.o irqfd.o
/linux-master/drivers/gpu/drm/omapdrm/dss/
hdmi_wp.c
144 const struct videomode *vm)
150 vsync_inv = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
151 hsync_inv = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
158 r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 3, 3);
164 const struct videomode *vm)
181 timing_h |= FLD_VAL(vm->hback_porch, 31, 20);
182 timing_h |= FLD_VAL(vm->hfront_porch, 19, 8);
183 timing_h |= FLD_VAL(vm->hsync_len - hsync_len_offset, 7, 0);
186 timing_v |= FLD_VAL(vm->vback_porch, 31, 20);
187 timing_v |= FLD_VAL(vm
143 hdmi_wp_video_config_interface(struct hdmi_wp_data *wp, const struct videomode *vm) argument
163 hdmi_wp_video_config_timing(struct hdmi_wp_data *wp, const struct videomode *vm) argument
192 hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt, struct videomode *vm, const struct hdmi_config *param) argument
[all...]
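
timing_h and timing_v above are built by OR-ing FLD_VAL() fields into bit ranges [start:end]. A runnable rendition of that packing; the FLD_MASK()/FLD_VAL() bodies follow the usual omapdrm dss definitions, which is an assumption here:

#include <stdio.h>

#define FLD_MASK(start, end)	(((1u << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

int main(void)
{
	unsigned int hback_porch = 148, hfront_porch = 88, hsync_len = 44;
	unsigned int timing_h = 0;

	timing_h |= FLD_VAL(hback_porch, 31, 20);	/* bits 31..20 */
	timing_h |= FLD_VAL(hfront_porch, 19, 8);	/* bits 19..8 */
	timing_h |= FLD_VAL(hsync_len, 7, 0);		/* bits 7..0 */

	printf("TIMING_H = 0x%08x\n", timing_h);	/* 0x0940582c */
	return 0;
}
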
/linux-master/tools/testing/selftests/kvm/riscv/
arch_timer.c
84 struct kvm_vm *vm; local
87 vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
91 vm_init_vector_tables(vm);
92 vm_install_interrupt_handler(vm, guest_irq_handler);
99 sync_global_to_guest(vm, timer_freq);
103 sync_global_to_guest(vm, test_args);
105 return vm;
108 void test_vm_cleanup(struct kvm_vm *vm) argument
110 kvm_vm_free(vm);
/linux-master/tools/testing/selftests/kvm/lib/x86_64/
processor.c
126 void virt_arch_pgd_alloc(struct kvm_vm *vm) argument
128 TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
129 "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
132 if (!vm->pgd_created) {
133 vm->pgd = vm_alloc_page_table(vm);
134 vm->pgd_created = true;
138 static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte, argument
142 uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
145 TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm
152 virt_create_upper_pte(struct kvm_vm *vm, uint64_t *parent_pte, uint64_t vaddr, uint64_t paddr, int current_level, int target_level) argument
185 __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level) argument
241 virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) argument
246 virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, uint64_t nr_bytes, int level) argument
277 __vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr, int *level) argument
316 vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr) argument
323 virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) argument
418 kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp) argument
456 kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector, struct kvm_segment *segp) argument
488 kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector, struct kvm_segment *segp) argument
504 addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) argument
519 kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt) argument
528 kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp, int selector) argument
543 vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu) argument
575 kvm_arch_vm_post_create(struct kvm_vm *vm) argument
596 vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) argument
639 vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id) argument
1082 kvm_init_vm_address_properties(struct kvm_vm *vm) argument
1090 set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr, int dpl, unsigned short selector) argument
1142 vm_init_descriptor_tables(struct kvm_vm *vm) argument
1157 struct kvm_vm *vm = vcpu->vm; local
1170 vm_install_exception_handler(struct kvm_vm *vm, int vector, void (*handler)(struct ex_regs *)) argument
1293 vm_compute_max_gfn(struct kvm_vm *vm) argument
1334 vm_is_unrestricted_guest(struct kvm_vm *vm) argument
[all...]
vmx.c
66 * vm - The VM to allocate guest-virtual addresses in.
75 vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) argument
77 vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
78 struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
81 vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
82 vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
83 vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
86 vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
87 vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
88 vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_
367 nested_create_pte(struct kvm_vm *vm, struct eptPageTableEntry *pte, uint64_t nested_paddr, uint64_t paddr, int current_level, int target_level) argument
399 __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, int target_level) argument
450 nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr) argument
473 __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size, int level) argument
490 nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size) argument
499 nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t memslot) argument
521 nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t addr, uint64_t size) argument
539 prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t eptp_memslot) argument
549 prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm) argument
[all...]
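
vcpu_alloc_vmx() above derives three views of each guest page it allocates: a guest-virtual address, a host pointer, and a guest-physical address. A sketch of that triple, assuming the selftest framework (kvm_util.h):

#include <string.h>
#include "kvm_util.h"

static void example_page_views(struct kvm_vm *vm)
{
	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);	/* guest-virtual */
	void *hva = addr_gva2hva(vm, gva);		/* host pointer to the same page */
	vm_paddr_t gpa = addr_gva2gpa(vm, gva);		/* guest-physical */

	memset(hva, 0, vm->page_size);	/* host writes; the guest sees it at gva */
	(void)gpa;	/* the gpa is what ends up in VMCS fields like vmxon_gpa */
}
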
/linux-master/drivers/gpu/drm/i915/selftests/
i915_gem_gtt.c
170 if (!ppgtt->vm.allocate_va_range)
181 limit = min(ppgtt->vm.total, limit);
185 err = i915_vm_lock_objects(&ppgtt->vm, &ww);
193 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
197 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
199 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
203 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
206 ppgtt->vm.clear_range(&ppgtt->vm,
244 lowlevel_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
390 close_object_list(struct list_head *objects, struct i915_address_space *vm) argument
408 fill_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
633 walk_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
716 pot_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
791 drunk_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
901 __shrink_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
979 shrink_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
1001 shrink_boom(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
1079 misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr, u64 addr, u64 size, unsigned long flags) argument
1144 misaligned_pin(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) argument
1190 exercise_ppgtt(struct drm_i915_private *dev_priv, int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) argument
1276 exercise_ggtt(struct drm_i915_private *i915, int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) argument
1453 exercise_mock(struct drm_i915_private *i915, int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time)) argument
1459 struct i915_address_space *vm; local
1506 struct i915_address_space *vm = vma->vm; local
1687 struct i915_address_space *vm = vma->vm; local
[all...]
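
The selftest above drives the i915 page-table stash through its full lifecycle: reserve pages, map them, let allocate_va_range() consume them, free the remainder. The same sequence condensed and annotated (i915-internal; will not build standalone):

err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);	/* reserve page-table pages */
if (!err) {
	err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);	/* map the reserved pages */
	if (!err)
		/* populate [0, size) from the stash: */
		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);	/* return unused pages */
}
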
/linux-master/arch/mips/math-emu/
ieee754int.h
54 #define EXPLODESP(v, vc, vs, ve, vm) \
58 vm = SPMANT(v); \
60 if (vm == 0) \
62 else if (ieee754_csr.nan2008 ^ !(vm & SP_MBIT(SP_FBITS - 1))) \
67 if (vm) { \
74 vm |= SP_HIDDEN_BIT; \
92 #define EXPLODEDP(v, vc, vs, ve, vm) \
94 vm = DPMANT(v); \
98 if (vm == 0) \
100 else if (ieee754_csr.nan2008 ^ !(vm
[all...]
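
EXPLODESP() splits a single-precision value into sign, biased exponent, and mantissa, then re-attaches the hidden bit for normal numbers. A standalone userspace rendition; the kernel macro additionally classifies the value (zero/inf/NaN/denormal), elided here:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	float f = -1.5f;
	uint32_t v;

	memcpy(&v, &f, sizeof(v));	/* view the raw bits */

	uint32_t vs = v >> 31;		/* sign */
	uint32_t ve = (v >> 23) & 0xff;	/* biased exponent */
	uint32_t vm = v & 0x7fffff;	/* mantissa, as SPMANT() extracts it */

	if (ve != 0 && ve != 0xff)
		vm |= 1u << 23;		/* SP_HIDDEN_BIT for normal numbers */

	printf("sign=%u exp=%u (unbiased %d) mant=0x%06x\n",
	       vs, ve, (int)ve - 127, vm);	/* -1.5f: sign=1, unbiased exp 0 */
	return 0;
}
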
/linux-master/tools/testing/selftests/kvm/include/riscv/
ucall.h
9 static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) argument
/linux-master/tools/testing/selftests/kvm/
memslot_modification_stress_test.c
60 struct kvm_vm *vm; member in struct:memslot_antagonist_args
65 static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, argument
68 uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;
76 gpa = memstress_args.gpa - pages * vm->page_size;
80 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
83 vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
96 struct kvm_vm *vm; local
98 vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
108 add_remove_memslot(vm,
[all...]
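
add_remove_memslot() above is the antagonist thread: it repeatedly installs and deletes a dummy memslot while vCPUs fault on guest memory. The loop condensed, assuming the selftest framework (DUMMY_MEMSLOT_INDEX and the gpa choice are the test's own):

#include <unistd.h>
#include "kvm_util.h"

static void example_add_remove(struct kvm_vm *vm, uint64_t gpa,
			       uint64_t pages, useconds_t delay, int count)
{
	for (int i = 0; i < count; i++) {
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
					    DUMMY_MEMSLOT_INDEX, pages, 0);
		vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
		usleep(delay);	/* give vCPUs time to hit the churned range */
	}
}
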
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.h
171 struct amdgpu_vm *vm; member in struct:amdgpu_vm_bo_base
228 * @vm: optional amdgpu_vm we do this update for
230 struct amdgpu_vm *vm; member in struct:amdgpu_vm_update_params
296 * use vm_eviction_lock/unlock(vm)
302 /* Lock to protect vm_bo add/del/move on all lists of vm */
405 /* vm pte handling */
439 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
442 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
443 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
444 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
561 amdgpu_vm_tlb_seq(struct amdgpu_vm *vm) argument
586 amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) argument
592 amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) argument
601 amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) argument
[all...]
/linux-master/include/video/
videomode.h
37 * @vm: return value
43 struct videomode *vm);
48 * @vm: return value
55 struct videomode *vm, unsigned int index);
/linux-master/tools/testing/selftests/kvm/lib/
memstress.c
89 void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus, argument
131 struct kvm_vm *vm; local
171 vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus,
175 args->vm = vm;
178 region_end_gfn = vm->max_gfn + 1;
213 vm_userspace_mem_region_add(vm, backing_src, region_start,
219 virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);
221 memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
226 memstress_setup_nested(vm, nr_vcpu
235 memstress_destroy_vm(struct kvm_vm *vm) argument
240 memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent) argument
246 memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed) argument
252 memstress_set_random_access(struct kvm_vm *vm, bool random_access) argument
263 memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus) argument
329 toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable) argument
341 memstress_enable_dirty_logging(struct kvm_vm *vm, int slots) argument
346 memstress_disable_dirty_logging(struct kvm_vm *vm, int slots) argument
351 memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots) argument
362 memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots, uint64_t pages_per_slot) argument
[all...]
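
The memstress helpers above suggest a standard dirty-logging round. A sketch of that sequence, assuming the selftest framework; vm, bitmaps, slots and pages_per_slot are taken as given:

memstress_enable_dirty_logging(vm, slots);	/* turn on KVM dirty tracking */
/* ... let the memstress vCPUs write for an iteration ... */
memstress_get_dirty_log(vm, bitmaps, slots);	/* fetch one bitmap per slot */
memstress_clear_dirty_log(vm, bitmaps, slots, pages_per_slot);	/* re-arm */
memstress_disable_dirty_logging(vm, slots);
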

Completed in 178 milliseconds
