/linux-master/drivers/s390/net/
qeth_core_main.c
    3381: EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
    3382: memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
/linux-master/drivers/gpu/drm/xe/
xe_vm.c
    42: static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)  [argument]
    44: return vm->gpuvm.r_obj;
    53: * without the vm->userptr.notifier_lock held. There is no guarantee that the
    70: struct xe_vm *vm = xe_vma_vm(vma);  [local]
    71: struct xe_device *xe = vm->xe;
    79: lockdep_assert_held(&vm->lock);
    177: static bool preempt_fences_waiting(struct xe_vm *vm)  [argument]
    181: lockdep_assert_held(&vm->lock);
    182: xe_vm_assert_held(vm);
    184: list_for_each_entry(q, &vm …
    203: alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, unsigned int *count)  [argument]
    224: wait_for_existing_preempt_fences(struct xe_vm *vm)  [argument]
    244: xe_vm_is_idle(struct xe_vm *vm)  [argument]
    257: arm_preempt_fences(struct xe_vm *vm, struct list_head *list)  [argument]
    276: add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)  [argument]
    304: resume_and_reinstall_preempt_fences(struct xe_vm *vm, struct drm_exec *exec)  [argument]
    320: xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)  [argument]
    379: xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)  [argument]
    406: __xe_vm_userptr_needs_repin(struct xe_vm *vm)  [argument]
    416: xe_vm_kill(struct xe_vm *vm)  [argument]
    468: struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);  [local]
    500: xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec, unsigned int num_fences)  [argument]
    526: xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm, bool *done)  [argument]
    565: struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);  [local]
    674: struct xe_vm *vm = xe_vma_vm(vma);  [local]
    739: xe_vm_userptr_pin(struct xe_vm *vm)  [argument]
    799: xe_vm_userptr_check_repin(struct xe_vm *vm)  [argument]
    810: xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)  [argument]
    850: xe_vma_create(struct xe_vm *vm, struct xe_bo *bo, u64 bo_offset_or_userptr, u64 start, u64 end, u16 pat_index, unsigned int flags)  [argument]
    953: struct xe_vm *vm = xe_vma_vm(vma);  [local]
    1009: struct xe_vm *vm = xe_vma_vm(vma);  [local]
    1052: struct xe_vm *vm = xe_vma_vm(vma);  [local]
    1084: xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)  [argument]
    1100: xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)  [argument]
    1115: xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)  [argument]
    1293: xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm)  [argument]
    1310: xe_vm_free_scratch(struct xe_vm *vm)  [argument]
    1333: struct xe_vm *vm;  [local]
    1484: xe_vm_close(struct xe_vm *vm)  [argument]
    1491: xe_vm_close_and_put(struct xe_vm *vm)  [argument]
    1600: struct xe_vm *vm = …  [local]
    1626: struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);  [local]
    1634: struct xe_vm *vm;  [local]
    1645: xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)  [argument]
    1652: to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)  [argument]
    1662: struct xe_vm *vm = xe_vma_vm(vma);  [local]
    1752: struct xe_vm *vm = xe_vma_vm(vma);  [local]
    1833: __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, bool last_op)  [argument]
    1874: xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, bool last_op)  [argument]
    1894: xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op)  [argument]
    1927: struct xe_vm *vm;  [local]
    2027: struct xe_vm *vm;  [local]
    2056: xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, u32 region, struct xe_sync_entry *syncs, u32 num_syncs, bool first_op, bool last_op)  [argument]
    2093: prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, bool post_commit)  [argument]
    2158: vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, u64 bo_offset_or_userptr, u64 addr, u64 range, u32 operation, u32 flags, u32 prefetch_region, u16 pat_index)  [argument]
    2229: new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, u16 pat_index, unsigned int flags)  [argument]
    2314: xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)  [argument]
    2378: vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, struct drm_gpuva_ops *ops, struct xe_sync_entry *syncs, u32 num_syncs, struct list_head *ops_list, bool last)  [argument]
    2536: op_execute(struct drm_exec *exec, struct xe_vm *vm, struct xe_vma *vma, struct xe_vma_op *op)  [argument]
    2624: __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma, struct xe_vma_op *op)  [argument]
    2664: xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)  [argument]
    2704: xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)  [argument]
    2723: xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, bool post_commit, bool prev_post_commit, bool next_post_commit)  [argument]
    2778: vm_bind_ioctl_ops_unwind(struct xe_vm *vm, struct drm_gpuva_ops **ops, int num_ops_list)  [argument]
    2804: vm_bind_ioctl_ops_execute(struct xe_vm *vm, struct list_head *ops_list)  [argument]
    2948: vm_bind_ioctl_signal_fences(struct xe_vm *vm, struct xe_exec_queue *q, struct xe_sync_entry *syncs, int num_syncs)  [argument]
    2964: xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm, …  [local]
    2979: struct xe_vm *vm;  [local]
    3224: xe_vm_lock(struct xe_vm *vm, bool intr)  [argument]
    3238: xe_vm_unlock(struct xe_vm *vm)  [argument]
    3306: xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)  [argument]
    3366: xe_vm_snapshot_capture(struct xe_vm *vm)  [argument]
    [all...]
xe_migrate.c
    104: xe_vm_lock(m->q->vm, false);
    106: xe_vm_unlock(m->q->vm);
    112: xe_vm_close_and_put(m->q->vm);
    135: struct xe_vm *vm)
    140: u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
    155: bo = xe_bo_create_pin_map(vm->xe, tile, vm,
    163: entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
    164: xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
    170: entry = vm …
    134: xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm)  [argument]
    338: struct xe_vm *vm;  [local]
    490: struct xe_vm *vm = m->q->vm;  [local]
    1168: xe_migrate_update_pgtables_cpu(struct xe_migrate *m, struct xe_vm *vm, struct xe_bo *bo, const struct xe_vm_pgtable_update *updates, u32 num_updates, bool wait_vm, struct xe_migrate_pt_update *pt_update)  [argument]
    1216: no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs)  [argument]
    1269: xe_migrate_update_pgtables(struct xe_migrate *m, struct xe_vm *vm, struct xe_bo *bo, struct xe_exec_queue *q, const struct xe_vm_pgtable_update *updates, u32 num_updates, struct xe_sync_entry *syncs, u32 num_syncs, struct xe_migrate_pt_update *pt_update)  [argument]
    [all...]
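The xe_migrate.c hits at 104, 106 and 112 outline how the driver tears down its migration VM: take the VM lock around cleanup of pinned objects, drop it, then release the final VM reference. A minimal sketch of that pattern follows; the function name and the elided unpin step are illustrative assumptions, while the xe_vm_lock/xe_vm_unlock/xe_vm_close_and_put signatures come from the xe_vm.c hits above (3224, 3238, 1491).

    /* Hypothetical teardown helper; not the kernel's actual xe_migrate code. */
    static void example_migrate_fini(struct xe_migrate *m)
    {
            /* Non-interruptible lock around pinned-object cleanup (cf. line 104). */
            xe_vm_lock(m->q->vm, false);
            /* ... unpin and free the objects owned by the migrate VM ... */
            xe_vm_unlock(m->q->vm);

            /* Drop the last reference and close the VM (cf. line 112). */
            xe_vm_close_and_put(m->q->vm);
    }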
xe_lrc.c
    696: static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)  [argument]
    698: u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile);
    708: struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
    724: lrc->bo = xe_bo_create_pin_map(xe, tile, vm,
    763: if (vm) {
    764: xe_lrc_set_ppgtt(lrc, vm);
    766: if (vm->xef)
    767: xe_drm_client_add_bo(vm->xef->client, lrc->bo);
    775: if (xe->info.has_asid && vm)
    776: xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm …
    707: xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)  [argument]
    [all...]
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_process.c
    1670: avm = &drv_priv->vm;
kfd_svm.c
    1272: svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,  [argument]
    1280: return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
    1348: struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);  [local]
    1387: r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
    1392: NULL, dma_addr, &vm->last_update);
    1404: r = amdgpu_vm_update_pdes(adev, vm, false);
    1412: *fence = dma_fence_get(vm->last_update);
    1486: struct amdgpu_vm *vm;  [local]
    1499: vm = drm_priv_to_vm(pdd->drm_priv);
    1501: r = amdgpu_vm_lock_pd(vm, …
    2717: struct amdgpu_vm *vm;  [local]
    3229: struct amdgpu_vm *vm;  [local]
    [all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_amdkfd_gpuvm.c
    84: if (entry->bo_va->base.vm == avm)
    375: struct amdgpu_vm *vm;  [local]
    388: vm = vm_bo->vm;
    389: if (!vm)
    392: info = vm->process_info;
    467: static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,  [argument]
    470: struct amdgpu_bo *pd = vm->root.bo;
    474: ret = amdgpu_vm_validate(adev, vm, ticket,
    481: vm …
    486: vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)  [argument]
    867: kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, struct amdgpu_vm *vm, bool is_aql)  [argument]
    1141: reserve_bo_and_vm(struct kgd_mem *mem, struct amdgpu_vm *vm, struct bo_vm_reservation_context *ctx)  [argument]
    1182: reserve_bo_and_cond_vms(struct kgd_mem *mem, struct amdgpu_vm *vm, enum bo_vm_match map_type, struct bo_vm_reservation_context *ctx)  [argument]
    1250: struct amdgpu_vm *vm = bo_va->base.vm;  [local]
    1367: init_kfd_vm(struct amdgpu_vm *vm, void **process_info, struct dma_fence **ef)  [argument]
    1550: amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm)  [argument]
    2095: struct amdgpu_vm *vm;  [local]
    [all...]
gfx_v9_0.c
    1808: u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
    1810: soc15_grbm_select(adev, me, pipe, q, vm, 0);
    6984: 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
    7021: 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
    1807: gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev, u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)  [argument]
gfx_v11_0.c
    855: u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
    857: soc21_grbm_select(adev, me, pipe, q, vm);
    6247: 8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
    6281: 8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
    854: gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev, u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)  [argument]
gfx_v10_0.c
    4330: u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
    4332: nv_grbm_select(adev, me, pipe, q, vm);
    9245: 8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
    9279: 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
    4329: gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev, u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)  [argument]
amdgpu_umsch_mm.c
    72: struct amdgpu_vm *vm;  [member in struct umsch_mm_test]
    79: static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,  [argument]
    96: r = amdgpu_vm_lock_pd(vm, &exec, 0);
    102: *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
    122: r = amdgpu_vm_update_pdes(adev, vm, false);
    126: amdgpu_sync_fence(&sync, vm->last_update);
    145: static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,  [argument]
    159: r = amdgpu_vm_lock_pd(vm, &exec, 0);
    216: queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(test->vm->root.bo);
    322: test->vm …
    [all...]
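The map_ring_data() hits above (96, 102, 122, 126) follow the common amdgpu mapping flow: lock the page directory under a drm_exec context, attach a bo_va to the VM, update the page-directory entries, then synchronize on vm->last_update. A hedged sketch of that sequence, with the drm_exec setup, the VA-range mapping step, and error unwinding elided; the signatures used are the ones visible in the amdgpu_vm.c hits below.

    /* Illustrative only: the caller is assumed to drive the drm_exec loop. */
    static int example_map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                     struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
                                     struct drm_exec *exec, struct amdgpu_sync *sync)
    {
            int r;

            r = amdgpu_vm_lock_pd(vm, exec, 0);             /* cf. line 96 */
            if (r)
                    return r;

            *bo_va = amdgpu_vm_bo_add(adev, vm, bo);        /* cf. line 102 */

            /* ... map the VA range to the BO here (elided) ... */

            r = amdgpu_vm_update_pdes(adev, vm, false);     /* cf. line 122 */
            if (r)
                    return r;

            /* Wait on the page-table update before the mapping is used. */
            amdgpu_sync_fence(sync, vm->last_update);       /* cf. line 126 */
            return 0;
    }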
amdgpu_mes.c
    255: struct amdgpu_vm *vm)
    294: process->vm = vm;
    297: process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
    1095: dma_fence_wait(gang->process->vm->last_update, false);
    1180: struct amdgpu_vm *vm,
    1198: r = amdgpu_vm_lock_pd(vm, &exec, 0);
    1204: bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
    1228: r = amdgpu_vm_update_pdes(adev, vm, false);
    1233: amdgpu_sync_fence(&sync, vm …
    254: amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid, struct amdgpu_vm *vm)  [argument]
    1179: amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_mes_ctx_data *ctx_data)  [argument]
    1256: struct amdgpu_vm *vm = bo_va->base.vm;  [local]
    1372: struct amdgpu_vm *vm = NULL;  [local]
    [all...]
amdgpu_vm.c
    119: * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
    121: struct amdgpu_vm *vm;  [member in struct amdgpu_vm_tlb_seq_struct]
    130: * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
    133: * @vm: amdgpu_vm pointer
    140: int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,  [argument]
    145: if (vm->pasid == pasid)
    148: if (vm->pasid) {
    149: r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
    153: vm->pasid = 0;
    157: r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, …
    179: struct amdgpu_vm *vm = vm_bo->vm;  [local]
    293: amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)  [argument]
    323: amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, struct amdgpu_vm *vm, struct amdgpu_bo *bo)  [argument]
    368: amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, unsigned int num_fences)  [argument]
    385: amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm)  [argument]
    394: amdgpu_vm_init_entities(struct amdgpu_device *adev, struct amdgpu_vm *vm)  [argument]
    415: amdgpu_vm_fini_entities(struct amdgpu_vm *vm)  [argument]
    430: amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)  [argument]
    461: amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket, int (*validate)(void *p, struct amdgpu_bo *bo), void *param)  [argument]
    554: amdgpu_vm_ready(struct amdgpu_vm *vm)  [argument]
    763: amdgpu_vm_bo_find(struct amdgpu_vm *vm, struct amdgpu_bo *bo)  [argument]
    816: amdgpu_vm_update_pdes(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate)  [argument]
    912: amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate, bool unlocked, bool flush_tlb, bool allow_override, struct dma_resv *resv, uint64_t start, uint64_t last, uint64_t flags, uint64_t offset, uint64_t vram_base, struct ttm_resource *res, dma_addr_t *pages_addr, struct dma_fence **fence)  [argument]
    1060: struct amdgpu_vm *vm = bo_va->base.vm;  [local]
    1079: amdgpu_vm_get_memory(struct amdgpu_vm *vm, struct amdgpu_mem_stats *stats)  [argument]
    1121: struct amdgpu_vm *vm = bo_va->base.vm;  [local]
    1333: amdgpu_vm_free_mapping(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, struct dma_fence *fence)  [argument]
    1351: amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)  [argument]
    1379: amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence)  [argument]
    1430: amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)  [argument]
    1508: amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint32_t flush_type, uint32_t xcc_mask)  [argument]
    1555: amdgpu_vm_bo_add(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *bo)  [argument]
    1599: struct amdgpu_vm *vm = bo_va->base.vm;  [local]
    1671: struct amdgpu_vm *vm = bo_va->base.vm;  [local]
    1780: struct amdgpu_vm *vm = bo_va->base.vm;  [local]
    1829: amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size)  [argument]
    1948: amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, uint64_t addr)  [argument]
    1962: amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)  [argument]
    1999: struct amdgpu_vm *vm = bo_va->base.vm;  [local]
    2098: struct amdgpu_vm *vm = bo_base->vm;  [local]
    2239: amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)  [argument]
    2260: struct amdgpu_vm *vm;  [local]
    2291: amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)  [argument]
    2319: amdgpu_vm_create_task_info(struct amdgpu_vm *vm)  [argument]
    2334: amdgpu_vm_set_task_info(struct amdgpu_vm *vm)  [argument]
    2364: amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)  [argument]
    2478: amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)  [argument]
    2533: amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)  [argument]
    2548: amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)  [argument]
    2740: struct amdgpu_vm *vm;  [local]
    2830: amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)  [argument]
    2933: struct amdgpu_vm *vm;  [local]
    [all...]
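The amdgpu_vm_set_pasid() hits (140-157) show the pasid-to-VM mapping being kept in an xarray: equal pasids are a no-op, a stale entry is erased before a new one is stored. A hedged reconstruction of that logic from the visible fragments; the GFP flag and the exact error handling are assumptions, only the xa_erase_irq/xa_store_irq calls appear in the hits.

    int example_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                             u32 pasid)
    {
            int r;

            if (vm->pasid == pasid)         /* already mapped: nothing to do */
                    return 0;

            if (vm->pasid) {                /* drop the stale mapping first */
                    r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
                    if (r < 0)
                            return r;
                    vm->pasid = 0;
            }

            if (pasid) {                    /* install the new mapping */
                    r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
                                            GFP_KERNEL));   /* GFP flag assumed */
                    if (r < 0)
                            return r;
                    vm->pasid = pasid;
            }

            return 0;
    }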
amdgpu_cs.c
    180: struct amdgpu_vm *vm = &fpriv->vm;  [local]
    291: ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
    307: /* Use this opportunity to fill in task info for the vm */
    308: amdgpu_vm_set_task_info(vm);
    333: struct amdgpu_vm *vm = &fpriv->vm;  [local]
    367: r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
    838: struct amdgpu_vm *vm  [local]
    1090: struct amdgpu_vm *vm = &fpriv->vm;  [local]
    1764: struct amdgpu_vm *vm = &fpriv->vm;  [local]
    [all...]
amdgpu_mes.h
    148: struct amdgpu_vm *vm;  [member in struct amdgpu_mes_process]
    344: struct amdgpu_vm *vm);
    395: struct amdgpu_vm *vm,
amdgpu_drv.c
    2863: timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
/linux-master/tools/testing/selftests/kvm/aarch64/
vgic_init.c
    28: struct kvm_vm *vm;  [member in struct vm_gic]
    81: v.vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
    82: v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
    92: v.vm = vm_create_barebones();
    93: v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
    102: kvm_vm_free(v->vm);
    347: vcpus[i] = vm_vcpu_add(v.vm, i, guest_code);
    466: v.vm = vm_create(NR_VCPUS);
    467: (void)vm_vcpu_add(v.vm, 0, guest_code);
    469: v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V…
    [all...]
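The vm_gic struct above pairs a VM handle with its GIC device fd so each test can create and tear both down together (lines 81-102). A minimal sketch of that fixture pattern; the example_* names are illustrative, not the test's exact helpers, and the close() on the device fd before freeing the VM is an assumption.

    #include <unistd.h>

    struct vm_gic {
            struct kvm_vm *vm;
            int gic_fd;
    };

    static struct vm_gic example_vm_gic_create(uint32_t gic_dev_type, uint32_t nr_vcpus,
                                               void *guest_code, struct kvm_vcpu *vcpus[])
    {
            struct vm_gic v;

            v.vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);  /* cf. line 81 */
            v.gic_fd = kvm_create_device(v.vm, gic_dev_type);          /* cf. line 82 */
            return v;
    }

    static void example_vm_gic_destroy(struct vm_gic *v)
    {
            close(v->gic_fd);       /* assumed: release the device fd first */
            kvm_vm_free(v->vm);     /* cf. line 102 */
    }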
/linux-master/drivers/irqchip/
irq-gic-v3-its.c
    150: struct its_vm *vm;  [member in struct event_lpi_map]
    158: * LPIs are injected into a guest (GICv4), the event_map.vm field
    205: static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)  [argument]
    207: return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
    215: static u16 get_its_list(struct its_vm *vm)  [argument]
    224: if (require_its_list_vmovp(vm, its))
    1429: va = page_address(map->vm->vprop_page);
    1792: static void its_map_vm(struct its_node *its, struct its_vm *vm)  [argument]
    1805: vm->vlpi_count[its->list_nr]++;
    1807: if (vm …
    1825: its_unmap_vm(struct its_node *its, struct its_vm *vm)  [argument]
    4498: struct its_vm *vm = domain->host_data;  [local]
    4525: struct its_vm *vm = args;  [local]
    [all...]
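From its u16 return type and the hit at line 224, get_its_list() (line 215) appears to build a bitmask of ITS instances that need a VMOVP for a given VM, with require_its_list_vmovp() (line 205) as the predicate. A sketch of that shape; the its_nodes list, its entry member, and the is_v4 field are assumptions beyond what the hits show, while list_nr is visible at line 207.

    static u16 example_get_its_list(struct its_vm *vm)
    {
            struct its_node *its;
            u16 its_list = 0;

            /* Hypothetical iteration: the global ITS list name is assumed. */
            list_for_each_entry(its, &its_nodes, entry) {
                    if (!its->is_v4)                        /* assumed GICv4 gate */
                            continue;
                    if (require_its_list_vmovp(vm, its))    /* cf. line 224 */
                            its_list |= BIT(its->list_nr);
            }
            return its_list;
    }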
/linux-master/tools/testing/selftests/mm/
run_vmtests.sh
    161: nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
    166: echo 3 > /proc/sys/vm/drop_caches
    167: if ! echo $((lackpgs + nr_hugepgs)) > /proc/sys/vm/nr_hugepages; then
    216: echo 3 > /proc/sys/vm/drop_caches
    218: echo 1 > /proc/sys/vm/compact_memory
    265: nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
    267: echo 1 > /proc/sys/vm/nr_hugepages
    271: echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
    304: echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
    338: prev_policy=$(cat /proc/sys/vm/overcommit_memory)
    [all...]
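The script repeatedly saves a /proc/sys/vm knob, overrides it for one test, and restores it afterwards (lines 265-271 for nr_hugepages). The same pattern in C, to match the rest of this listing; a userspace sketch assuming root privileges and a mounted procfs, with the helper name chosen for illustration.

    #include <stdio.h>

    /* Save /proc/sys/vm/nr_hugepages, override it for a test, then restore it. */
    static int example_with_nr_hugepages(long tmp_value)
    {
            const char *path = "/proc/sys/vm/nr_hugepages";
            long saved;
            FILE *f;

            f = fopen(path, "r");                   /* cf. line 265 */
            if (!f)
                    return -1;
            if (fscanf(f, "%ld", &saved) != 1) {
                    fclose(f);
                    return -1;
            }
            fclose(f);

            f = fopen(path, "w");                   /* cf. line 267; needs root */
            if (!f)
                    return -1;
            fprintf(f, "%ld\n", tmp_value);
            fclose(f);

            /* ... run the test that needs tmp_value huge pages ... */

            f = fopen(path, "w");                   /* restore, cf. line 271 */
            if (!f)
                    return -1;
            fprintf(f, "%ld\n", saved);
            fclose(f);
            return 0;
    }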
/linux-master/tools/testing/selftests/kvm/x86_64/
vmx_dirty_log_test.c
    91: struct kvm_vm *vm;  [local]
    98: vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
    99: vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
    103: vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
    113: virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
    126: prepare_eptp(vmx, vm, 0);
    127: nested_map_memslot(vmx, vm, 0);
    128: nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
    129: nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
    133: host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM…
    [all...]
pmu_counters_test.c
    31: struct kvm_vm *vm;  [local]
    33: vm = vm_create_with_one_vcpu(vcpu, guest_code);
    34: vm_init_descriptor_tables(vm);
    37: sync_global_to_guest(vm, kvm_pmu_version);
    38: sync_global_to_guest(vm, is_forced_emulation_enabled);
    48: return vm;
    293: struct kvm_vm *vm;  [local]
    299: vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events,
    309: kvm_vm_free(vm);
    457: struct kvm_vm *vm;  [local]
    524: struct kvm_vm *vm;  [local]
    [all...]
/linux-master/tools/testing/selftests/kvm/
set_memory_region_test.c
    114: struct kvm_vm *vm;  [local]
    118: vm = vm_create_with_one_vcpu(vcpu, guest_code);
    120: vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
    128: gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
    131: virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);
    134: hva = addr_gpa2hva(vm, MEM_REGION_GPA);
    142: return vm;
    183: struct kvm_vm *vm;  [local]
    186: vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);
    188: hva = addr_gpa2hva(vm, MEM_REGION_GPA…
    264: struct kvm_vm *vm;  [local]
    314: struct kvm_vm *vm;  [local]
    333: struct kvm_vm *vm;  [local]
    395: struct kvm_vm *vm;  [local]
    449: test_invalid_guest_memfd(struct kvm_vm *vm, int memfd, size_t offset, const char *msg)  [argument]
    460: struct kvm_vm *vm, *vm2;  [local]
    496: struct kvm_vm *vm;  [local]
    [all...]
max_guest_memory_test.c
    66: struct kvm_vm *vm = vcpu->vm;  [local]
    69: vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);
    89: static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,  [argument]
    104: ~((uint64_t)vm->page_size - 1);
    172: struct kvm_vm *vm;  [local]
    214: vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
    216: max_gpa = vm->max_gfn << vm->page_shift;
    226: for (i = 0; i < slot_size; i += vm …
    [all...]
/linux-master/tools/arch/s390/include/uapi/asm/
kvm.h
    276: struct kvm_s390_pv_info_vm vm;  [member in an anonymous union inside struct kvm_s390_pv_info]
    384: /* kvm attr_group on vm fd */
/linux-master/kernel/
fork.c
    203: static bool try_release_thread_stack_to_cache(struct vm_struct *vm)  [argument]
    208: if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
    251: static int memcg_charge_kernel_stack(struct vm_struct *vm)  [argument]
    257: BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
    260: ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
    268: memcg_kmem_uncharge_page(vm->pages[i], 0);
    274: struct vm_struct *vm;  [local]
    317: vm = find_vm_area(stack);
    318: if (memcg_charge_kernel_stack(vm)) {
    327: tsk->stack_vm_area = vm;
    537: struct vm_struct *vm = task_stack_vm_area(tsk);  [local]
    557: struct vm_struct *vm;  [local]
    [all...]
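memcg_charge_kernel_stack() (line 251) charges each page of a vmapped thread stack to the memory cgroup and, on failure, uncharges whatever it already charged. A sketch of that charge-then-unwind loop reconstructed from the hits at 257, 260 and 268; the loop bounds and the exact failure path are inferred, only the two memcg calls and the BUG_ON are visible above.

    static int example_charge_stack(struct vm_struct *vm)
    {
            int i, ret, nr_charged = 0;

            /* A vmapped stack covers exactly THREAD_SIZE bytes of pages (cf. 257). */
            BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

            for (i = 0; i < vm->nr_pages; i++) {
                    ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0); /* cf. 260 */
                    if (ret)
                            goto err;
                    nr_charged++;
            }
            return 0;
    err:
            /* Unwind: uncharge only the pages charged so far. */
            for (i = 0; i < nr_charged; i++)
                    memcg_kmem_uncharge_page(vm->pages[i], 0);                 /* cf. 268 */
            return ret;
    }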