Searched refs:shadow (Results 101 - 109 of 109) sorted by relevance

12345

/linux-master/mm/
H A Dvmscan.c688 void *shadow = NULL; local
734 shadow = workingset_eviction(folio, target_memcg);
735 __delete_from_swap_cache(folio, swap, shadow);
744 * Remember a shadow entry for reclaimed file cache in
756 * exceptional entries and shadow exceptional entries in the
761 shadow = workingset_eviction(folio, target_memcg);
762 __filemap_remove_folio(folio, shadow);
H A Dmemory.c3334 * mmu page tables (such as kvm shadow page tables), we want the
3942 void *shadow = NULL; local
4036 shadow = get_shadow_from_swap_cache(entry);
4037 if (shadow)
4038 workingset_refault(folio, shadow);
/linux-master/drivers/infiniband/hw/irdma/
H A Dverbs.c563 init_info->shadow_area_pa = qpmr->shadow;
2139 info.shadow_area_pa = cqmr->shadow;
2483 qpmr->shadow = (dma_addr_t)arr[total];
2508 cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
/linux-master/
H A DMakefile936 CC_FLAGS_SCS := -fsanitize=shadow-call-stack
/linux-master/arch/x86/kvm/
H A Dx86.c933 * indirect shadow MMUs. If paging is disabled, no updates are needed
1631 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
3636 * a forced sync of the shadow page tables. Ensure all the
5414 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
5501 events->interrupt.shadow);
7777 * shadow page table for L2 guest.
8869 * the issue by unprotecting the gfn, as zapping the shadow page will
8890 * writing instruction, it means the VM-EXIT is caused by shadow
8891 * page protected, we can zap the shadow page and retry this
8987 u32 shadow; local
[all...]
/linux-master/drivers/infiniband/hw/qib/
H A Dqib_iba7322.c548 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
554 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
555 u64 gpio_mask; /* shadow the gpio mask register */
556 u64 extctrl; /* shadow the gpio output enable, etc... */
648 u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
649 u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
2410 * Not on re-init after reset, establish shadow
2985 * Clear any troublemakers, and update chip from shadow
4605 /* First the dd ones that are "sticky", saved in shadow */
4616 /* Then the ppd ones that are "sticky", saved in shadow */
7005 unsigned long shadow = 0; local
[all...]
/linux-master/arch/x86/kvm/vmx/
H A Dvmx.c2856 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) argument
2874 if (shadow)
3968 * Mark the desired intercept state in shadow bitmap, this is needed
4010 * Mark the desired intercept state in shadow bitmap, this is needed
4304 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
6279 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6282 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
8565 /* NX support is required for shadow paging. */
/linux-master/drivers/gpu/drm/i915/gvt/
H A Dcmd_parser.c794 } else {/* shadow batch buffer */
872 gvt_vgpu_err("failed to get the 4-level shadow vm\n");
885 gvt_vgpu_err("invalid shared shadow vm type\n");
1973 * ip_va saves the virtual address of the shadow batch buffer, while
1975                         * As the shadow batch buffer is just a copy from the original one,
1976 * it should be right to use shadow batch buffer'va and original batch
1977 * buffer's gma in pair. After all, we don't want to pin the shadow
2023 gvt_vgpu_err("invalid shadow batch buffer\n");
2950 /* get shadow ring buffer va */
2982                 gvt_vgpu_err("fail to shadow workload
[all...]
/linux-master/tools/power/pm-graph/
H A Dsleepgraph.py4994 .callgraph {margin-top:30px;box-shadow:5px 5px 20px black;}\n\
5047 #devicedetail {min-height:100px;box-shadow:5px 5px 20px black;}\n\

Completed in 517 milliseconds

12345