Searched refs:shadow (Results 101 - 109 of 109) sorted by relevance
12345
/linux-master/mm/ |
H A D | vmscan.c | 688 void *shadow = NULL; local 734 shadow = workingset_eviction(folio, target_memcg); 735 __delete_from_swap_cache(folio, swap, shadow); 744 * Remember a shadow entry for reclaimed file cache in 756 * exceptional entries and shadow exceptional entries in the 761 shadow = workingset_eviction(folio, target_memcg); 762 __filemap_remove_folio(folio, shadow);
|
H A D | memory.c | 3965 void *shadow = NULL; local 4059 shadow = get_shadow_from_swap_cache(entry); 4060 if (shadow) 4061 workingset_refault(folio, shadow);
|
/linux-master/drivers/infiniband/hw/irdma/ |
H A D | verbs.c | 563 init_info->shadow_area_pa = qpmr->shadow; 2139 info.shadow_area_pa = cqmr->shadow; 2483 qpmr->shadow = (dma_addr_t)arr[total]; 2508 cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
|
/linux-master/ |
H A D | Makefile | 943 CC_FLAGS_SCS := -fsanitize=shadow-call-stack
|
/linux-master/arch/x86/kvm/ |
H A D | x86.c | 927 * indirect shadow MMUs. If paging is disabled, no updates are needed 1625 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that 3627 * a forced sync of the shadow page tables. Ensure all the 5399 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 5486 events->interrupt.shadow); 7793 * shadow page table for L2 guest. 8885 * the issue by unprotecting the gfn, as zapping the shadow page will 8906 * writing instruction, it means the VM-EXIT is caused by shadow 8907 * page protected, we can zap the shadow page and retry this 9003 u32 shadow; local [all...] |
/linux-master/drivers/infiniband/hw/qib/ |
H A D | qib_iba7322.c | 548 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */ 554 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */ 555 u64 gpio_mask; /* shadow the gpio mask register */ 556 u64 extctrl; /* shadow the gpio output enable, etc... */ 648 u64 ibcctrl_a; /* krp_ibcctrl_a shadow */ 649 u64 ibcctrl_b; /* krp_ibcctrl_b shadow */ 2410 * Not on re-init after reset, establish shadow 2985 * Clear any troublemakers, and update chip from shadow 4604 /* First the dd ones that are "sticky", saved in shadow */ 4615 /* Then the ppd ones that are "sticky", saved in shadow */ 7004 unsigned long shadow = 0; local [all...] |
/linux-master/arch/x86/kvm/vmx/ |
H A D | vmx.c | 2865 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) argument 2883 if (shadow) 3975 * Mark the desired intercept state in shadow bitmap, this is needed 4017 * Mark the desired intercept state in shadow bitmap, this is needed 4311 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ 6297 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 6300 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 8459 /* NX support is required for shadow paging. */
|
/linux-master/drivers/gpu/drm/i915/gvt/ |
H A D | cmd_parser.c | 795 } else {/* shadow batch buffer */ 873 gvt_vgpu_err("failed to get the 4-level shadow vm\n"); 886 gvt_vgpu_err("invalid shared shadow vm type\n"); 1974 * ip_va saves the virtual address of the shadow batch buffer, while 1976 * As the shadow batch buffer is just a copy from the original one, 1977 * it should be right to use shadow batch buffer'va and original batch 1978 * buffer's gma in pair. After all, we don't want to pin the shadow 2024 gvt_vgpu_err("invalid shadow batch buffer\n"); 2951 /* get shadow ring buffer va */ 2983 gvt_vgpu_err("fail to shadow workloa [all...]
/linux-master/tools/power/pm-graph/ |
H A D | sleepgraph.py | 4994 .callgraph {margin-top:30px;box-shadow:5px 5px 20px black;}\n\ 5047 #devicedetail {min-height:100px;box-shadow:5px 5px 20px black;}\n\
|
Completed in 458 milliseconds
12345