Searched refs:shadow (Results 1 - 25 of 109) sorted by last modified time

/linux-master/rust/
Makefile
291 -fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \
/linux-master/
Makefile
936 CC_FLAGS_SCS := -fsanitize=shadow-call-stack
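
CC_FLAGS_SCS enables Clang's shadow call stack, which keeps return addresses on a separate stack so that smashing the ordinary data stack cannot redirect a return. A minimal userspace sketch of the idea only; the names (call, ret_shadow, the explicit arrays) are invented, and the real mechanism is instrumentation emitted by the compiler:

    #include <stdio.h>

    static int data_stack[8];    /* locals + return ids (clobberable) */
    static int shadow_stack[8];  /* return ids only, kept apart       */
    static int sp, ssp;

    static void call(int ret_id)
    {
        data_stack[sp++]    = ret_id;  /* classic ABI: id beside data */
        shadow_stack[ssp++] = ret_id;  /* SCS: second copy elsewhere  */
    }

    static int ret_shadow(void)
    {
        --sp;                          /* discard the clobberable copy */
        return shadow_stack[--ssp];    /* trust only the shadow copy   */
    }

    int main(void)
    {
        call(42);
        data_stack[sp - 1] = 1337;     /* simulated stack-smash        */
        printf("return id on data stack:   %d\n", data_stack[sp - 1]);
        printf("return id on shadow stack: %d\n", ret_shadow());
        return 0;
    }
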
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_object.c
720 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
722 * @vmbo: BO that will be inserted into the shadow list
724 * Insert a BO to the shadow list.
732 vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
733 vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
738 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
740 * @shadow: &amdgpu_bo shadow to be restored
743 * Copies a buffer object's shadow content back to the object.
744 * This is used for recovering a buffer from its shadow i
750 amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence) argument
[all...]
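
These amdgpu hits concern shadow buffer objects: a VRAM buffer (VM page tables, in this case) keeps a system-memory shadow copy so its contents can be restored after a GPU reset wipes VRAM. A toy model of the pattern; the struct layout and helper names below are invented for the demo, not the driver's actual API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct bo {
        unsigned char *mem;   /* "VRAM" contents             */
        struct bo *shadow;    /* system-memory copy, or NULL */
        size_t size;
    };

    static struct bo *bo_create(size_t size, int shadowed)
    {
        struct bo *b = calloc(1, sizeof(*b));
        b->mem = calloc(1, size);
        b->size = size;
        if (shadowed)
            b->shadow = bo_create(size, 0);
        return b;
    }

    static void bo_sync_shadow(struct bo *b)    /* VRAM -> shadow */
    {
        if (b->shadow)
            memcpy(b->shadow->mem, b->mem, b->size);
    }

    static void bo_restore_shadow(struct bo *b) /* shadow -> VRAM */
    {
        if (b->shadow)
            memcpy(b->mem, b->shadow->mem, b->size);
    }

    int main(void)
    {
        struct bo *pt = bo_create(16, 1);
        memcpy(pt->mem, "page table data", 16);
        bo_sync_shadow(pt);
        memset(pt->mem, 0, pt->size);   /* GPU reset: VRAM lost */
        bo_restore_shadow(pt);
        printf("recovered: %s\n", (char *)pt->mem);
        return 0;
    }
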
amdgpu_vm.c
467 struct amdgpu_bo *shadow; local
488 shadow = amdgpu_bo_shadowed(bo);
493 if (shadow) {
494 r = validate(param, shadow);
2093 /* shadow bo doesn't have bo base, its validation needs its parent */
2423 amdgpu_bo_unref(&root->shadow);
2515 /* Free the shadow bo for compute VM */
2516 amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
amdgpu_object.h
133 struct amdgpu_bo *shadow; member in struct:amdgpu_bo_vm
283 return to_amdgpu_bo_vm(bo)->shadow;
347 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
amdgpu_cs.c
571 struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata; local
574 if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
578 p->jobs[i]->shadow_va = shadow->shadow_va;
579 p->jobs[i]->csa_va = shadow->csa_va;
580 p->jobs[i]->gds_va = shadow->gds_va;
582 shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
amdgpu_device.c
4900 struct amdgpu_bo *shadow; local
4909 dev_info(adev->dev, "recover vram bo from shadow start\n");
4912 /* If vm is compute context or adev is APU, shadow will be NULL */
4913 if (!vmbo->shadow)
4915 shadow = vmbo->shadow;
4918 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4919 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4920 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4923 r = amdgpu_bo_restore_shadow(shadow,
[all...]
/linux-master/arch/x86/kvm/
x86.c
933 * indirect shadow MMUs. If paging is disabled, no updates are needed
1631 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
3636 * a forced sync of the shadow page tables. Ensure all the
5414 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
5501 events->interrupt.shadow);
7777 * shadow page table for L2 guest.
8869 * the issue by unprotecting the gfn, as zapping the shadow page will
8890 * writing instruction, it means the VM-EXIT is caused by shadow
8891 * page protected, we can zap the shadow page and retry this
8987 u32 shadow; local
[all...]
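
Most of the x86.c hits concern KVM's shadow paging: the host builds its own page tables that mirror the guest's, resolving guest-virtual addresses directly to host-physical ones, and when the guest writes a page table that KVM has shadowed, KVM must unprotect the gfn or zap the shadow page and rebuild it. A toy single-level model; array sizes and names are invented for the demo:

    #include <stdio.h>

    #define NPAGES 8
    static int guest_pt[NPAGES];   /* gva page -> gpa page (guest-owned) */
    static int gpa_to_hpa[NPAGES]; /* gpa page -> hpa page (host-owned)  */
    static int shadow_pt[NPAGES];  /* gva page -> hpa page (host-built)  */

    static void shadow_sync(void)
    {
        for (int gva = 0; gva < NPAGES; gva++)
            shadow_pt[gva] = gpa_to_hpa[guest_pt[gva]];
    }

    int main(void)
    {
        for (int i = 0; i < NPAGES; i++) {
            guest_pt[i] = (i + 1) % NPAGES;   /* some guest mapping */
            gpa_to_hpa[i] = i + 100;          /* host placement     */
        }
        shadow_sync();  /* must be redone when the guest edits its PT */
        printf("gva 3 -> hpa %d\n", shadow_pt[3]);   /* prints 104 */
        return 0;
    }
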
/linux-master/arch/x86/kvm/vmx/
vmx.h
134 * Cache of the guest's shadow VMCS, existing outside of guest
151 * Indicates if the shadow vmcs or enlightened vmcs must be updated
641 * in order to construct shadow PTEs with the correct protections.
702 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
708 static inline struct vmcs *alloc_vmcs(bool shadow) argument
710 return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
vmx.c
2856 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) argument
2874 if (shadow)
3968 * Mark the desired intercept state in shadow bitmap, this is needed
4010 * Mark the desired intercept state in shadow bitmap, this is needed
4304 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
6279 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6282 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
8565 /* NX support is required for shadow paging. */
/linux-master/tools/arch/x86/include/uapi/asm/
kvm.h
329 /* Interrupt shadow states */
346 __u8 shadow; member in struct:kvm_vcpu_events::__anon133
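
The shadow member here records the interrupt shadow: the one-instruction window after STI or MOV SS during which interrupt delivery is blocked. Userspace can read it through the regular KVM API; a sketch that assumes an already-created vCPU fd and elides all VM setup and error paths beyond the ioctl itself:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    void show_interrupt_shadow(int vcpu_fd)
    {
        struct kvm_vcpu_events ev;

        if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) < 0) {
            perror("KVM_GET_VCPU_EVENTS");
            return;
        }
        /* Bits defined next to the snippet above in <asm/kvm.h>. */
        printf("STI shadow:    %d\n",
               !!(ev.interrupt.shadow & KVM_X86_SHADOW_INT_STI));
        printf("MOV SS shadow: %d\n",
               !!(ev.interrupt.shadow & KVM_X86_SHADOW_INT_MOV_SS));
    }
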
/linux-master/drivers/hv/
vmbus_drv.c
2157 struct resource *iter, *shadow; local
2168 * is already reserved, no shadow allocation is necessary.
2201 shadow = __request_region(iter, start, size, NULL,
2203 if (!shadow)
2208 shadow->name = (char *)*new;
/linux-master/drivers/gpu/drm/msm/adreno/
a6xx_gpu.c
77 /* Copy the shadow to the actual register */
234 * Periodically update shadow-wptr if needed, so that we
237 * ringbuffer state, simply due to looking at a shadow
320 * Periodically update shadow-wptr if needed, so that we
323 * ringbuffer state, simply due to looking at a shadow
1632 * privileged buffer to store the RPTR shadow
1636 a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
1642 if (IS_ERR(a6xx_gpu->shadow))
1643 return PTR_ERR(a6xx_gpu->shadow);
1645 msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
[all...]
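
The a6xx hits are about the read-pointer (RPTR) shadow: the GPU publishes its ring-buffer read pointer into shared memory so the CPU can check ring fullness without a slow register read, at the cost of the shadow occasionally lagging the real hardware state. A toy single-threaded model with invented names:

    #include <stdio.h>

    #define RING_SZ 8
    static unsigned ring[RING_SZ];
    static unsigned wptr;                 /* CPU-owned write pointer */
    static volatile unsigned rptr_shadow; /* GPU writes, CPU reads   */

    static int ring_full(void)
    {
        return (wptr - rptr_shadow) >= RING_SZ;
    }

    static void gpu_consume(unsigned n)   /* pretend the GPU ran n cmds */
    {
        rptr_shadow = rptr_shadow + n;    /* GPU updates the shadow     */
    }

    int main(void)
    {
        while (!ring_full()) {
            unsigned slot = wptr++;
            ring[slot % RING_SZ] = slot;
        }
        printf("ring full at wptr=%u rptr=%u\n", wptr, rptr_shadow);
        gpu_consume(4);
        printf("after consume, full=%d\n", ring_full());
        return 0;
    }
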
/linux-master/mm/
memory.c
3334 * mmu page tables (such as kvm shadow page tables), we want the
3942 void *shadow = NULL; local
4036 shadow = get_shadow_from_swap_cache(entry);
4037 if (shadow)
4038 workingset_refault(folio, shadow);
filemap.c
136 page_cache_delete(struct address_space *mapping, struct folio *folio, void *shadow) argument
149 xas_store(&xas, shadow);
226 void __filemap_remove_folio(struct folio *folio, void *shadow) argument
232 page_cache_delete(mapping, folio, shadow);
299 /* A swap/dax/shadow entry got inserted? Skip it. */
943 void *shadow = NULL; local
947 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
960 if (!(gfp & __GFP_WRITE) && shadow)
961 workingset_refault(folio, shadow);
1802 * it is returned with an increased refcount. If it is a shadow entr
4190 void *shadow = (void *)folio; local
[all...]
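
filemap.c stores a shadow entry in a page's slot when the page is evicted (page_cache_delete) and hands it to workingset_refault() when the page comes back. A toy model of that slot lifecycle, using the low pointer bit as a value tag the way xarray value entries do; the slot array and field layout are invented:

    #include <stdio.h>
    #include <stdint.h>

    #define SLOTS 4
    #define SHADOW_TAG 1UL  /* low bit set: shadow entry, not a page;
                             * safe because real pages are aligned     */

    static uintptr_t slot[SLOTS];

    static void evict(int i, unsigned long when)
    {
        slot[i] = (when << 1) | SHADOW_TAG;  /* remember eviction time */
    }

    static void insert(int i, void *page, unsigned long now)
    {
        if (slot[i] & SHADOW_TAG)            /* shadow still here?     */
            printf("refault in slot %d after %lu ticks\n",
                   i, (unsigned long)(now - (slot[i] >> 1)));
        slot[i] = (uintptr_t)page;
    }

    int main(void)
    {
        int page = 0;
        evict(2, 100);
        insert(2, &page, 130);  /* refault in slot 2 after 30 ticks */
        return 0;
    }
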
workingset.c
177 * slot of the evicted page. This is called a shadow entry.
179 * On cache misses for which there are shadow entries, an eligible
210 static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, argument
213 unsigned long entry = xa_to_value(shadow);
260 * Tests if the shadow entry is for a folio that was recently evicted.
261 * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
263 static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, argument
271 unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);
280 static void lru_gen_refault(struct folio *folio, void *shadow) argument
293 recent = lru_gen_test_recent(shadow, typ
334 lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, unsigned long *token, bool *workingset) argument
340 lru_gen_refault(struct folio *folio, void *shadow) argument
418 workingset_test_recent(void *shadow, bool file, bool *workingset) argument
530 workingset_refault(struct folio *folio, void *shadow) argument
[all...]
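
workingset.c packs several small fields (memcg id, node, an eviction counter, a workingset bit) into the one unsigned long that fits in the page-cache slot; unpack_shadow() above reverses the packing. A sketch of the same shift-and-mask scheme with made-up field widths (the kernel's real widths differ and are config-dependent):

    #include <stdio.h>

    #define WS_BITS      1
    #define NODE_BITS    6
    #define MEMCG_BITS  16

    static unsigned long pack_shadow(int memcg, int node,
                                     unsigned long evict, int ws)
    {
        unsigned long e = evict;
        e = (e << MEMCG_BITS) | memcg;
        e = (e << NODE_BITS)  | node;
        e = (e << WS_BITS)    | ws;
        return e;
    }

    static void unpack_shadow(unsigned long e, int *memcg, int *node,
                              unsigned long *evict, int *ws)
    {
        *ws    = e & ((1 << WS_BITS) - 1);    e >>= WS_BITS;
        *node  = e & ((1 << NODE_BITS) - 1);  e >>= NODE_BITS;
        *memcg = e & ((1 << MEMCG_BITS) - 1); e >>= MEMCG_BITS;
        *evict = e;
    }

    int main(void)
    {
        int memcg, node, ws;
        unsigned long evict;
        unsigned long s = pack_shadow(7, 3, 123456, 1);
        unpack_shadow(s, &memcg, &node, &evict, &ws);
        printf("memcg=%d node=%d evict=%lu ws=%d\n",
               memcg, node, evict, ws);
        return 0;
    }
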
vmscan.c
688 void *shadow = NULL; local
734 shadow = workingset_eviction(folio, target_memcg);
735 __delete_from_swap_cache(folio, swap, shadow);
744 * Remember a shadow entry for reclaimed file cache in
756 * exceptional entries and shadow exceptional entries in the
761 shadow = workingset_eviction(folio, target_memcg);
762 __filemap_remove_folio(folio, shadow);
swap_state.c
141 __delete_from_swap_cache(struct folio *folio, swp_entry_t entry, void *shadow) argument
157 void *entry = xas_store(&xas, shadow);
435 void *shadow = NULL; local
518 if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
523 if (shadow)
524 workingset_refault(folio, shadow);
/linux-master/arch/x86/include/uapi/asm/
kvm.h
329 /* Interrupt shadow states */
346 __u8 shadow; member in struct:kvm_vcpu_events::__anon4
/linux-master/scripts/kconfig/lxdialog/
util.c
21 dlg.shadow.atr = A_NORMAL;
57 DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, true);
86 DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, false);
167 init_one_color(&dlg.shadow);
485 wattrset(win, dlg.shadow.atr);
dialog.h
83 struct dialog_color shadow; member in struct:dialog_info
/linux-master/drivers/infiniband/hw/irdma/
verbs.c
563 init_info->shadow_area_pa = qpmr->shadow;
2139 info.shadow_area_pa = cqmr->shadow;
2483 qpmr->shadow = (dma_addr_t)arr[total];
2508 cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
/linux-master/tools/scripts/
Makefile.include
128 EXTRA_WARNINGS += -Wno-shadow
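
-Wno-shadow disables the compiler warning for a declaration that hides one of the same name in an enclosing scope, which the tools build turns off wholesale. For reference, the kind of code -Wshadow (when enabled) complains about:

    /* Compile with: cc -Wshadow -c shadow_warn.c */
    #include <stddef.h>

    size_t total;

    size_t add(size_t len)
    {
        size_t sum = len;
        for (size_t i = 0; i < 3; i++) {
            size_t len = i * 2;   /* warning: 'len' shadows a parameter */
            sum += len;
        }
        return sum + total;
    }
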
/linux-master/mm/kmsan/
hooks.c
147 * This function creates new shadow/origin pages for the physical pages mapped
148 * into the virtual memory. If those physical pages already had shadow/origin,
156 struct page *shadow, *origin; local
166 shadow = alloc_pages(gfp_mask, 1);
168 if (!shadow || !origin) {
174 vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
180 shadow = NULL;
200 * Something went wrong. Clean up shadow/origin pages allocated
204 if (shadow)
205 __free_pages(shadow,
224 struct page *shadow, *origin; local
[all...]
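
KMSAN pairs every byte of kernel memory with a shadow byte saying whether it is initialized, plus an origin id recording where uninitialized data came from; the hits above allocate those metadata pages for vmalloc ranges. A toy byte-granular model, all names invented:

    #include <stdio.h>
    #include <string.h>

    #define MEM_SZ 16
    static unsigned char mem[MEM_SZ];
    static unsigned char shadow[MEM_SZ];  /* 0xff = uninitialized     */
    static unsigned int  origin[MEM_SZ];  /* allocation/copy site id  */

    static void poison(int off, int len, unsigned int site)
    {
        memset(shadow + off, 0xff, len);
        for (int i = 0; i < len; i++)
            origin[off + i] = site;
    }

    static void store(int off, unsigned char v)
    {
        mem[off] = v;
        shadow[off] = 0;              /* a write unpoisons the byte   */
    }

    static void check_load(int off)
    {
        if (shadow[off])
            printf("use of uninit byte %d (origin %u)\n",
                   off, origin[off]);
    }

    int main(void)
    {
        poison(0, MEM_SZ, 1234);  /* fresh allocation, site id 1234  */
        store(3, 42);
        check_load(3);            /* fine                            */
        check_load(4);            /* reports origin 1234             */
        return 0;
    }
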
/linux-master/mm/kasan/
shadow.c
3 * This file contains KASAN runtime code that manages shadow memory for
132 * Perform shadow offset calculation based on untagged address, as
157 u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size); local
158 *shadow = size & KASAN_GRANULE_MASK;
168 * Perform shadow offset calculation based on untagged address, as
235 * If shadow is mapped already than it must have been mapped
260 * In the latter case we can use vfree() to free shadow.
264 * Currently it's not possible to free shadow mapped
337 * User Mode Linux maps enough shadow memory for all of virtual memory
373 * STORE shadow(
[all...]
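
KASAN maps each 8-byte granule of memory to one shadow byte: 0 means fully addressable, 1..7 mean only that many leading bytes are valid (the "*shadow = size & KASAN_GRANULE_MASK" hit above writes exactly that partial-granule code), and poison values mark everything else. A userspace sketch of the mapping and check, with a local pool standing in for the KASAN_SHADOW_OFFSET arithmetic:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define GRANULE_SHIFT 3
    #define GRANULE_SIZE  (1 << GRANULE_SHIFT)
    #define POOL_SZ       64

    static unsigned char pool[POOL_SZ];
    static unsigned char shadow[POOL_SZ / GRANULE_SIZE];

    static unsigned char *mem_to_shadow(void *addr)
    {
        uintptr_t off = (unsigned char *)addr - pool;
        return &shadow[off >> GRANULE_SHIFT];  /* (addr >> 3) + offset */
    }

    static void poison_alloc(void *addr, size_t size)
    {
        unsigned char *s = mem_to_shadow(addr);
        memset(s, 0, size >> GRANULE_SHIFT);       /* full granules    */
        if (size & (GRANULE_SIZE - 1))             /* trailing partial */
            s[size >> GRANULE_SHIFT] = size & (GRANULE_SIZE - 1);
    }

    static int valid_byte(void *addr)
    {
        unsigned char sv = *mem_to_shadow(addr);
        unsigned last = (uintptr_t)((unsigned char *)addr - pool)
                        & (GRANULE_SIZE - 1);
        return sv == 0 || last < sv;
    }

    int main(void)
    {
        memset(shadow, 0xFF, sizeof(shadow));  /* everything poisoned */
        poison_alloc(pool, 13);                /* "allocate" 13 bytes */
        printf("byte 12 ok? %d\n", valid_byte(pool + 12));  /* 1 */
        printf("byte 13 ok? %d\n", valid_byte(pool + 13));  /* 0 */
        return 0;
    }
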

Completed in 627 milliseconds
