Searched refs:shadow (Results 51 - 75 of 109) sorted by relevance

/linux-master/arch/x86/mm/
H A Dkasan_init_64.c321 unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va); local
323 return round_down(shadow, PAGE_SIZE);
328 unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va); local
330 return round_up(shadow, PAGE_SIZE);
350 * We use the same shadow offset for 4- and 5-level paging to
397 * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
414 * Populate the shadow for the shared portion of the CPU entry area.
436 * kasan_early_shadow_page has been used as early shadow memory, thus
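The kasan_init_64.c hits above round a region's shadow addresses to page boundaries before mapping shadow pages. As a reference point, here is a minimal userspace sketch of generic KASAN's address-to-shadow mapping (one shadow byte per 8-byte granule); the offset and page-size constants are the common x86-64 values, used purely for illustration:

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET      0xdffffc0000000000UL /* illustrative x86-64 value */
#define PAGE_SIZE                4096UL

static unsigned long mem_to_shadow(unsigned long addr)
{
    /* One shadow byte covers 8 bytes: shadow = (addr >> 3) + offset. */
    return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

/* Round the shadow of a region down/up to page boundaries, as the
 * kasan_init_64.c helpers at lines 321-330 do before mapping shadow. */
static unsigned long shadow_page_start(unsigned long va)
{
    return mem_to_shadow(va) & ~(PAGE_SIZE - 1);
}

static unsigned long shadow_page_end(unsigned long va)
{
    return (mem_to_shadow(va) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
    unsigned long va = 0xffff888000000000UL;

    printf("shadow of %#lx: %#lx, page range [%#lx, %#lx)\n",
           va, mem_to_shadow(va), shadow_page_start(va), shadow_page_end(va));
    return 0;
}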
/linux-master/scripts/
H A DMakefile.kasan32 # -fasan-shadow-offset fails without -fsanitize
34 -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
/linux-master/drivers/infiniband/hw/irdma/
H A Dverbs.h64 dma_addr_t shadow; member in struct:irdma_cq_mr
71 dma_addr_t shadow; member in struct:irdma_qp_mr
/linux-master/scripts/kconfig/lxdialog/
H A Dutil.c21 dlg.shadow.atr = A_NORMAL;
57 DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, true);
86 DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, false);
167 init_one_color(&dlg.shadow);
485 wattrset(win, dlg.shadow.atr);
H A Ddialog.h83 struct dialog_color shadow; member in struct:dialog_info
/linux-master/drivers/gpu/drm/i915/gvt/
H A Dscheduler.c80 * when populating shadow ctx from guest, we should not override oa related
390 /* allocate shadow ring buffer */
393 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
400 /* get shadow ring buffer va */
449 shadow ppgtt. */
468 rq = i915_request_create(s->shadow[workload->engine->id]);
480 * shadow it as well, including ringbuffer, wa_ctx and ctx.
494 if (workload->shadow)
498 shadow_context_descriptor_update(s->shadow[workload->engine->id],
512 workload->shadow
[all...]
/linux-master/fs/xfs/
H A Dxfs_log_cil.c255 * The simplest solution to this problem is to allocate a shadow buffer when a
330 * if we have no shadow buffer, or it is too small, we need to
392 * old_lv, then remove the space it accounts for and make it the shadow
394 * shadow buffer, so update the pointer to it appropriately.
430 * dependent on the current state of the vector in the CIL - the shadow lv is
433 lv, then simply swap it out for the shadow lv. We don't free it - that is
465 struct xfs_log_vec *shadow; local
474 * the shadow lv on the log item.
476 shadow = lip->li_lv_shadow;
477 if (shadow
[all...]
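The xfs_log_cil.c comments above describe preallocating a shadow log vector while it is still safe to sleep in the allocator, then swapping it in under the CIL lock if the item's current vector is missing or too small. A hedged sketch of that reserve-and-swap pattern follows; struct log_vec/log_item and the helper names are simplified stand-ins, not the real XFS types:

#include <stdlib.h>

struct log_vec {
    size_t size;
    void *buf;
};

struct log_item {
    struct log_vec *lv;        /* vector currently in the CIL */
    struct log_vec *lv_shadow; /* preallocated spare */
};

/* Called while it is still safe to sleep in the allocator. */
static int item_prepare_shadow(struct log_item *lip, size_t need)
{
    if (lip->lv_shadow && lip->lv_shadow->size >= need)
        return 0; /* existing shadow is big enough */

    if (lip->lv_shadow)
        free(lip->lv_shadow->buf);
    free(lip->lv_shadow);

    lip->lv_shadow = malloc(sizeof(*lip->lv_shadow));
    if (!lip->lv_shadow)
        return -1;
    lip->lv_shadow->buf = malloc(need);
    lip->lv_shadow->size = need;
    return lip->lv_shadow->buf ? 0 : -1;
}

/* Later, under the lock: no allocation allowed, just swap pointers. */
static void item_swap_in_shadow(struct log_item *lip)
{
    struct log_vec *old = lip->lv;

    lip->lv = lip->lv_shadow;
    lip->lv_shadow = old; /* old vector becomes the next shadow */
}

int main(void)
{
    struct log_item lip = { 0 };

    if (item_prepare_shadow(&lip, 256) == 0)
        item_swap_in_shadow(&lip);
    return 0;
}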
/linux-master/drivers/infiniband/hw/qib/
H A Dqib_tx.c212 * update_send_bufs - update shadow copy of the PIO availability map
234 * to avoid conflicting updates; all we change is the shadow, and
284 unsigned long *shadow = dd->pioavailshadow; local
316 if (__test_and_set_bit((2 * i) + 1, shadow))
319 __change_bit(2 * i, shadow);
331 * First time through; shadow exhausted, but may be
391 /* Set or clear the busy bit in the shadow. */
401 * bit is set correctly in shadow, since it could
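The qib_tx.c hits above keep two shadow bits per PIO buffer: __test_and_set_bit((2 * i) + 1, ...) claims the busy bit, and __change_bit(2 * i, ...) flips the paired generation bit. A small userspace model of that encoding; the bit helpers are simplified stand-ins for the kernel's non-atomic bitops:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBUFS 64
static uint64_t shadow[(2 * NBUFS + 63) / 64];

static bool test_and_set_bit(unsigned nr, uint64_t *map)
{
    bool old = map[nr / 64] & (1ULL << (nr % 64));

    map[nr / 64] |= 1ULL << (nr % 64);
    return old;
}

static void change_bit(unsigned nr, uint64_t *map)
{
    map[nr / 64] ^= 1ULL << (nr % 64);
}

/* Try to claim buffer i, as the allocation loop above does. */
static bool claim_buf(unsigned i)
{
    if (test_and_set_bit(2 * i + 1, shadow))
        return false;           /* already busy, skip it */
    change_bit(2 * i, shadow);  /* flip the generation bit */
    return true;
}

int main(void)
{
    printf("first claim: %d, second claim: %d\n",
           claim_buf(3), claim_buf(3));
    return 0;
}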
/linux-master/drivers/gpu/drm/
H A Ddrm_fbdev_generic.c44 void *shadow = info->screen_buffer; local
51 vfree(shadow);
189 * flushing the shadow buffer. In the general case, concurrent
314 * uses a shadow buffer in system memory. The implementation blits the shadow
/linux-master/drivers/media/dvb-frontends/
H A Ds5h1420.c48 u8 shadow[256]; member in struct:s5h1420_state
76 b[1] = state->shadow[(reg - 1) & 0xff];
108 state->shadow[reg] = data;
895 memset(state->shadow, 0xff, sizeof(state->shadow));
898 state->shadow[i] = s5h1420_readreg(state, i);
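The s5h1420 driver keeps a 256-byte shadow of its register file: writes are mirrored into shadow[], and at init the shadow is reset to 0xff and refilled from the hardware (lines 895-898). A generic write-through register shadow along those lines; bus_write()/bus_read() are hypothetical stubs for the real I2C accessors:

#include <stdint.h>
#include <string.h>

struct dev_state {
    uint8_t shadow[256];
};

static void bus_write(uint8_t reg, uint8_t val) { (void)reg; (void)val; }
static uint8_t bus_read(uint8_t reg) { (void)reg; return 0; }

static void dev_writereg(struct dev_state *s, uint8_t reg, uint8_t val)
{
    bus_write(reg, val);
    s->shadow[reg] = val; /* keep the mirror in sync */
}

/* At init, mark the shadow unknown and then snapshot the hardware,
 * mirroring the memset(0xff)/readback loop quoted above. */
static void dev_init_shadow(struct dev_state *s)
{
    memset(s->shadow, 0xff, sizeof(s->shadow));
    for (unsigned i = 0; i < sizeof(s->shadow); i++)
        s->shadow[i] = bus_read((uint8_t)i);
}

int main(void)
{
    struct dev_state s;

    dev_init_shadow(&s);
    dev_writereg(&s, 0x10, 0xaa);
    return 0;
}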
/linux-master/mm/
H A Dswap_state.c142 swp_entry_t entry, void *shadow)
157 void *entry = xas_store(&xas, shadow);
435 void *shadow = NULL; local
518 if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
523 if (shadow)
524 workingset_refault(folio, shadow);
141 __delete_from_swap_cache(struct folio *folio, swp_entry_t entry, void *shadow) argument
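In the swap_state.c hits above, deleting a folio from the swap cache stores a shadow entry in its slot rather than clearing it, and a later refault passes that entry to workingset_refault(). A toy model of the store-shadow-on-delete / consume-shadow-on-refault pattern; a plain array and a one-bit tag stand in for the kernel's XArray and its shadow-entry encoding:

#include <stdint.h>
#include <stdio.h>

#define SLOTS 16
static void *slots[SLOTS]; /* either a page pointer or a shadow token */

#define SHADOW_TAG 1UL /* low bit marks shadow tokens (illustrative) */

static void *make_shadow(unsigned long eviction_time)
{
    return (void *)((eviction_time << 1) | SHADOW_TAG);
}

static int is_shadow(void *entry)
{
    return ((uintptr_t)entry & SHADOW_TAG) != 0;
}

static void delete_from_cache(unsigned idx, unsigned long now)
{
    slots[idx] = make_shadow(now); /* leave eviction info behind */
}

static void add_to_cache(unsigned idx, void *page, unsigned long now)
{
    void *old = slots[idx];

    slots[idx] = page;
    if (old && is_shadow(old)) /* refault: feed shadow to workingset code */
        printf("refault at t=%lu, evicted at t=%lu\n",
               now, (unsigned long)((uintptr_t)old >> 1));
}

int main(void)
{
    int page;

    add_to_cache(3, &page, 10);
    delete_from_cache(3, 20);
    add_to_cache(3, &page, 35); /* triggers the refault path */
    return 0;
}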
/linux-master/drivers/gpu/drm/i915/gem/
H A Di915_gem_execbuffer.c234 * copy the user's batchbuffer to a shadow (so that the user doesn't have
2283 struct i915_vma *shadow, *trampoline, *batch; local
2301 * ppGTT backed shadow buffers must be mapped RO, to prevent
2327 shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
2328 if (IS_ERR(shadow))
2329 return PTR_ERR(shadow);
2332 i915_gem_object_set_readonly(shadow->obj);
2333 shadow->private = pool;
2337 trampoline = shadow;
2339 shadow
[all...]
/linux-master/drivers/gpu/drm/exynos/
H A Dexynos_mixer.c355 u32 base, shadow; local
367 shadow = mixer_reg_read(ctx, MXR_CFG_S);
368 if (base != shadow)
372 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
373 if (base != shadow)
377 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
378 if (base != shadow)
759 /* interlace scan needs to check the shadow register */
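The exynos_mixer.c hits compare each register against its *_S shadow counterpart, which the hardware latches at vsync, to decide whether the last programmed update has taken effect. A minimal sketch of that check; the register names and reg_read() are illustrative stand-ins for mixer_reg_read():

#include <stdbool.h>
#include <stdint.h>

enum { MXR_CFG, MXR_CFG_S, MXR_GRAPHIC_BASE0, MXR_GRAPHIC_BASE0_S, NREGS };

static uint32_t regs[NREGS];

static uint32_t reg_read(int reg) { return regs[reg]; }

/* Returns true once every hardware shadow matches what we programmed. */
static bool update_latched(void)
{
    if (reg_read(MXR_CFG) != reg_read(MXR_CFG_S))
        return false;
    if (reg_read(MXR_GRAPHIC_BASE0) != reg_read(MXR_GRAPHIC_BASE0_S))
        return false;
    return true;
}

int main(void)
{
    regs[MXR_CFG] = 1;           /* programmed, not yet latched */
    if (!update_latched())
        regs[MXR_CFG_S] = 1;     /* hardware latches at vsync */
    return update_latched() ? 0 : 1;
}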
/linux-master/drivers/gpu/drm/amd/amdgpu/
H A Damdgpu_object.c720 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
722 * @vmbo: BO that will be inserted into the shadow list
724 * Insert a BO to the shadow list.
732 vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
733 vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
738 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
740 * @shadow: &amdgpu_bo shadow to be restored
743 * Copies a buffer object's shadow content back to the object.
744 * This is used for recovering a buffer from its shadow i
750 amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence) argument
[all...]
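The amdgpu_object.c comments describe restoring a buffer object from its shadow copy after the original contents are lost. A very rough sketch of the idea, assuming a system-memory shadow of a VRAM buffer; memcpy() stands in for the fenced GPU copy the real driver schedules:

#include <string.h>

struct bo {
    void *vram;   /* contents may be lost across a reset */
    void *shadow; /* system-memory backup */
    size_t size;
};

static void bo_backup(struct bo *bo)
{
    memcpy(bo->shadow, bo->vram, bo->size);
}

static void bo_restore_from_shadow(struct bo *bo)
{
    memcpy(bo->vram, bo->shadow, bo->size);
}

int main(void)
{
    char vram[64], sysmem[64];
    struct bo bo = { vram, sysmem, sizeof(vram) };

    bo_backup(&bo);              /* before work that might be lost */
    bo_restore_from_shadow(&bo); /* after a (simulated) GPU reset */
    return 0;
}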
/linux-master/drivers/mfd/
H A Dcs42l43.c591 * for this purpose, which is indicated through the shadow flag.
593 static int cs42l43_mcu_stage_2_3(struct cs42l43 *cs42l43, bool shadow) argument
599 if (shadow)
623 * Note: Unlike cs42l43_mcu_stage_2_3 there is no need to consider the shadow
625 * requires update which means the revision does not include shadow register
721 bool patched, shadow; local
755 * features through a set of shadow registers.
757 shadow = mcu_rev >= CS42L43_MCU_SHADOW_REGS_REQUIRED_REV;
794 return cs42l43_mcu_stage_2_3(cs42l43, shadow);
/linux-master/arch/x86/kvm/vmx/
H A Dvmx.h134 * Cache of the guest's shadow VMCS, existing outside of guest
151 * Indicates if the shadow vmcs or enlightened vmcs must be updated
641 * in order to construct shadow PTEs with the correct protections.
702 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
708 static inline struct vmcs *alloc_vmcs(bool shadow) argument
710 return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
/linux-master/drivers/gpu/drm/i915/
H A Di915_cmd_parser.c1430 * @shadow: validated copy of the batch buffer in question
1444 struct i915_vma *shadow,
1461 cmd = copy_batch(shadow->obj, batch->obj,
1474 shadow_addr = gen8_canonical_addr(i915_vma_offset(shadow));
1478 * We use the batch length as size because the shadow object is as
1536 * With the trampoline, the shadow is executed twice.
1551 cmd = page_mask_bits(shadow->obj->mm.mapping);
1572 i915_gem_object_flush_map(shadow->obj);
1576 i915_gem_object_unpin_map(shadow->obj);
1440 intel_engine_cmd_parser(struct intel_engine_cs *engine, struct i915_vma *batch, unsigned long batch_offset, unsigned long batch_length, struct i915_vma *shadow, bool trampoline) argument
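intel_engine_cmd_parser() above validates a copy of the batch, not the user's buffer: the batch is first copied into a kernel-owned shadow, the shadow is scanned, and only the shadow is ever executed, so userspace cannot change it after validation. A self-contained sketch of that copy-then-validate pattern; the command encoding is invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_FORBIDDEN 0xdeadbeefu /* hypothetical disallowed opcode */

static int parse_shadow(const uint32_t *shadow, size_t n)
{
    for (size_t i = 0; i < n; i++)
        if (shadow[i] == CMD_FORBIDDEN)
            return -1; /* reject the whole batch */
    return 0;
}

static int submit_batch(const uint32_t *user_batch, size_t n,
                        uint32_t *shadow)
{
    /* Copy first: validation must run on our copy, not user memory. */
    memcpy(shadow, user_batch, n * sizeof(*shadow));
    if (parse_shadow(shadow, n))
        return -1;
    /* ... hand the *shadow* buffer to the hardware here ... */
    return 0;
}

int main(void)
{
    uint32_t batch[4] = { 1, 2, 3, 4 }, shadow[4];

    printf("batch accepted: %d\n", submit_batch(batch, 4, shadow) == 0);
    return 0;
}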
/linux-master/arch/alpha/kernel/
H A Dcore_mcpcia.c460 printk(" shadow[%d-%d] = %16lx %16lx\n",
461 i, i+1, frame->shadow[i],
462 frame->shadow[i+1]);
/linux-master/fs/nilfs2/
H A Ddat.c27 * @shadow: shadow map of DAT file
32 struct nilfs_shadow_map shadow; member in struct:nilfs_dat_info
525 err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
/linux-master/include/linux/
H A Dswap.h347 bool workingset_test_recent(void *shadow, bool file, bool *workingset);
350 void workingset_refault(struct folio *folio, void *shadow);
/linux-master/mm/kasan/
H A Dshadow.c3 * This file contains KASAN runtime code that manages shadow memory for
132 * Perform shadow offset calculation based on untagged address, as
157 u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size); local
158 *shadow = size & KASAN_GRANULE_MASK;
168 * Perform shadow offset calculation based on untagged address, as
235 * If shadow is mapped already then it must have been mapped
260 * In the latter case we can use vfree() to free shadow.
264 * Currently it's not possible to free shadow mapped
337 * User Mode Linux maps enough shadow memory for all of virtual memory
373 * STORE shadow(
[all...]
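The mm/kasan/shadow.c hit at lines 157-158 stores size & KASAN_GRANULE_MASK into the shadow byte of an object's last, partially valid granule. A sketch of that generic-KASAN encoding and a matching access check; this is a plausible reading of the convention, not the kernel's exact check code:

#include <stdint.h>
#include <stdio.h>

#define KASAN_GRANULE_SIZE 8
#define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)

/* Mark the tail of an object: the shadow byte holds how many leading
 * bytes of the granule are accessible (0 means fully accessible). */
static void poison_last_granule(uint8_t *shadow_byte, size_t size)
{
    if (size & KASAN_GRANULE_MASK)
        *shadow_byte = size & KASAN_GRANULE_MASK;
}

/* An access is valid if the shadow byte is 0, or the access stays
 * within the first *shadow bytes of a partially valid granule. */
static int access_ok(uint8_t shadow, unsigned long addr, size_t len)
{
    unsigned long last = (addr + len - 1) & KASAN_GRANULE_MASK;

    return shadow == 0 || last < shadow;
}

int main(void)
{
    uint8_t sb = 0;

    poison_last_granule(&sb, 13); /* 13 & 7 == 5 bytes valid */
    printf("4-byte access at offset 0: %d\n", access_ok(sb, 0, 4));
    printf("4-byte access at offset 4: %d\n", access_ok(sb, 4, 4));
    return 0;
}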
/linux-master/tools/scripts/
H A DMakefile.include128 EXTRA_WARNINGS += -Wno-shadow
/linux-master/drivers/crypto/hisilicon/sec/
H A Dsec_drv.h332 * @shadow: Pointers back to the shadow copy of the hardware ring element
351 void *shadow[SEC_QUEUE_LEN]; member in struct:sec_queue
/linux-master/arch/s390/kvm/
H A Dgaccess.h455 int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
/linux-master/drivers/net/ethernet/qlogic/qed/
H A Dqed_vf.c1476 struct qed_bulletin_content shadow; local
1483 memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
1486 if (shadow.version == p_iov->bulletin_shadow.version)
1490 crc = crc32(0, (u8 *)&shadow + crc_size,
1492 if (crc != shadow.crc)
1495 /* Set the shadow bulletin and process it */
1496 memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
1499 "Read a bulletin update %08x\n", shadow.version);

Completed in 269 milliseconds
