Searched refs:shadow (Results 1 - 25 of 109) sorted by path


/linux-master/arch/alpha/include/asm/
mce.h 26 unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */ member in struct:el_common_EV5_uncorrectable_mcheck
/linux-master/arch/alpha/kernel/
core_mcpcia.c 460 printk(" shadow[%d-%d] = %16lx %16lx\n",
461 i, i+1, frame->shadow[i],
462 frame->shadow[i+1]);
/linux-master/arch/m68k/fpsp040/
fpsp.h 137 .set FPSR_SHADOW,LV-64 | fpsr shadow reg
/linux-master/drivers/gpio/
gpio-janz-ttl.c 60 u8 *shadow; local
64 shadow = &mod->porta_shadow;
66 shadow = &mod->portb_shadow;
69 shadow = &mod->portc_shadow;
74 ret = *shadow & BIT(offset);
83 u8 *shadow; local
87 shadow = &mod->porta_shadow;
90 shadow = &mod->portb_shadow;
94 shadow = &mod->portc_shadow;
100 *shadow |
[all...]
gpio-latch.c 58 unsigned long *shadow; member in struct:gpio_latch_priv
61 * use a mutex or a spinlock to protect our shadow map.
64 struct mutex mutex; /* protects @shadow */
65 spinlock_t spinlock; /* protects @shadow */
81 assign_bit(offset, priv->shadow, val);
85 test_bit(latch * priv->n_latched_gpios + i, priv->shadow));
160 priv->shadow = devm_bitmap_zalloc(&pdev->dev, n_latches * priv->n_latched_gpios,
162 if (!priv->shadow)
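The gpio-latch.c entry above illustrates a recurring driver pattern: the output latches cannot be read back, so the driver keeps a shadow bitmap of the state it last wrote and guards it with a mutex or spinlock. A minimal self-contained sketch of that idea in plain C follows; it is illustrative only, and the names shadow_assign_bit()/shadow_test_bit() and the pthread mutex are stand-ins rather than the driver's actual API (the driver itself uses assign_bit()/test_bit() on priv->shadow under priv->mutex or priv->spinlock, as the excerpt shows).

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Shadow of write-only output latches, one bit per GPIO line. */
    static unsigned long shadow;
    static pthread_mutex_t shadow_lock = PTHREAD_MUTEX_INITIALIZER; /* protects shadow */

    /* Record the new state of one line in the shadow map. */
    static void shadow_assign_bit(unsigned int offset, bool value)
    {
        pthread_mutex_lock(&shadow_lock);
        if (value)
            shadow |= 1UL << offset;
        else
            shadow &= ~(1UL << offset);
        pthread_mutex_unlock(&shadow_lock);
    }

    /* The hardware cannot be read back, so "get" answers from the shadow. */
    static bool shadow_test_bit(unsigned int offset)
    {
        pthread_mutex_lock(&shadow_lock);
        bool value = shadow & (1UL << offset);
        pthread_mutex_unlock(&shadow_lock);
        return value;
    }

    int main(void)
    {
        shadow_assign_bit(3, true);
        printf("line 3 = %d\n", shadow_test_bit(3));
        return 0;
    }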
gpio-mm-lantiq.c 31 u16 shadow; /* shadow the latches state */ member in struct:ltq_mm
35 * ltq_mm_apply() - write the shadow value to the ebu address.
38 * Write the shadow value to the EBU to set the gpios. We need to set the
47 __raw_writew(chip->shadow, chip->mmchip.regs);
58 * Set the shadow value and call ltq_mm_apply.
65 chip->shadow |= (1 << offset);
67 chip->shadow &= ~(1 << offset);
104 u32 shadow; local
117 /* store the shadow valu
[all...]
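The gpio-mm-lantiq.c kernel-doc above spells out the write side of the same pattern: set or clear the bit in a 16-bit shadow word, then write the whole word out to the external bus unit, because the latch itself is write-only. A hedged sketch of that flow; write_latch() stands in for the driver's __raw_writew() to the mapped register, the 16-bit width matches the excerpt, and everything else is illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Shadow of the 16-bit external latch; the latch itself cannot be read. */
    static uint16_t shadow;

    /* Stand-in for the real register write (__raw_writew() in the driver). */
    static void write_latch(uint16_t val)
    {
        printf("latch <= 0x%04x\n", val);
    }

    /* Apply the current shadow value to the hardware latch. */
    static void latch_apply(void)
    {
        write_latch(shadow);
    }

    /* Set or clear one GPIO bit in the shadow, then push the whole word out. */
    static void latch_set_value(unsigned int offset, bool value)
    {
        if (value)
            shadow |= 1u << offset;
        else
            shadow &= ~(1u << offset);
        latch_apply();
    }

    int main(void)
    {
        latch_set_value(5, true);
        latch_set_value(5, false);
        return 0;
    }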
/linux-master/drivers/media/dvb-frontends/
itd1000_priv.h 20 u8 shadow[256]; member in struct:itd1000_state
/linux-master/include/linux/
scx200_gpio.h 13 #define __SCx200_GPIO_SHADOW unsigned long *shadow = scx200_gpio_shadow+bank
16 #define __SCx200_GPIO_OUT __asm__ __volatile__("outsl":"=mS" (shadow):"d" (ioaddr), "0" (shadow))
46 set_bit(index, shadow); /* __set_bit()? */
57 clear_bit(index, shadow); /* __clear_bit()? */
69 set_bit(index, shadow);
71 clear_bit(index, shadow);
81 change_bit(index, shadow);
/linux-master/samples/livepatch/
Makefile 3 obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-mod.o
4 obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix1.o
5 obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix2.o
/linux-master/
Makefile 936 CC_FLAGS_SCS := -fsanitize=shadow-call-stack
/linux-master/arch/mips/include/asm/sn/
ioc3.h 19 u32 shadow; member in struct:ioc3_serialregs
/linux-master/arch/powerpc/platforms/ps3/
spu.c 40 * struct spe_shadow - logical spe shadow register area.
42 * Read-only shadow of spe registers.
101 * @shadow_addr: lpar address of spe register shadow area returned by
103 * @shadow: Virtual (ioremap) address of spe register shadow area.
112 struct spe_shadow __iomem *shadow; member in struct:spu_pdata
124 unsigned long problem, unsigned long ls, unsigned long shadow,
131 pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow);
180 iounmap(spu_pdata(spu)->shadow);
123 _dump_areas(unsigned int spe_id, unsigned long priv2, unsigned long problem, unsigned long ls, unsigned long shadow, const char* func, int line) argument
[all...]
/linux-master/arch/s390/kvm/
gaccess.h 455 int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
trace-s390.h 340 TP_PROTO(unsigned long start, unsigned long end, unsigned int shadow),
341 TP_ARGS(start, end, shadow),
346 __field(unsigned int, shadow)
352 __entry->shadow = shadow;
355 TP_printk("gmap notified (start:0x%lx end:0x%lx shadow:%d)",
356 __entry->start, __entry->end, __entry->shadow)
/linux-master/arch/x86/include/uapi/asm/
kvm.h 329 /* Interrupt shadow states */
346 __u8 shadow; member in struct:kvm_vcpu_events::__anon4
/linux-master/arch/x86/kvm/vmx/
vmx.c 2856 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) argument
2874 if (shadow)
3968 * Mark the desired intercept state in shadow bitmap, this is needed
4010 * Mark the desired intercept state in shadow bitmap, this is needed
4304 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
6279 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6282 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
8565 /* NX support is required for shadow paging. */
vmx.h 134 * Cache of the guest's shadow VMCS, existing outside of guest
151 * Indicates if the shadow vmcs or enlightened vmcs must be updated
641 * in order to construct shadow PTEs with the correct protections.
702 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
708 static inline struct vmcs *alloc_vmcs(bool shadow) argument
710 return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
/linux-master/arch/x86/kvm/
x86.c 933 * indirect shadow MMUs. If paging is disabled, no updates are needed
1631 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
3636 * a forced sync of the shadow page tables. Ensure all the
5414 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
5501 events->interrupt.shadow);
7777 * shadow page table for L2 guest.
8869 * the issue by unprotecting the gfn, as zapping the shadow page will
8890 * writing instruction, it means the VM-EXIT is caused by shadow
8891 * page protected, we can zap the shadow page and retry this
8987 u32 shadow; local
[all...]
/linux-master/arch/x86/mm/
kasan_init_64.c 321 unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va); local
323 return round_down(shadow, PAGE_SIZE);
328 unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va); local
330 return round_up(shadow, PAGE_SIZE);
350 * We use the same shadow offset for 4- and 5-level paging to
397 * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
414 * Populate the shadow for the shared portion of the CPU entry area.
436 * kasan_early_shadow_page has been used as early shadow memory, thus
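The kasan_init_64.c helpers above convert a virtual address range into its KASAN shadow range and round the result to page boundaries before populating shadow pages. A simplified userspace sketch of that calculation, assuming the generic KASAN scaling of one shadow byte per 8 bytes of memory; SHADOW_OFFSET and PAGE_SIZE here are illustrative constants, not the kernel's definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE          4096ULL
    #define SHADOW_SCALE_SHIFT 3                       /* 1 shadow byte covers 8 bytes */
    #define SHADOW_OFFSET      0xdffffc0000000000ULL   /* illustrative, arch-specific */

    /* Map an address to the byte in the shadow region that describes it. */
    static uint64_t mem_to_shadow(uint64_t addr)
    {
        return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
    }

    /* Start of the shadow range, rounded down to a page boundary. */
    static uint64_t shadow_start(uint64_t va)
    {
        uint64_t shadow = mem_to_shadow(va);
        return shadow & ~(PAGE_SIZE - 1);
    }

    /* End of the shadow range, rounded up to a page boundary. */
    static uint64_t shadow_end(uint64_t va)
    {
        uint64_t shadow = mem_to_shadow(va);
        return (shadow + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    }

    int main(void)
    {
        uint64_t start = 0xffff888000000000ULL, end = start + 0x200000ULL;

        printf("shadow: [%#llx, %#llx)\n",
               (unsigned long long)shadow_start(start),
               (unsigned long long)shadow_end(end));
        return 0;
    }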
/linux-master/arch/x86/xen/
enlighten_pv.c 119 * passed in the update_descriptor hypercall we keep shadow copies to
576 struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i]; local
581 if (desc_equal(shadow, &t->tls_array[i]))
584 *shadow = t->tls_array[i];
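The enlighten_pv.c excerpt keeps shadow copies of the TLS descriptors last passed to the hypervisor so that a descriptor which has not changed can skip the update_descriptor hypercall entirely (the desc_equal() comparison shown above). A stripped-down sketch of that compare-before-update idea; struct desc, expensive_update() and load_tls_desc() are hypothetical stand-ins.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct desc {
        uint32_t a, b;                  /* illustrative descriptor halves */
    };

    static struct desc shadow_tls[3];   /* shadow of what the hypervisor holds */

    /* Stand-in for the costly hypercall that actually updates the descriptor. */
    static void expensive_update(int i, const struct desc *d)
    {
        printf("update_descriptor(%d, %08x:%08x)\n", i, d->a, d->b);
    }

    /* Only issue the update when the new descriptor differs from the shadow. */
    static void load_tls_desc(int i, const struct desc *d)
    {
        if (memcmp(&shadow_tls[i], d, sizeof(*d)) == 0)
            return;                     /* unchanged: skip the hypercall */

        shadow_tls[i] = *d;             /* refresh the shadow copy */
        expensive_update(i, d);
    }

    int main(void)
    {
        struct desc d = { .a = 0x12345678, .b = 0x9abcdef0 };

        load_tls_desc(0, &d);           /* changed: update issued */
        load_tls_desc(0, &d);           /* unchanged: update skipped */
        return 0;
    }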
/linux-master/block/
sed-opal.c 1970 struct opal_shadow_mbr *shadow = data; local
1972 return generic_table_write_data(dev, shadow->data, shadow->offset,
1973 shadow->size, opaluid[OPAL_MBR]);
/linux-master/drivers/block/
xen-blkfront.c 190 struct blk_shadow shadow[]; member in struct:blkfront_ring_info
291 rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
292 rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
299 if (rinfo->shadow[id].req.u.rw.id != id)
301 if (rinfo->shadow[id].request == NULL)
303 rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
304 rinfo->shadow[id].request = NULL;
543 rinfo->shadow[id].request = req;
544 rinfo->shadow[id].status = REQ_PROCESSING;
545 rinfo->shadow[i
611 struct blk_shadow *shadow = &rinfo->shadow[setup->id]; local
2074 struct blk_shadow *shadow = rinfo->shadow; local
[all...]
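The xen-blkfront.c lines show a shadow[] array that mirrors every in-flight ring request, with the free slots chained into a free list through the otherwise unused request id field (and a 0x0fffffee poison value while a slot is in use). A small self-contained sketch of that free-list technique with hypothetical names; the real driver additionally checks for ring exhaustion, which this sketch omits.

    #include <stdio.h>

    #define RING_SIZE 8

    struct shadow_entry {
        unsigned long id;            /* next free index while the slot is unused */
        void *request;               /* the in-flight request, NULL when free */
    };

    static struct shadow_entry shadow[RING_SIZE];
    static unsigned long shadow_free;    /* index of the first free slot */

    static void freelist_init(void)
    {
        for (unsigned long i = 0; i < RING_SIZE; i++)
            shadow[i].id = i + 1;        /* each free slot points at the next */
        shadow_free = 0;
    }

    /* Take a free slot: pop the head of the list and remember the request. */
    static unsigned long get_id(void *req)
    {
        unsigned long free = shadow_free;

        shadow_free = shadow[free].id;   /* advance to the next free slot */
        shadow[free].id = 0x0fffffee;    /* poison while in use, for debugging */
        shadow[free].request = req;
        return free;
    }

    /* Return a completed slot to the head of the free list. */
    static void add_id(unsigned long id)
    {
        shadow[id].request = NULL;
        shadow[id].id = shadow_free;
        shadow_free = id;
    }

    int main(void)
    {
        int dummy;

        freelist_init();
        unsigned long id = get_id(&dummy);
        printf("allocated slot %lu\n", id);
        add_id(id);
        return 0;
    }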
/linux-master/drivers/crypto/hisilicon/sec/
sec_algs.c 541 void sec_alg_callback(struct sec_bd_info *resp, void *shadow) argument
543 struct sec_request *sec_req = shadow;
sec_drv.c 708 queue->shadow[queue->expected]);
709 queue->shadow[queue->expected] = NULL;
854 * @ctx: Context to be put in the shadow array and passed back to cb on result.
872 queue->shadow[write] = ctx;
sec_drv.h 332 * @shadow: Pointers back to the shadow copy of the hardware ring element
351 void *shadow[SEC_QUEUE_LEN]; member in struct:sec_queue
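The sec_drv.h kernel-doc describes a shadow[] array of context pointers that runs parallel to the hardware queue: on submission the caller's context is stored at the same index as the hardware descriptor, and on completion that pointer is handed back to the callback (sec_alg_callback() above receives it as void *shadow). A hedged sketch of that submit/complete pairing with illustrative names; the real descriptor type is struct sec_bd_info.

    #include <stdio.h>

    #define QUEUE_LEN 16

    /* Hardware descriptor stand-in. */
    struct hw_desc { int opcode; };

    static struct hw_desc ring[QUEUE_LEN];      /* hardware-visible ring */
    static void *shadow[QUEUE_LEN];             /* driver-private context per slot */
    static unsigned int write_idx, expected;    /* producer / completion cursors */

    /* Submit: fill the hardware slot and remember the caller's context beside it. */
    static void submit(struct hw_desc desc, void *ctx)
    {
        ring[write_idx] = desc;
        shadow[write_idx] = ctx;
        write_idx = (write_idx + 1) % QUEUE_LEN;
    }

    /* Completion: the hardware finished slot 'expected'; hand the context back. */
    static void complete(void (*cb)(struct hw_desc *resp, void *ctx))
    {
        cb(&ring[expected], shadow[expected]);
        shadow[expected] = NULL;
        expected = (expected + 1) % QUEUE_LEN;
    }

    static void my_callback(struct hw_desc *resp, void *ctx)
    {
        printf("op %d done, ctx %p\n", resp->opcode, ctx);
    }

    int main(void)
    {
        int req_ctx;

        submit((struct hw_desc){ .opcode = 1 }, &req_ctx);
        complete(my_callback);
        return 0;
    }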

Completed in 345 milliseconds
