Searched refs:MSR_RI (Results 1 - 25 of 34) sorted by path

/linux-master/arch/powerpc/platforms/powernv/
subcore-asm.S:25 ori r4,r4,MSR_EE|MSR_SE|MSR_BE|MSR_RI
/linux-master/arch/powerpc/include/asm/
hw_irq.h:71 __mtmsrd(MSR_EE | MSR_RI, 1);
83 __mtmsrd(MSR_RI, 1);
97 mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
108 __mtmsrd(MSR_RI, 1);
110 mtmsr(mfmsr() | MSR_RI);
ptrace.h:318 return unlikely(cpu_has_msr_ri() && !(regs->msr & MSR_RI));
324 regs_set_return_msr(regs, regs->msr | MSR_RI);
330 regs_set_return_msr(regs, regs->msr & ~MSR_RI);
reg.h:112 #define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
134 #define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_HV)
147 #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
reg_booke.h:41 #define MSR_ (MSR_ME | MSR_RI | MSR_CE)
46 #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
49 #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_CE)
/linux-master/arch/powerpc/kernel/
entry_32.S:218 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
exceptions-64s.S:399 xori r10,r10,MSR_RI
402 xori r10,r10,MSR_RI
887 * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
1064 * enable MSR_RI, then SLB or MCE will be able to recover, but a nested
1080 /* Clear MSR_RI before setting SRR0 and SRR1. */
1085 * MSR_RI is clear, now we can decrement paca->in_nmi.
1172 /* Clear MSR_RI before setting SRR0 and SRR1. */\
1174 mtmsrd r9,1; /* Clear MSR_RI */ \
1273 andi. r11,r12,MSR_RI
1362 li r10,0 /* clear MSR_RI */
[all...]
fpu.S:98 ori r5,r5,MSR_FP|MSR_RI
head_32.h:66 LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~MSR_RI) /* re-enable MMU */
102 mtspr SPRN_EID, r2 /* Set MSR_RI */
head_64.S:155 ori r24,r24,MSR_RI
794 ori r3,r3,MSR_RI
982 ori r6,r6,MSR_RI
head_8xx.S:697 li r12, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
698 rlwinm r0, r10, 0, ~MSR_RI
head_book3s_32.S:202 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
1092 rlwinm r0, r6, 0, ~MSR_RI
1114 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
interrupt_64.S:278 li r12,-1 /* Set MSR_EE and MSR_RI */
400 andi. r0,r5,MSR_RI
kvm_emul.S:57 /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
59 lis r30, (~(MSR_EE | MSR_RI))@h
60 ori r30, r30, (~(MSR_EE | MSR_RI))@l
63 /* OR the register's (MSR_EE|MSR_RI) on MSR */
66 andi. r30, r30, (MSR_EE|MSR_RI)
115 #define MSR_SAFE_BITS (MSR_EE | MSR_RI)
mce.c:117 if (handled && (regs->msr & MSR_RI))
misc_64.S:362 1: li r9,MSR_RI
process.c:1492 {MSR_RI, "RI"},
rtas.c:725 BUG_ON(!(msr & MSR_RI));
rtas_entry.S:116 LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI)
tm.S:164 * At this point we can't take an SLB miss since we have MSR_RI
192 * clobbered by an exception once we turn on MSR_RI below.
205 li r11, MSR_RI
506 * At this point we can't take an SLB miss since we have MSR_RI
532 li r4, MSR_RI
traps.c:403 if (!(regs->msr & MSR_RI))
437 regs->msr &= ~MSR_RI;
vector.S:54 ori r5,r5,MSR_RI
138 li r5,MSR_RI
/linux-master/arch/powerpc/kexec/
relocate_32.S:412 ori r8, r8, MSR_RI|MSR_ME
/linux-master/arch/powerpc/kvm/
book3s_64_entry.S:401 * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
405 li r10,MSR_RI
book3s_emulate.c:302 new_msr &= ~(MSR_RI | MSR_EE);
303 new_msr |= rs_val & (MSR_RI | MSR_EE);
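
Note: the reg.h and ptrace.h hits above capture the core pattern. MSR_RI is a single mask bit built with __MASK(MSR_RI_LG), and interrupt code tests, sets, or clears it in regs->msr to record whether the saved register state is recoverable. The fragment below is a minimal standalone C sketch of that bit handling, not kernel code: pt_regs is reduced to its msr field, MSR_RI_LG is assumed to be bit 1 as in mainline reg.h, the cpu_has_msr_ri() check from ptrace.h:318 is dropped, and the helper names only mirror the ptrace.h ones shown above.

/* Standalone model of the MSR_RI bit handling seen in reg.h and ptrace.h above.
 * Assumptions: MSR_RI_LG is bit 1 (as in mainline reg.h); pt_regs is reduced
 * to its msr field; the cpu_has_msr_ri() feature check is dropped.
 */
#include <stdio.h>

#define MSR_RI_LG  1                      /* Recoverable Exception bit position */
#define __MASK(X)  (1UL << (X))
#define MSR_RI     __MASK(MSR_RI_LG)      /* mirrors reg.h:112 */

struct pt_regs { unsigned long msr; };    /* simplified stand-in */

/* mirrors ptrace.h:318: non-zero if the saved state is not recoverable */
static int unrecoverable_excp(const struct pt_regs *regs)
{
        return !(regs->msr & MSR_RI);
}

/* mirror ptrace.h:324 and ptrace.h:330: mark state recoverable / unrecoverable */
static void mark_recoverable(struct pt_regs *regs)   { regs->msr |=  MSR_RI; }
static void mark_unrecoverable(struct pt_regs *regs) { regs->msr &= ~MSR_RI; }

int main(void)
{
        struct pt_regs regs = { .msr = 0 };

        printf("unrecoverable: %d\n", unrecoverable_excp(&regs));  /* prints 1 */
        mark_recoverable(&regs);
        printf("unrecoverable: %d\n", unrecoverable_excp(&regs));  /* prints 0 */
        mark_unrecoverable(&regs);
        printf("unrecoverable: %d\n", unrecoverable_excp(&regs));  /* prints 1 */
        return 0;
}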
