/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#include <asm/alternative.h>
#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupts and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
 * always masked and unmasked together, and have no side effects for other
 * flags. Keeping to this order makes it easier for entry.S to know which
 * exceptions should be unmasked.
 */

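/*
 * Unmask IRQ and FIQ by clearing the I and F bits in DAIF (the daifclr
 * immediate #3 covers both). The barrier()s are compiler barriers that stop
 * memory accesses being reordered across the mask change.
 */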
static __always_inline void __daif_local_irq_enable(void)
{
	barrier();
	asm volatile("msr daifclr, #3");
	barrier();
}

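/*
 * On systems using GIC priority masking (pseudo-NMI), interrupts are unmasked
 * by raising the priority mask to GIC_PRIO_IRQON. pmr_sync() provides whatever
 * synchronisation the GIC needs for the unmask to take effect before the
 * following code runs. With CONFIG_ARM64_DEBUG_PRIORITY_MASKING, warn if the
 * current PMR is neither of the two expected values.
 */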
static __always_inline void __pmr_local_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}

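/*
 * The arch_* helpers below pick the masking scheme at runtime:
 * system_uses_irq_prio_masking() is true when the kernel booted with GIC
 * priority masking (pseudo-NMI) enabled, otherwise the DAIF variants are used.
 */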
static inline void arch_local_irq_enable(void)
{
	if (system_uses_irq_prio_masking()) {
		__pmr_local_irq_enable();
	} else {
		__daif_local_irq_enable();
	}
}

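/*
 * Mask IRQ and FIQ by setting the I and F bits in DAIF.
 */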
static __always_inline void __daif_local_irq_disable(void)
{
	barrier();
	asm volatile("msr daifset, #3");
	barrier();
}

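/*
 * Mask regular interrupts by lowering the priority mask to GIC_PRIO_IRQOFF.
 * Interrupts configured at pseudo-NMI priority remain deliverable.
 */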
static __always_inline void __pmr_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
	barrier();
}

static inline void arch_local_irq_disable(void)
{
	if (system_uses_irq_prio_masking()) {
		__pmr_local_irq_disable();
	} else {
		__daif_local_irq_disable();
	}
}

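/*
 * The saved 'flags' value is either a DAIF snapshot or a PMR value, depending
 * on which masking scheme is in use. Callers should treat it as opaque and
 * only hand it back to arch_irqs_disabled_flags() or arch_local_irq_restore().
 */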
static __always_inline unsigned long __daif_local_save_flags(void)
{
	return read_sysreg(daif);
}

static __always_inline unsigned long __pmr_local_save_flags(void)
{
	return read_sysreg_s(SYS_ICC_PMR_EL1);
}

/*
 * Save the current interrupt enable state.
 */
static inline unsigned long arch_local_save_flags(void)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_local_save_flags();
	} else {
		return __daif_local_save_flags();
	}
}

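/*
 * Interrupts count as disabled when the DAIF I bit is set, or, under priority
 * masking, when PMR holds anything other than GIC_PRIO_IRQON.
 */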
static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
{
	return flags & PSR_I_BIT;
}

static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
{
	return flags != GIC_PRIO_IRQON;
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_irqs_disabled_flags(flags);
	} else {
		return __daif_irqs_disabled_flags(flags);
	}
}

static __always_inline bool __daif_irqs_disabled(void)
{
	return __daif_irqs_disabled_flags(__daif_local_save_flags());
}

static __always_inline bool __pmr_irqs_disabled(void)
{
	return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
}

static inline bool arch_irqs_disabled(void)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_irqs_disabled();
	} else {
		return __daif_irqs_disabled();
	}
}

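/*
 * Save the current state and disable interrupts. The returned value is meant
 * to be passed back to arch_local_irq_restore().
 */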
static __always_inline unsigned long __daif_local_irq_save(void)
{
	unsigned long flags = __daif_local_save_flags();

	__daif_local_irq_disable();

	return flags;
}

static __always_inline unsigned long __pmr_local_irq_save(void)
{
	unsigned long flags = __pmr_local_save_flags();

	/*
	 * There are too many states with IRQs disabled, just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!__pmr_irqs_disabled_flags(flags))
		__pmr_local_irq_disable();

	return flags;
}

static inline unsigned long arch_local_irq_save(void)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_local_irq_save();
	} else {
		return __daif_local_irq_save();
	}
}

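/*
 * Write a previously saved state back. In the PMR case the restore may be
 * unmasking interrupts, so pmr_sync() is used to make the new priority take
 * effect.
 */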
static __always_inline void __daif_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg(flags, daif);
	barrier();
}

static __always_inline void __pmr_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg_s(flags, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}

/*
 * Restore the saved IRQ state.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	if (system_uses_irq_prio_masking()) {
		__pmr_local_irq_restore(flags);
	} else {
		__daif_local_irq_restore(flags);
	}
}

#endif /* __ASM_IRQFLAGS_H */