
#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/*
 * Carry out a pending affinity change for @irq.
 *
 * Caller must hold desc->lock and must have masked the irq line; we
 * reprogram the routing entry while it cannot fire.  No-op unless an
 * affinity change was previously requested via IRQ_MOVE_PENDING.
 */
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	/* Fast path: no migration was requested for this interrupt. */
	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	/* Consume the request up front so it is not retried on failure. */
	desc->status &= ~IRQ_MOVE_PENDING;

	/* Nothing useful to do if no target CPUs were recorded. */
	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	/* The chip may simply not support retargeting at all. */
	if (!desc->chip->set_affinity)
		return;

	/* Reprogramming below relies on the caller holding desc->lock. */
	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids))
		/* Only commit the new affinity if the chip accepted it. */
		if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
			cpumask_copy(desc->affinity, desc->pending_mask);
			irq_set_thread_affinity(desc);
		}

	/* Request fully handled (or dropped): clear the staged mask. */
	cpumask_clear(desc->pending_mask);
}
53
54void move_native_irq(int irq)
55{
56	struct irq_desc *desc = irq_to_desc(irq);
57
58	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
59		return;
60
61	if (unlikely(desc->status & IRQ_DISABLED))
62		return;
63
64	desc->chip->mask(irq);
65	move_masked_irq(irq);
66	desc->chip->unmask(irq);
67}
68