Lines matching refs:desc in kernel/irq/spurious.c (cross-reference search output; the number at the start of each line is its line number in the source file)

36 bool irq_wait_for_poll(struct irq_desc *desc)
37 __must_hold(&desc->lock)
41 smp_processor_id(), desc->irq_data.irq))
46 raw_spin_unlock(&desc->lock);
47 while (irqd_irq_inprogress(&desc->irq_data))
49 raw_spin_lock(&desc->lock);
50 } while (irqd_irq_inprogress(&desc->irq_data));
52 return !irqd_irq_disabled(&desc->irq_data) && desc->action;
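
Lines 36-52 above are the skeleton of irq_wait_for_poll(), which a CPU calls when it wants to poll an interrupt that another CPU is already handling. Below is a hedged sketch of the whole function, with the unmatched lines (the WARN_ONCE message, the cpu_relax() spin, the CONFIG_SMP guards) filled in from upstream kernel/irq/spurious.c; details may differ between kernel versions.

bool irq_wait_for_poll(struct irq_desc *desc)
	__must_hold(&desc->lock)
{
	/* Polling our own CPU would deadlock; warn and bail (reconstructed). */
	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
		      "irq poll in progress on cpu %d for irq %d\n",
		      smp_processor_id(), desc->irq_data.irq))
		return false;

#ifdef CONFIG_SMP
	do {
		/* Drop the lock so the CPU running the handler can finish. */
		raw_spin_unlock(&desc->lock);
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();
		/* Re-check under the lock: a new handler may have started. */
		raw_spin_lock(&desc->lock);
	} while (irqd_irq_inprogress(&desc->irq_data));
	/* Might have been disabled in the meantime. */
	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
	return false;
#endif
}

The unlock/spin/relock loop avoids busy-waiting while holding desc->lock, which would deadlock against the CPU that owns the in-progress handler.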
62 static int try_one_irq(struct irq_desc *desc, bool force)
67 raw_spin_lock(&desc->lock);
73 if (irq_settings_is_per_cpu(desc) ||
74 irq_settings_is_nested_thread(desc) ||
75 irq_settings_is_polled(desc))
82 if (irqd_irq_disabled(&desc->irq_data) && !force)
89 action = desc->action;
95 if (irqd_irq_inprogress(&desc->irq_data)) {
100 desc->istate |= IRQS_PENDING;
105 desc->istate |= IRQS_POLL_INPROGRESS;
107 if (handle_irq_event(desc) == IRQ_HANDLED)
110 action = desc->action;
111 } while ((desc->istate & IRQS_PENDING) && action);
112 desc->istate &= ~IRQS_POLL_INPROGRESS;
114 raw_spin_unlock(&desc->lock);
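
Lines 62-114 are try_one_irq(), which polls a single descriptor. A hedged reconstruction, with the unmatched lines filled in from upstream kernel/irq/spurious.c (may vary by version):

static int try_one_irq(struct irq_desc *desc, bool force)
{
	irqreturn_t ret = IRQ_NONE;
	struct irqaction *action;

	raw_spin_lock(&desc->lock);

	/*
	 * PER_CPU, nested-thread and explicitly polled interrupts are
	 * never polled.
	 */
	if (irq_settings_is_per_cpu(desc) ||
	    irq_settings_is_nested_thread(desc) ||
	    irq_settings_is_polled(desc))
		goto out;

	/* Do not poll disabled interrupts unless the caller forces it. */
	if (irqd_irq_disabled(&desc->irq_data) && !force)
		goto out;

	/* All handlers must agree on IRQF_SHARED, so test only the first. */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER))
		goto out;

	/*
	 * Already running on another CPU: flag it pending so that CPU
	 * goes looking for our mystery interrupt too.
	 */
	if (irqd_irq_inprogress(&desc->irq_data)) {
		desc->istate |= IRQS_PENDING;
		goto out;
	}

	/* Mark poll in progress and run the handlers. */
	desc->istate |= IRQS_POLL_INPROGRESS;
	do {
		if (handle_irq_event(desc) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
		/* Make sure there is still a valid action. */
		action = desc->action;
	} while ((desc->istate & IRQS_PENDING) && action);
	desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
	raw_spin_unlock(&desc->lock);
	return ret == IRQ_HANDLED;
}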
120 struct irq_desc *desc;
128 for_each_irq_desc(i, desc) {
135 if (try_one_irq(desc, false))
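
Lines 120-135 belong to misrouted_irq(), which walks every descriptor except the one that just misfired and polls it, in case the interrupt was actually routed elsewhere. A hedged sketch reconstructed around the matched lines (irq_poll_active and irq_poll_cpu are file-level globals in upstream spurious.c):

static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	/* Only one poller may be active at a time. */
	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		if (!i)		/* Never poll IRQ 0 */
			continue;
		if (i == irq)	/* Already tried */
			continue;
		if (try_one_irq(desc, false))
			ok = 1;
	}
out:
	atomic_dec(&irq_poll_active);
	/* So the caller can adjust the irq error counts. */
	return ok;
}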
146 struct irq_desc *desc;
153 for_each_irq_desc(i, desc) {
160 state = desc->istate;
166 try_one_irq(desc, true);
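
Lines 146-166 are from poll_spurious_irqs(), the timer callback that periodically retries interrupts the storm detector shut down. A hedged sketch; the callback signature has changed across versions, this follows the timer_list style:

static void poll_spurious_irqs(struct timer_list *unused)
{
	struct irq_desc *desc;
	int i;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int state;

		if (!i)
			continue;

		/* Racy read, but it doesn't matter. */
		state = desc->istate;
		barrier();
		if (!(state & IRQS_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

Note the force=true argument: unlike misrouted_irq(), this path deliberately polls descriptors that the spurious detector disabled.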
192 static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
194 unsigned int irq = irq_desc_get_irq(desc);
209 * We need to take desc->lock here. note_interrupt() is called
210 * w/o desc->lock held, but IRQ_PROGRESS set. We might race
212 * desc->lock here. See synchronize_irq().
214 raw_spin_lock_irqsave(&desc->lock, flags);
215 for_each_action_of_desc(desc, action) {
222 raw_spin_unlock_irqrestore(&desc->lock, flags);
225 static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
231 __report_bad_irq(desc, action_ret);
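
Lines 192-231 cover the reporting pair: __report_bad_irq() prints the offending IRQ and walks its action chain under desc->lock, and report_bad_irq() rate-limits it. A hedged sketch; the exact printk wording and format specifiers have varied across kernel versions:

static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct irqaction *action;
	unsigned long flags;

	if (bad_action_ret(action_ret))
		pr_err("irq event %d: bogus return value %x\n",
		       irq, action_ret);
	else
		pr_err("irq %d: nobody cared (try booting with the \"irqpoll\" option)\n",
		       irq);
	dump_stack();
	pr_err("handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * w/o desc->lock held, but IRQ_PROGRESS set. We might race
	 * with something else removing an action. It's ok to take
	 * desc->lock here. See synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		pr_err("[<%p>] %ps", action->handler, action->handler);
		if (action->thread_fn)
			pr_cont(" threaded [<%p>] %ps",
				action->thread_fn, action->thread_fn);
		pr_cont("\n");
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	/* Print at most 100 reports over the system's lifetime. */
	if (count > 0) {
		count--;
		__report_bad_irq(desc, action_ret);
	}
}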
236 try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
265 action = desc->action;
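
Lines 236-265 are from try_misrouted_irq(), which decides whether a sweep of the other descriptors is worthwhile. A hedged reconstruction; the irqfixup variable is set by the irqfixup/irqpoll boot options in upstream spurious.c:

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * For irqfixup == 2 (irqpoll) we also poll on handled
	 * interrupts, but only for IRQ 0 or handlers marked
	 * IRQF_IRQPOLL.
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't hold the descriptor lock, "action" can change
	 * under us. We don't really care, but we don't want to follow
	 * a NULL pointer, so load it exactly once via the barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}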
272 void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
276 if (desc->istate & IRQS_POLL_INPROGRESS ||
277 irq_settings_is_polled(desc))
281 report_bad_irq(desc, action_ret);
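
Lines 272-281 open note_interrupt(), the bookkeeping hook called after each hard-interrupt handler run. A hedged sketch of the entry checks (fragment; the accounting continues in the matches below):

void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq;

	/* Never account interrupts that are currently being polled. */
	if (desc->istate & IRQS_POLL_INPROGRESS ||
	    irq_settings_is_polled(desc))
		return;

	/*
	 * Anything besides IRQ_NONE/IRQ_HANDLED/IRQ_WAKE_THREAD is a
	 * driver bug: report it and skip the spurious accounting.
	 */
	if (bad_action_ret(action_ret)) {
		report_bad_irq(desc, action_ret);
		return;
	}
	/* ... deferred threaded-handler accounting follows ... */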
322 if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
323 desc->threads_handled_last |= SPURIOUS_DEFERRED;
338 handled = atomic_read(&desc->threads_handled);
340 if (handled != desc->threads_handled_last) {
350 desc->threads_handled_last = handled;
381 desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
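
Lines 322-381 implement deferred accounting for threaded handlers: a primary handler returning IRQ_WAKE_THREAD cannot be judged immediately, so the verdict is postponed to the next hard interrupt and read back from desc->threads_handled, which the threaded handlers increment. SPURIOUS_DEFERRED is bit 31 of threads_handled_last in upstream spurious.c. A hedged sketch of this branch:

	if (action_ret == IRQ_WAKE_THREAD) {
		int handled;

		/*
		 * First wake-up since deferred accounting was (re)armed:
		 * just note the fact and defer the analysis to the next
		 * hardware interrupt.
		 */
		if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
			desc->threads_handled_last |= SPURIOUS_DEFERRED;
			return;
		}

		/*
		 * Did any threaded handler return IRQ_HANDLED since the
		 * last interrupt? Keep bit 31 set in both values so no
		 * extra masking is needed for the comparison.
		 */
		handled = atomic_read(&desc->threads_handled);
		handled |= SPURIOUS_DEFERRED;
		if (handled != desc->threads_handled_last) {
			action_ret = IRQ_HANDLED;
			desc->threads_handled_last = handled;
		} else {
			/* No thread felt responsible: treat as unhandled. */
			action_ret = IRQ_NONE;
		}
	} else {
		/* Not a thread wake-up: disarm the deferred accounting. */
		desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
	}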
392 if (time_after(jiffies, desc->last_unhandled + HZ/10))
393 desc->irqs_unhandled = 1;
395 desc->irqs_unhandled++;
396 desc->last_unhandled = jiffies;
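
Lines 392-396 keep the unhandled counter from becoming, as the upstream comment puts it, a doomsday timer: an isolated spurious interrupt arriving more than 100 ms (HZ/10 jiffies) after the previous one resets the count to 1, so only dense bursts accumulate toward the shutdown threshold. In context, a hedged sketch:

	if (unlikely(action_ret == IRQ_NONE)) {
		/* Sporadic spurious IRQs reset the count; bursts add up. */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
	}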
399 irq = irq_desc_get_irq(desc);
400 if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
403 desc->irqs_unhandled -= ok;
406 if (likely(!desc->irqs_unhandled))
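
Lines 399-406: when the irqfixup/irqpoll machinery deems the interrupt possibly misrouted, each descriptor that misrouted_irq() managed to handle is credited back against the unhandled count. A hedged sketch:

	irq = irq_desc_get_irq(desc);
	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);

		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	/* Nothing outstanding: done. */
	if (likely(!desc->irqs_unhandled))
		return;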
410 desc->irq_count++;
411 if (likely(desc->irq_count < 100000))
414 desc->irq_count = 0;
415 if (unlikely(desc->irqs_unhandled > 99900)) {
419 __report_bad_irq(desc, action_ret);
424 desc->istate |= IRQS_SPURIOUS_DISABLED;
425 desc->depth++;
426 irq_disable(desc);
431 desc->irqs_unhandled = 0;
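
Lines 410-431 are the storm detector itself: the counters are examined every 100000 interrupts, and if more than 99900 of them went unhandled the line is reported, marked IRQS_SPURIOUS_DISABLED and masked, after which the poll timer takes over servicing it. A hedged sketch of the tail of note_interrupt():

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/* The interrupt is stuck: report it, then kill it. */
		__report_bad_irq(desc, action_ret);
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->istate |= IRQS_SPURIOUS_DISABLED;
		desc->depth++;
		irq_disable(desc);

		/* Hand the line over to the periodic poller. */
		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}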