Lines matching refs:desc (kernel/irq/manage.c)

38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
40 struct irq_data *irqd = irq_desc_get_irq_data(desc);
50 while (irqd_irq_inprogress(&desc->irq_data))
54 raw_spin_lock_irqsave(&desc->lock, flags);
55 inprogress = irqd_irq_inprogress(&desc->irq_data);
70 raw_spin_unlock_irqrestore(&desc->lock, flags);
100 struct irq_desc *desc = irq_to_desc(irq);
102 if (desc) {
103 __synchronize_hardirq(desc, false);
104 return !atomic_read(&desc->threads_active);
111 static void __synchronize_irq(struct irq_desc *desc)
113 __synchronize_hardirq(desc, true);
118 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
138 struct irq_desc *desc = irq_to_desc(irq);
140 if (desc)
141 __synchronize_irq(desc);
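
synchronize_irq() (line 138 above) is the driver-facing wrapper: unlike synchronize_hardirq() it also waits on desc->wait_for_threads until threads_active drops to zero. A minimal teardown sketch, assuming a hypothetical mydev driver and register layout:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>

struct mydev {                  /* hypothetical driver state */
        int irq;
        void __iomem *regs;
        void *dma_buf;
};

#define MYDEV_IRQ_MASK 0x10     /* hypothetical register offset */

static void mydev_quiesce(struct mydev *dev)
{
        /* Stop the device from raising new interrupts first. */
        writel(0, dev->regs + MYDEV_IRQ_MASK);

        /* Wait for any in-flight hard or threaded handler to finish
         * before the state it touches goes away. */
        synchronize_irq(dev->irq);

        kfree(dev->dma_buf);
}
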
148 static bool __irq_can_set_affinity(struct irq_desc *desc)
150 if (!desc || !irqd_can_balance(&desc->irq_data) ||
151 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
175 struct irq_desc *desc = irq_to_desc(irq);
177 return __irq_can_set_affinity(desc) &&
178 !irqd_affinity_is_managed(&desc->irq_data);
183 * @desc: irq descriptor which has affinity changed
187 * set_cpus_allowed_ptr() here as we hold desc->lock and this
190 void irq_set_thread_affinity(struct irq_desc *desc)
194 for_each_action_of_desc(desc, action) {
224 struct irq_desc *desc = irq_data_to_desc(data);
288 cpumask_copy(desc->irq_common_data.affinity, mask);
292 irq_set_thread_affinity(desc);
303 struct irq_desc *desc = irq_data_to_desc(data);
306 irq_copy_pending(desc, dest);
335 struct irq_desc *desc = irq_data_to_desc(data);
350 cpumask_copy(desc->irq_common_data.affinity, mask);
360 struct irq_desc *desc = irq_data_to_desc(data);
373 irq_copy_pending(desc, mask);
376 if (desc->affinity_notify) {
377 kref_get(&desc->affinity_notify->kref);
378 if (!schedule_work(&desc->affinity_notify->work)) {
380 kref_put(&desc->affinity_notify->kref,
381 desc->affinity_notify->release);
407 struct irq_desc *desc;
419 desc = irq_get_desc_buslock(irq, &flags, 0);
420 if (!desc)
424 if (irqd_is_started(&desc->irq_data)) {
430 if (irqd_affinity_is_managed(&desc->irq_data)) {
439 activated = irqd_is_activated(&desc->irq_data);
441 irq_domain_deactivate_irq(&desc->irq_data);
444 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
445 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
448 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
452 irq_domain_activate_irq(&desc->irq_data, false);
455 irq_put_desc_busunlock(desc, flags);
462 struct irq_desc *desc = irq_to_desc(irq);
466 if (!desc)
469 raw_spin_lock_irqsave(&desc->lock, flags);
470 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
471 raw_spin_unlock_irqrestore(&desc->lock, flags);
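
irq_set_affinity() and irq_force_affinity() both funnel into irq_set_affinity_locked() via the __irq_set_affinity() shown above (line 462). A hedged caller-side sketch of steering a line to one CPU:

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Pin a device interrupt to CPU 2, if it is online. */
static int mydev_pin_irq(unsigned int irq)
{
        if (!cpu_online(2))
                return -ENODEV;
        return irq_set_affinity(irq, cpumask_of(2));
}
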
509 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
511 if (!desc)
513 desc->affinity_hint = m;
514 irq_put_desc_unlock(desc, flags);
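
The hint stored at line 513 is what /proc/irq/<n>/affinity_hint reports, so user-space balancers such as irqbalance can honour a driver's preferred placement. A sketch, with the queue-to-IRQ mapping assumed:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Suggest one online CPU per queue vector; userspace may follow it. */
static void mydev_hint_queues(unsigned int *queue_irqs, int nr_queues)
{
        int i;

        for (i = 0; i < nr_queues; i++)
                irq_set_affinity_hint(queue_irqs[i],
                                      cpumask_of(i % num_online_cpus()));
}
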
525 struct irq_desc *desc = irq_to_desc(notify->irq);
529 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
532 raw_spin_lock_irqsave(&desc->lock, flags);
533 if (irq_move_pending(&desc->irq_data))
534 irq_get_pending(cpumask, desc);
536 cpumask_copy(cpumask, desc->irq_common_data.affinity);
537 raw_spin_unlock_irqrestore(&desc->lock, flags);
560 struct irq_desc *desc = irq_to_desc(irq);
567 if (!desc || desc->istate & IRQS_NMI)
577 raw_spin_lock_irqsave(&desc->lock, flags);
578 old_notify = desc->affinity_notify;
579 desc->affinity_notify = notify;
580 raw_spin_unlock_irqrestore(&desc->lock, flags);
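
irq_set_affinity_notifier() (line 560) registers the affinity_notify hook whose kref/work dance appears at lines 376-381. A sketch of the consumer side; the mydev names are hypothetical, and the core fills in notify->irq, kref and work itself:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct mydev_notify {                   /* hypothetical wrapper, kmalloc'ed */
        struct irq_affinity_notify an;
};

static void mydev_affinity_changed(struct irq_affinity_notify *an,
                                   const cpumask_t *mask)
{
        /* Called from the work scheduled at line 378. */
        pr_info("irq %u now targets %*pbl\n", an->irq,
                cpumask_pr_args(mask));
}

static void mydev_notify_release(struct kref *ref)
{
        struct irq_affinity_notify *an =
                container_of(ref, struct irq_affinity_notify, kref);

        kfree(container_of(an, struct mydev_notify, an));
}

static int mydev_watch_affinity(unsigned int irq, struct mydev_notify *n)
{
        n->an.notify  = mydev_affinity_changed;
        n->an.release = mydev_notify_release;
        return irq_set_affinity_notifier(irq, &n->an);
}
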
598 int irq_setup_affinity(struct irq_desc *desc)
601 int ret, node = irq_desc_get_node(desc);
606 if (!__irq_can_set_affinity(desc))
614 if (irqd_affinity_is_managed(&desc->irq_data) ||
615 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
616 if (cpumask_intersects(desc->irq_common_data.affinity,
618 set = desc->irq_common_data.affinity;
620 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
634 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
640 int irq_setup_affinity(struct irq_desc *desc)
642 return irq_select_affinity(irq_desc_get_irq(desc));
662 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
667 if (!desc)
670 data = irq_desc_get_irq_data(desc);
684 irq_put_desc_unlock(desc, flags);
690 void __disable_irq(struct irq_desc *desc)
692 if (!desc->depth++)
693 irq_disable(desc);
699 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
701 if (!desc)
703 __disable_irq(desc);
704 irq_put_desc_busunlock(desc, flags);
788 void __enable_irq(struct irq_desc *desc)
790 switch (desc->depth) {
794 irq_desc_get_irq(desc));
797 if (desc->istate & IRQS_SUSPENDED)
800 irq_settings_set_noprobe(desc);
808 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
812 desc->depth--;
825 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL !
830 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
832 if (!desc)
834 if (WARN(!desc->irq_data.chip,
838 __enable_irq(desc);
840 irq_put_desc_busunlock(desc, flags);
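
__disable_irq()/__enable_irq() maintain desc->depth, so calls nest: only the 0->1 disable actually masks the line and only the 1->0 enable starts it up again (the switch at line 790 warns on an unbalanced enable). Sketch:

#include <linux/interrupt.h>

static void mydev_reprogram(unsigned int irq)
{
        disable_irq(irq);       /* depth 0 -> 1: masks and waits for handlers */
        disable_irq(irq);       /* depth 1 -> 2: bookkeeping only */

        /* ... safely rewrite device registers here ... */

        enable_irq(irq);        /* depth 2 -> 1: still masked */
        enable_irq(irq);        /* depth 1 -> 0: line unmasked again */
}
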
860 struct irq_desc *desc = irq_to_desc(irq);
863 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
866 if (desc->irq_data.chip->irq_set_wake)
867 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
894 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
897 if (!desc)
901 if (desc->istate & IRQS_NMI) {
910 if (desc->wake_depth++ == 0) {
913 desc->wake_depth = 0;
915 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
918 if (desc->wake_depth == 0) {
920 } else if (--desc->wake_depth == 0) {
923 desc->wake_depth = 1;
925 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
930 irq_put_desc_busunlock(desc, flags);
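
wake_depth is reference counted the same way, so enable_irq_wake()/disable_irq_wake() pairs from different paths compose. A hedged suspend/resume sketch:

#include <linux/interrupt.h>

/* Arm the line as a system wakeup source only while asleep. */
static int mydev_suspend_irq(unsigned int irq)
{
        return enable_irq_wake(irq);    /* wake_depth 0 -> 1 sets IRQD_WAKEUP_STATE */
}

static int mydev_resume_irq(unsigned int irq)
{
        return disable_irq_wake(irq);   /* wake_depth 1 -> 0 clears it */
}
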
943 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
946 if (!desc)
949 if (irq_settings_can_request(desc)) {
950 if (!desc->action ||
951 irqflags & desc->action->flags & IRQF_SHARED)
954 irq_put_desc_unlock(desc, flags);
958 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
960 struct irq_chip *chip = desc->irq_data.chip;
969 irq_desc_get_irq(desc),
975 if (!irqd_irq_masked(&desc->irq_data))
976 mask_irq(desc);
977 if (!irqd_irq_disabled(&desc->irq_data))
983 ret = chip->irq_set_type(&desc->irq_data, flags);
988 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
989 irqd_set(&desc->irq_data, flags);
993 flags = irqd_get_trigger_type(&desc->irq_data);
994 irq_settings_set_trigger_mask(desc, flags);
995 irqd_clear(&desc->irq_data, IRQD_LEVEL);
996 irq_settings_clr_level(desc);
998 irq_settings_set_level(desc);
999 irqd_set(&desc->irq_data, IRQD_LEVEL);
1006 flags, irq_desc_get_irq(desc), chip->irq_set_type);
1009 unmask_irq(desc);
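
__irq_set_trigger() is reached from irq_set_irq_type() and from request-time IRQF_TRIGGER_* flags; note how it masks the line around chip->irq_set_type() for IRQCHIP_SET_TYPE_MASKED chips (the unmask at line 1009). Caller-side sketch:

#include <linux/irq.h>
#include <linux/interrupt.h>

/* Reconfigure a line for rising-edge triggering. */
static int mydev_make_edge(unsigned int irq)
{
        return irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
}
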
1017 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1019 if (!desc)
1022 desc->parent_irq = parent_irq;
1024 irq_put_desc_unlock(desc, flags);
1060 static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1079 raw_spin_lock_irq(&desc->lock);
1084 if (cpumask_available(desc->irq_common_data.affinity)) {
1087 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1091 raw_spin_unlock_irq(&desc->lock);
1098 static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1101 static int irq_wait_for_interrupt(struct irq_desc *desc,
1106 irq_thread_check_affinity(desc, action);
1133 static void irq_finalize_oneshot(struct irq_desc *desc,
1136 if (!(desc->istate & IRQS_ONESHOT) ||
1140 chip_bus_lock(desc);
1141 raw_spin_lock_irq(&desc->lock);
1153 * versus "desc->threads_oneshot |= action->thread_mask;" in
1157 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
1158 raw_spin_unlock_irq(&desc->lock);
1159 chip_bus_sync_unlock(desc);
1172 desc->threads_oneshot &= ~action->thread_mask;
1174 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1175 irqd_irq_masked(&desc->irq_data))
1176 unmask_threaded_irq(desc);
1179 raw_spin_unlock_irq(&desc->lock);
1180 chip_bus_sync_unlock(desc);
1190 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1199 atomic_inc(&desc->threads_handled);
1201 irq_finalize_oneshot(desc, action);
1213 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1220 atomic_inc(&desc->threads_handled);
1222 irq_finalize_oneshot(desc, action);
1226 void wake_threads_waitq(struct irq_desc *desc)
1228 if (atomic_dec_and_test(&desc->threads_active))
1229 wake_up(&desc->wait_for_threads);
1235 struct irq_desc *desc;
1247 desc = irq_to_desc(action->irq);
1250 * desc->threads_active and wake possible waiters.
1253 wake_threads_waitq(desc);
1255 /* Prevent a stale desc->threads_oneshot */
1256 irq_finalize_oneshot(desc, action);
1259 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1266 raw_spin_lock_irq(&desc->lock);
1267 __irq_wake_thread(desc, secondary);
1268 raw_spin_unlock_irq(&desc->lock);
1274 static void irq_thread_set_ready(struct irq_desc *desc,
1278 wake_up(&desc->wait_for_threads);
1285 static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
1292 wait_event(desc->wait_for_threads,
1303 struct irq_desc *desc = irq_to_desc(action->irq);
1304 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1307 irq_thread_set_ready(desc, action);
1320 while (!irq_wait_for_interrupt(desc, action)) {
1323 action_ret = handler_fn(desc, action);
1325 irq_wake_secondary(desc, action);
1327 wake_threads_waitq(desc);
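
irq_thread() above (from line 1303) is the kthread body behind request_threaded_irq(): it waits in irq_wait_for_interrupt(), runs handler_fn, then drops threads_active via wake_threads_waitq(). A hedged sketch of the API it serves (names hypothetical):

#include <linux/interrupt.h>

static irqreturn_t mydev_hardirq(int irq, void *dev_id)
{
        /* Hard irq context: ack the device quickly, defer real work. */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t mydev_thread(int irq, void *dev_id)
{
        /* Runs in the irq_thread() loop above; sleeping is allowed. */
        return IRQ_HANDLED;
}

static int mydev_setup(unsigned int irq, void *dev)
{
        return request_threaded_irq(irq, mydev_hardirq, mydev_thread,
                                    IRQF_ONESHOT, "mydev", dev);
}
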
1348 struct irq_desc *desc = irq_to_desc(irq);
1352 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1355 raw_spin_lock_irqsave(&desc->lock, flags);
1356 for_each_action_of_desc(desc, action) {
1359 __irq_wake_thread(desc, action);
1363 raw_spin_unlock_irqrestore(&desc->lock, flags);
1406 static int irq_request_resources(struct irq_desc *desc)
1408 struct irq_data *d = &desc->irq_data;
1414 static void irq_release_resources(struct irq_desc *desc)
1416 struct irq_data *d = &desc->irq_data;
1423 static bool irq_supports_nmi(struct irq_desc *desc)
1425 struct irq_data *d = irq_desc_get_irq_data(desc);
1439 static int irq_nmi_setup(struct irq_desc *desc)
1441 struct irq_data *d = irq_desc_get_irq_data(desc);
1447 static void irq_nmi_teardown(struct irq_desc *desc)
1449 struct irq_data *d = irq_desc_get_irq_data(desc);
1497 * desc->request_mutex Provides serialization against a concurrent free_irq()
1499 * desc->lock Provides serialization against hard interrupts
1501 * chip_bus_lock and desc->lock are sufficient for all other management and
1502 * interrupt related functions. desc->request_mutex solely serializes
1506 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1512 if (!desc)
1515 if (desc->irq_data.chip == &no_irq_chip)
1517 if (!try_module_get(desc->owner))
1527 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1533 nested = irq_settings_is_nested_thread(desc);
1546 if (irq_settings_can_thread(desc)) {
1578 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1584 * chip bus lock and desc->lock. Also protects against handing out
1588 mutex_lock(&desc->request_mutex);
1595 chip_bus_lock(desc);
1598 if (!desc->action) {
1599 ret = irq_request_resources(desc);
1602 new->name, irq, desc->irq_data.chip->name);
1611 * desc->request_mutex or the optional bus lock.
1613 raw_spin_lock_irqsave(&desc->lock, flags);
1614 old_ptr = &desc->action;
1627 if (desc->istate & IRQS_NMI) {
1629 new->name, irq, desc->irq_data.chip->name);
1638 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1639 oldtype = irqd_get_trigger_type(&desc->irq_data);
1642 irqd_set_trigger_type(&desc->irq_data, oldtype);
1690 * desc->threads_oneshot to indicate that the
1694 * line have completed desc->threads_active becomes
1699 * interrupt handlers, then desc->threads_active is
1711 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1736 ret = __irq_set_trigger(desc,
1754 ret = irq_activate(desc);
1758 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1760 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1763 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1764 irq_settings_set_per_cpu(desc);
1766 irq_settings_set_no_debug(desc);
1770 irq_settings_set_no_debug(desc);
1773 desc->istate |= IRQS_ONESHOT;
1777 irq_settings_set_no_balancing(desc);
1778 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1782 irq_settings_can_autoenable(desc)) {
1783 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1793 desc->depth = 1;
1798 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1808 irq_pm_install_action(desc, new);
1811 desc->irq_count = 0;
1812 desc->irqs_unhandled = 0;
1818 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1819 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1820 __enable_irq(desc);
1823 raw_spin_unlock_irqrestore(&desc->lock, flags);
1824 chip_bus_sync_unlock(desc);
1825 mutex_unlock(&desc->request_mutex);
1827 irq_setup_timings(desc, new);
1829 wake_up_and_wait_for_irq_thread_ready(desc, new);
1830 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
1832 register_irq_proc(irq, desc);
1848 raw_spin_unlock_irqrestore(&desc->lock, flags);
1850 if (!desc->action)
1851 irq_release_resources(desc);
1853 chip_bus_sync_unlock(desc);
1854 mutex_unlock(&desc->request_mutex);
1870 module_put(desc->owner);
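
For shared lines, __setup_irq() only chains a new action if every sharer agrees on the trigger type and IRQF_ONESHOT (the oldtype comparison at lines 1638-1642). A consumer sketch with a hypothetical status register:

#include <linux/interrupt.h>
#include <linux/io.h>

struct mydev { void __iomem *regs; };   /* hypothetical device state */

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
        struct mydev *md = dev_id;

        /* On a shared line, check our own status register first. */
        if (!(readl(md->regs + 0x04) & 0x1))    /* hypothetical layout */
                return IRQ_NONE;                /* not ours; try next action */
        return IRQ_HANDLED;
}

static int mydev_share_line(unsigned int irq, struct mydev *md)
{
        /* dev_id must be unique and non-NULL for IRQF_SHARED. */
        return request_irq(irq, mydev_isr, IRQF_SHARED, "mydev", md);
}
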
1878 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1880 unsigned irq = desc->irq_data.irq;
1886 mutex_lock(&desc->request_mutex);
1887 chip_bus_lock(desc);
1888 raw_spin_lock_irqsave(&desc->lock, flags);
1894 action_ptr = &desc->action;
1900 raw_spin_unlock_irqrestore(&desc->lock, flags);
1901 chip_bus_sync_unlock(desc);
1902 mutex_unlock(&desc->request_mutex);
1914 irq_pm_remove_action(desc, action);
1917 if (!desc->action) {
1918 irq_settings_clr_disable_unlazy(desc);
1920 irq_shutdown(desc);
1925 if (WARN_ON_ONCE(desc->affinity_hint))
1926 desc->affinity_hint = NULL;
1929 raw_spin_unlock_irqrestore(&desc->lock, flags);
1940 * The still held desc->request_mutex protects against a
1944 chip_bus_sync_unlock(desc);
1953 __synchronize_irq(desc);
1984 if (!desc->action) {
1989 chip_bus_lock(desc);
1994 raw_spin_lock_irqsave(&desc->lock, flags);
1995 irq_domain_deactivate_irq(&desc->irq_data);
1996 raw_spin_unlock_irqrestore(&desc->lock, flags);
1998 irq_release_resources(desc);
1999 chip_bus_sync_unlock(desc);
2000 irq_remove_timings(desc);
2003 mutex_unlock(&desc->request_mutex);
2005 irq_chip_pm_put(&desc->irq_data);
2006 module_put(desc->owner);
2029 struct irq_desc *desc = irq_to_desc(irq);
2033 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2037 if (WARN_ON(desc->affinity_notify))
2038 desc->affinity_notify = NULL;
2041 action = __free_irq(desc, dev_id);
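
__free_irq() walks desc->action comparing dev_id, so free_irq() must receive exactly the cookie passed at request time; it hands back the action's name as a const void *. Sketch:

#include <linux/interrupt.h>

struct mydev;   /* opaque here; same cookie as passed to request_irq() */

static void mydev_teardown(unsigned int irq, struct mydev *md)
{
        const void *name = free_irq(irq, md);

        pr_debug("released handler %s\n", (const char *)name);
}
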
2052 /* This function must be called with desc->lock held */
2053 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
2057 desc->istate &= ~IRQS_NMI;
2059 if (!WARN_ON(desc->action == NULL)) {
2060 irq_pm_remove_action(desc, desc->action);
2061 devname = desc->action->name;
2062 unregister_handler_proc(irq, desc->action);
2064 kfree(desc->action);
2065 desc->action = NULL;
2068 irq_settings_clr_disable_unlazy(desc);
2069 irq_shutdown_and_deactivate(desc);
2071 irq_release_resources(desc);
2073 irq_chip_pm_put(&desc->irq_data);
2074 module_put(desc->owner);
2081 struct irq_desc *desc = irq_to_desc(irq);
2085 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
2088 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2092 if (WARN_ON(desc->depth == 0))
2095 raw_spin_lock_irqsave(&desc->lock, flags);
2097 irq_nmi_teardown(desc);
2098 devname = __cleanup_nmi(irq, desc);
2100 raw_spin_unlock_irqrestore(&desc->lock, flags);
2152 struct irq_desc *desc;
2177 desc = irq_to_desc(irq);
2178 if (!desc)
2181 if (!irq_settings_can_request(desc) ||
2182 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2201 retval = irq_chip_pm_get(&desc->irq_data);
2207 retval = __setup_irq(irq, desc, action);
2210 irq_chip_pm_put(&desc->irq_data);
2258 struct irq_desc *desc;
2264 desc = irq_to_desc(irq);
2265 if (!desc)
2268 if (irq_settings_is_nested_thread(desc)) {
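
request_any_context_irq() (from line 2258) uses the nested-thread check above to decide between a hardirq and a threaded handler, and tells the caller which it picked:

#include <linux/interrupt.h>

static int mydev_request(unsigned int irq, irq_handler_t handler, void *dev)
{
        int ret = request_any_context_irq(irq, handler, 0, "mydev", dev);

        if (ret == IRQC_IS_NESTED)
                pr_info("mydev: handler runs in thread context\n");
        else if (ret == IRQC_IS_HARDIRQ)
                pr_info("mydev: handler runs in hard irq context\n");
        return ret < 0 ? ret : 0;
}
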
2309 struct irq_desc *desc;
2326 desc = irq_to_desc(irq);
2328 if (!desc || (irq_settings_can_autoenable(desc) &&
2330 !irq_settings_can_request(desc) ||
2331 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2332 !irq_supports_nmi(desc))
2344 retval = irq_chip_pm_get(&desc->irq_data);
2348 retval = __setup_irq(irq, desc, action);
2352 raw_spin_lock_irqsave(&desc->lock, flags);
2355 desc->istate |= IRQS_NMI;
2356 retval = irq_nmi_setup(desc);
2358 __cleanup_nmi(irq, desc);
2359 raw_spin_unlock_irqrestore(&desc->lock, flags);
2363 raw_spin_unlock_irqrestore(&desc->lock, flags);
2368 irq_chip_pm_put(&desc->irq_data);
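
request_nmi() (from line 2309) refuses shared use, per-cpu-devid descriptors and chips without NMI support, then flips the descriptor into NMI mode under desc->lock. A sketch under the constraints it enforces (no sharing; IRQF_PERCPU is expected so the line is exempt from affinity balancing):

#include <linux/interrupt.h>

static irqreturn_t mydev_nmi(int irq, void *dev_id)
{
        /* NMI context: no sleeping, no regular spinlocks. */
        return IRQ_HANDLED;
}

static int mydev_setup_nmi(unsigned int irq, void *dev)
{
        return request_nmi(irq, mydev_nmi, IRQF_PERCPU, "mydev-nmi", dev);
}
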
2379 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2381 if (!desc)
2390 type = irqd_get_trigger_type(&desc->irq_data);
2395 ret = __irq_set_trigger(desc, type);
2403 irq_percpu_enable(desc, cpu);
2405 irq_put_desc_unlock(desc, flags);
2424 struct irq_desc *desc;
2428 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2429 if (!desc)
2432 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2433 irq_put_desc_unlock(desc, flags);
2443 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2445 if (!desc)
2448 irq_percpu_disable(desc, cpu);
2449 irq_put_desc_unlock(desc, flags);
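
enable_percpu_irq()/disable_percpu_irq() act only on the calling CPU, which is why drivers usually invoke them from CPU hotplug callbacks. A sketch with a hypothetical module-level mydev_irq set at probe time:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/cpuhotplug.h>

static unsigned int mydev_irq;  /* hypothetical, filled in at probe */

static int mydev_cpu_starting(unsigned int cpu)
{
        enable_percpu_irq(mydev_irq, IRQ_TYPE_NONE);    /* keep default trigger */
        return 0;
}

static int mydev_cpu_dying(unsigned int cpu)
{
        disable_percpu_irq(mydev_irq);
        return 0;
}

static int mydev_register_hotplug(void)
{
        return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydev:starting",
                                 mydev_cpu_starting, mydev_cpu_dying);
}
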
2463 struct irq_desc *desc = irq_to_desc(irq);
2469 if (!desc)
2472 raw_spin_lock_irqsave(&desc->lock, flags);
2474 action = desc->action;
2480 if (!cpumask_empty(desc->percpu_enabled)) {
2482 irq, cpumask_first(desc->percpu_enabled));
2487 desc->action = NULL;
2489 desc->istate &= ~IRQS_NMI;
2491 raw_spin_unlock_irqrestore(&desc->lock, flags);
2495 irq_chip_pm_put(&desc->irq_data);
2496 module_put(desc->owner);
2500 raw_spin_unlock_irqrestore(&desc->lock, flags);
2513 struct irq_desc *desc = irq_to_desc(irq);
2515 if (desc && irq_settings_is_per_cpu_devid(desc))
2533 struct irq_desc *desc = irq_to_desc(irq);
2535 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2538 chip_bus_lock(desc);
2540 chip_bus_sync_unlock(desc);
2546 struct irq_desc *desc = irq_to_desc(irq);
2548 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2551 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2566 struct irq_desc *desc = irq_to_desc(irq);
2569 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2572 retval = irq_chip_pm_get(&desc->irq_data);
2576 retval = __setup_irq(irq, desc, act);
2579 irq_chip_pm_put(&desc->irq_data);
2606 struct irq_desc *desc;
2612 desc = irq_to_desc(irq);
2613 if (!desc || !irq_settings_can_request(desc) ||
2614 !irq_settings_is_per_cpu_devid(desc))
2629 retval = irq_chip_pm_get(&desc->irq_data);
2635 retval = __setup_irq(irq, desc, action);
2638 irq_chip_pm_put(&desc->irq_data);
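
__request_percpu_irq() wants a __percpu dev_id: the flow handler hands each CPU its own slot of that cookie. Sketch via the request_percpu_irq() wrapper:

#include <linux/interrupt.h>
#include <linux/percpu.h>

struct mydev_cpu { unsigned long count; };      /* hypothetical per-CPU state */
static DEFINE_PER_CPU(struct mydev_cpu, mydev_cpu_state);

static irqreturn_t mydev_percpu_isr(int irq, void *dev_id)
{
        /* dev_id is this CPU's slot of the percpu cookie. */
        struct mydev_cpu *c = dev_id;

        c->count++;
        return IRQ_HANDLED;
}

static int mydev_request_percpu(unsigned int irq)
{
        return request_percpu_irq(irq, mydev_percpu_isr, "mydev",
                                  &mydev_cpu_state);
}
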
2671 struct irq_desc *desc;
2678 desc = irq_to_desc(irq);
2680 if (!desc || !irq_settings_can_request(desc) ||
2681 !irq_settings_is_per_cpu_devid(desc) ||
2682 irq_settings_can_autoenable(desc) ||
2683 !irq_supports_nmi(desc))
2687 if (desc->istate & IRQS_NMI)
2700 retval = irq_chip_pm_get(&desc->irq_data);
2704 retval = __setup_irq(irq, desc, action);
2708 raw_spin_lock_irqsave(&desc->lock, flags);
2709 desc->istate |= IRQS_NMI;
2710 raw_spin_unlock_irqrestore(&desc->lock, flags);
2715 irq_chip_pm_put(&desc->irq_data);
2738 struct irq_desc *desc;
2743 desc = irq_get_desc_lock(irq, &flags,
2745 if (!desc)
2748 if (WARN(!(desc->istate & IRQS_NMI),
2755 ret = irq_nmi_setup(desc);
2762 irq_put_desc_unlock(desc, flags);
2781 struct irq_desc *desc;
2785 desc = irq_get_desc_lock(irq, &flags,
2787 if (!desc)
2790 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2793 irq_nmi_teardown(desc);
2795 irq_put_desc_unlock(desc, flags);
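
Per-cpu NMIs split setup in two: request_percpu_nmi() once globally, then prepare_percpu_nmi()/enable_percpu_nmi() on each target CPU, with the reverse on teardown. A sketch of the per-CPU half, assumed to run on the target CPU:

#include <linux/interrupt.h>
#include <linux/irq.h>

static int mydev_arm_nmi_this_cpu(unsigned int irq)
{
        int ret = prepare_percpu_nmi(irq);

        if (ret)
                return ret;
        enable_percpu_nmi(irq, IRQ_TYPE_NONE);
        return 0;
}

static void mydev_disarm_nmi_this_cpu(unsigned int irq)
{
        disable_percpu_nmi(irq);
        teardown_percpu_nmi(irq);
}
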
2838 struct irq_desc *desc;
2843 desc = irq_get_desc_buslock(irq, &flags, 0);
2844 if (!desc)
2847 data = irq_desc_get_irq_data(desc);
2851 irq_put_desc_busunlock(desc, flags);
2871 struct irq_desc *desc;
2877 desc = irq_get_desc_buslock(irq, &flags, 0);
2878 if (!desc)
2881 data = irq_desc_get_irq_data(desc);
2902 irq_put_desc_busunlock(desc, flags);
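
irq_get_irqchip_state()/irq_set_irqchip_state() let callers query or forge a line's hardware state through the irqchip. Sketch:

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Ask the irqchip whether the line is pending in hardware. */
static bool mydev_irq_pending(unsigned int irq)
{
        bool pending = false;

        irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
        return pending;
}
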
2933 struct irq_desc *desc;
2937 desc = irq_to_desc(irq);
2938 if (desc)
2939 res = !!(desc->status_use_accessors & bitmask);