Lines matching refs: desc (kernel/irq/manage.c)

38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
40 struct irq_data *irqd = irq_desc_get_irq_data(desc);
50 while (irqd_irq_inprogress(&desc->irq_data))
54 raw_spin_lock_irqsave(&desc->lock, flags);
55 inprogress = irqd_irq_inprogress(&desc->irq_data);
70 raw_spin_unlock_irqrestore(&desc->lock, flags);
100 struct irq_desc *desc = irq_to_desc(irq);
102 if (desc) {
103 __synchronize_hardirq(desc, false);
104 return !atomic_read(&desc->threads_active);
111 static void __synchronize_irq(struct irq_desc *desc)
113 __synchronize_hardirq(desc, true);
118 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
138 struct irq_desc *desc = irq_to_desc(irq);
140 if (desc)
141 __synchronize_irq(desc);
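
The matches above come from __synchronize_hardirq() and __synchronize_irq(): the former spins while the hard handler is flagged in progress, the latter additionally sleeps on desc->wait_for_threads until threads_active drops to zero. A minimal sketch of the exported synchronize_irq() from a driver's stop path; struct my_dev and its fields are hypothetical:

    #include <linux/interrupt.h>

    struct my_dev {
            int irq;
            bool stopping;
    };

    static void my_dev_stop(struct my_dev *dev)
    {
            WRITE_ONCE(dev->stopping, true);    /* handlers check this */
            /*
             * Wait for hard and threaded handlers already in flight to
             * finish; must not be called from the handler itself.
             */
            synchronize_irq(dev->irq);
    }
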
148 static bool __irq_can_set_affinity(struct irq_desc *desc)
150 if (!desc || !irqd_can_balance(&desc->irq_data) ||
151 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
175 struct irq_desc *desc = irq_to_desc(irq);
177 return __irq_can_set_affinity(desc) &&
178 !irqd_affinity_is_managed(&desc->irq_data);
183 * @desc: irq descriptor which has affinity changed
187 * set_cpus_allowed_ptr() here as we hold desc->lock and this
190 void irq_set_thread_affinity(struct irq_desc *desc)
194 for_each_action_of_desc(desc, action) {
224 struct irq_desc *desc = irq_data_to_desc(data);
288 cpumask_copy(desc->irq_common_data.affinity, mask);
292 irq_set_thread_affinity(desc);
303 struct irq_desc *desc = irq_data_to_desc(data);
306 irq_copy_pending(desc, dest);
335 struct irq_desc *desc = irq_data_to_desc(data);
350 cpumask_copy(desc->irq_common_data.affinity, mask);
360 struct irq_desc *desc = irq_data_to_desc(data);
373 irq_copy_pending(desc, mask);
376 if (desc->affinity_notify) {
377 kref_get(&desc->affinity_notify->kref);
378 if (!schedule_work(&desc->affinity_notify->work)) {
380 kref_put(&desc->affinity_notify->kref,
381 desc->affinity_notify->release);
407 struct irq_desc *desc;
419 desc = irq_get_desc_buslock(irq, &flags, 0);
420 if (!desc)
424 if (irqd_is_started(&desc->irq_data)) {
430 if (irqd_affinity_is_managed(&desc->irq_data)) {
439 activated = irqd_is_activated(&desc->irq_data);
441 irq_domain_deactivate_irq(&desc->irq_data);
444 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
445 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
448 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
452 irq_domain_activate_irq(&desc->irq_data, false);
455 irq_put_desc_busunlock(desc, flags);
462 struct irq_desc *desc = irq_to_desc(irq);
466 if (!desc)
469 raw_spin_lock_irqsave(&desc->lock, flags);
470 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
471 raw_spin_unlock_irqrestore(&desc->lock, flags);
509 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
511 if (!desc)
513 desc->affinity_hint = m;
514 irq_put_desc_unlock(desc, flags);
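
irq_set_affinity() above resolves the descriptor, takes desc->lock and forwards to irq_set_affinity_locked(); the hint setter only records the mask in desc->affinity_hint. A sketch of binding one interrupt to a CPU; note the hint helpers were renamed across kernel versions, so irq_update_affinity_hint() is an assumption about the tree in use:

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>

    static int my_bind_irq_to_cpu(unsigned int irq, int cpu)
    {
            int ret;

            /* Can fail if the chip rejects the mask or the CPU is offline. */
            ret = irq_set_affinity(irq, cpumask_of(cpu));
            if (ret)
                    return ret;

            /* Advisory only: published via /proc/irq/<n>/affinity_hint. */
            return irq_update_affinity_hint(irq, cpumask_of(cpu));
    }
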
525 struct irq_desc *desc = irq_to_desc(notify->irq);
529 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
532 raw_spin_lock_irqsave(&desc->lock, flags);
533 if (irq_move_pending(&desc->irq_data))
534 irq_get_pending(cpumask, desc);
536 cpumask_copy(cpumask, desc->irq_common_data.affinity);
537 raw_spin_unlock_irqrestore(&desc->lock, flags);
560 struct irq_desc *desc = irq_to_desc(irq);
567 if (!desc || irq_is_nmi(desc))
577 raw_spin_lock_irqsave(&desc->lock, flags);
578 old_notify = desc->affinity_notify;
579 desc->affinity_notify = notify;
580 raw_spin_unlock_irqrestore(&desc->lock, flags);
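
irq_set_affinity_notifier() swaps desc->affinity_notify under desc->lock; when the affinity later changes, the core takes a kref and schedules notify->work, dropping the reference through notify->release. A registration sketch (all my_* names hypothetical):

    #include <linux/interrupt.h>
    #include <linux/slab.h>

    static void my_affinity_changed(struct irq_affinity_notify *notify,
                                    const cpumask_t *mask)
    {
            /* Runs from a workqueue after the affinity was updated. */
            pr_info("irq %u affinity changed\n", notify->irq);
    }

    static void my_affinity_release(struct kref *ref)
    {
            kfree(container_of(ref, struct irq_affinity_notify, kref));
    }

    static int my_register_affinity_notifier(unsigned int irq)
    {
            struct irq_affinity_notify *notify;

            notify = kzalloc(sizeof(*notify), GFP_KERNEL);
            if (!notify)
                    return -ENOMEM;
            notify->notify = my_affinity_changed;
            notify->release = my_affinity_release;
            /* The core fills notify->irq and inits the kref and work. */
            return irq_set_affinity_notifier(irq, notify);
    }
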
598 int irq_setup_affinity(struct irq_desc *desc)
601 int ret, node = irq_desc_get_node(desc);
606 if (!__irq_can_set_affinity(desc))
614 if (irqd_affinity_is_managed(&desc->irq_data) ||
615 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
616 if (cpumask_intersects(desc->irq_common_data.affinity,
618 set = desc->irq_common_data.affinity;
620 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
634 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
640 int irq_setup_affinity(struct irq_desc *desc)
642 return irq_select_affinity(irq_desc_get_irq(desc));
662 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
667 if (!desc)
670 data = irq_desc_get_irq_data(desc);
684 irq_put_desc_unlock(desc, flags);
690 void __disable_irq(struct irq_desc *desc)
692 if (!desc->depth++)
693 irq_disable(desc);
699 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
701 if (!desc)
703 __disable_irq(desc);
704 irq_put_desc_busunlock(desc, flags);
788 void __enable_irq(struct irq_desc *desc)
790 switch (desc->depth) {
794 irq_desc_get_irq(desc));
797 if (desc->istate & IRQS_SUSPENDED)
800 irq_settings_set_noprobe(desc);
812 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
816 desc->depth--;
829 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL!
834 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
836 if (!desc)
838 if (WARN(!desc->irq_data.chip,
842 __enable_irq(desc);
844 irq_put_desc_busunlock(desc, flags);
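
__disable_irq()/__enable_irq() above make disable/enable reference counted through desc->depth: only the 0 to 1 transition masks the line, and only the 1 to 0 transition runs irq_startup() again. Nesting therefore just works, as in this sketch:

    #include <linux/interrupt.h>

    static void my_nested_disable_demo(unsigned int irq)
    {
            disable_irq(irq);        /* depth 0 -> 1: mask + synchronize */
            disable_irq_nosync(irq); /* depth 1 -> 2: counter bump only */
            enable_irq(irq);         /* depth 2 -> 1: line stays masked */
            enable_irq(irq);         /* depth 1 -> 0: line unmasked */
    }
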
864 struct irq_desc *desc = irq_to_desc(irq);
867 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
870 if (desc->irq_data.chip->irq_set_wake)
871 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
898 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
901 if (!desc)
905 if (irq_is_nmi(desc)) {
914 if (desc->wake_depth++ == 0) {
917 desc->wake_depth = 0;
919 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
922 if (desc->wake_depth == 0) {
924 } else if (--desc->wake_depth == 0) {
927 desc->wake_depth = 1;
929 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
934 irq_put_desc_busunlock(desc, flags);
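
irq_set_irq_wake() above keeps its own desc->wake_depth counter and flags the descriptor with IRQD_WAKEUP_STATE on first use. Drivers normally reach it through the enable_irq_wake()/disable_irq_wake() wrappers from their PM callbacks; a sketch, with struct my_dev hypothetical:

    #include <linux/device.h>
    #include <linux/interrupt.h>
    #include <linux/pm_wakeup.h>

    struct my_dev { int irq; };

    static int my_suspend(struct device *dev)
    {
            struct my_dev *md = dev_get_drvdata(dev);

            if (device_may_wakeup(dev))
                    enable_irq_wake(md->irq);   /* wake_depth 0 -> 1 */
            return 0;
    }

    static int my_resume(struct device *dev)
    {
            struct my_dev *md = dev_get_drvdata(dev);

            if (device_may_wakeup(dev))
                    disable_irq_wake(md->irq);  /* wake_depth 1 -> 0 */
            return 0;
    }
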
947 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
950 if (!desc)
953 if (irq_settings_can_request(desc)) {
954 if (!desc->action ||
955 irqflags & desc->action->flags & IRQF_SHARED)
958 irq_put_desc_unlock(desc, flags);
962 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
964 struct irq_chip *chip = desc->irq_data.chip;
973 irq_desc_get_irq(desc),
979 if (!irqd_irq_masked(&desc->irq_data))
980 mask_irq(desc);
981 if (!irqd_irq_disabled(&desc->irq_data))
987 ret = chip->irq_set_type(&desc->irq_data, flags);
992 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
993 irqd_set(&desc->irq_data, flags);
997 flags = irqd_get_trigger_type(&desc->irq_data);
998 irq_settings_set_trigger_mask(desc, flags);
999 irqd_clear(&desc->irq_data, IRQD_LEVEL);
1000 irq_settings_clr_level(desc);
1002 irq_settings_set_level(desc);
1003 irqd_set(&desc->irq_data, IRQD_LEVEL);
1010 flags, irq_desc_get_irq(desc), chip->irq_set_type);
1013 unmask_irq(desc);
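
__irq_set_trigger() above masks a live line around chip->irq_set_type() and, on success, mirrors the resulting trigger and level bits into both the irqd state and the irq_settings. The driver-facing wrapper is irq_set_irq_type(); a sketch:

    #include <linux/irq.h>

    static int my_configure_trigger(unsigned int irq)
    {
            /* Same effect as passing IRQF_TRIGGER_RISING at request time. */
            return irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
    }
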
1021 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1023 if (!desc)
1026 desc->parent_irq = parent_irq;
1028 irq_put_desc_unlock(desc, flags);
1064 static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1083 raw_spin_lock_irq(&desc->lock);
1088 if (cpumask_available(desc->irq_common_data.affinity)) {
1091 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1095 raw_spin_unlock_irq(&desc->lock);
1102 static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1105 static int irq_wait_for_interrupt(struct irq_desc *desc,
1110 irq_thread_check_affinity(desc, action);
1137 static void irq_finalize_oneshot(struct irq_desc *desc,
1140 if (!(desc->istate & IRQS_ONESHOT) ||
1144 chip_bus_lock(desc);
1145 raw_spin_lock_irq(&desc->lock);
1157 * versus "desc->threads_oneshot |= action->thread_mask;" in
1161 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
1162 raw_spin_unlock_irq(&desc->lock);
1163 chip_bus_sync_unlock(desc);
1176 desc->threads_oneshot &= ~action->thread_mask;
1178 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1179 irqd_irq_masked(&desc->irq_data))
1180 unmask_threaded_irq(desc);
1183 raw_spin_unlock_irq(&desc->lock);
1184 chip_bus_sync_unlock(desc);
1194 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1203 atomic_inc(&desc->threads_handled);
1205 irq_finalize_oneshot(desc, action);
1217 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1224 atomic_inc(&desc->threads_handled);
1226 irq_finalize_oneshot(desc, action);
1230 void wake_threads_waitq(struct irq_desc *desc)
1232 if (atomic_dec_and_test(&desc->threads_active))
1233 wake_up(&desc->wait_for_threads);
1239 struct irq_desc *desc;
1251 desc = irq_to_desc(action->irq);
1254 * desc->threads_active and wake possible waiters.
1257 wake_threads_waitq(desc);
1259 /* Prevent a stale desc->threads_oneshot */
1260 irq_finalize_oneshot(desc, action);
1263 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1270 raw_spin_lock_irq(&desc->lock);
1271 __irq_wake_thread(desc, secondary);
1272 raw_spin_unlock_irq(&desc->lock);
1278 static void irq_thread_set_ready(struct irq_desc *desc,
1282 wake_up(&desc->wait_for_threads);
1289 static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
1296 wait_event(desc->wait_for_threads,
1307 struct irq_desc *desc = irq_to_desc(action->irq);
1308 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1311 irq_thread_set_ready(desc, action);
1324 while (!irq_wait_for_interrupt(desc, action)) {
1327 action_ret = handler_fn(desc, action);
1329 irq_wake_secondary(desc, action);
1331 wake_threads_waitq(desc);
1352 struct irq_desc *desc = irq_to_desc(irq);
1356 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1359 raw_spin_lock_irqsave(&desc->lock, flags);
1360 for_each_action_of_desc(desc, action) {
1363 __irq_wake_thread(desc, action);
1367 raw_spin_unlock_irqrestore(&desc->lock, flags);
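
The block above is the threaded-handler machinery: irq_thread() loops in irq_wait_for_interrupt(), calls the handler function, balances desc->threads_active via wake_threads_waitq(), and irq_finalize_oneshot() unmasks a ONESHOT line once the last thread finishes. A request_threaded_irq() sketch; struct my_dev and the my_dev_*() helpers are hypothetical stubs:

    #include <linux/interrupt.h>

    struct my_dev { int irq; };

    /* Hypothetical hardware helpers, stubbed for the sketch. */
    static bool my_dev_irq_pending(struct my_dev *dev) { return true; }
    static void my_dev_quiesce(struct my_dev *dev) { }
    static void my_dev_process(struct my_dev *dev) { }

    static irqreturn_t my_hardirq(int irq, void *dev_id)
    {
            struct my_dev *dev = dev_id;

            if (!my_dev_irq_pending(dev))
                    return IRQ_NONE;        /* not ours */
            my_dev_quiesce(dev);            /* stop the source */
            return IRQ_WAKE_THREAD;         /* wake my_thread_fn() */
    }

    static irqreturn_t my_thread_fn(int irq, void *dev_id)
    {
            struct my_dev *dev = dev_id;

            my_dev_process(dev);            /* sleeping is fine here */
            return IRQ_HANDLED;
    }

    static int my_probe(struct my_dev *dev)
    {
            return request_threaded_irq(dev->irq, my_hardirq, my_thread_fn,
                                        0, "my-dev", dev);
    }
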
1410 static int irq_request_resources(struct irq_desc *desc)
1412 struct irq_data *d = &desc->irq_data;
1418 static void irq_release_resources(struct irq_desc *desc)
1420 struct irq_data *d = &desc->irq_data;
1427 static bool irq_supports_nmi(struct irq_desc *desc)
1429 struct irq_data *d = irq_desc_get_irq_data(desc);
1443 static int irq_nmi_setup(struct irq_desc *desc)
1445 struct irq_data *d = irq_desc_get_irq_data(desc);
1451 static void irq_nmi_teardown(struct irq_desc *desc)
1453 struct irq_data *d = irq_desc_get_irq_data(desc);
1501 * desc->request_mutex Provides serialization against a concurrent free_irq()
1503 * desc->lock Provides serialization against hard interrupts
1505 * chip_bus_lock and desc->lock are sufficient for all other management and
1506 * interrupt related functions. desc->request_mutex solely serializes
1510 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1516 if (!desc)
1519 if (desc->irq_data.chip == &no_irq_chip)
1521 if (!try_module_get(desc->owner))
1531 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1537 nested = irq_settings_is_nested_thread(desc);
1550 if (irq_settings_can_thread(desc)) {
1582 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1588 * chip bus lock and desc->lock. Also protects against handing out
1592 mutex_lock(&desc->request_mutex);
1599 chip_bus_lock(desc);
1602 if (!desc->action) {
1603 ret = irq_request_resources(desc);
1606 new->name, irq, desc->irq_data.chip->name);
1615 * desc->request_mutex or the optional bus lock.
1617 raw_spin_lock_irqsave(&desc->lock, flags);
1618 old_ptr = &desc->action;
1631 if (irq_is_nmi(desc)) {
1633 new->name, irq, desc->irq_data.chip->name);
1642 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1643 oldtype = irqd_get_trigger_type(&desc->irq_data);
1646 irqd_set_trigger_type(&desc->irq_data, oldtype);
1694 * desc->threads_active to indicate that the
1698 * line have completed desc->threads_active becomes
1703 * interrupt handlers, then desc->threads_active is
1715 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1740 ret = __irq_set_trigger(desc,
1758 ret = irq_activate(desc);
1762 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1764 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1767 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1768 irq_settings_set_per_cpu(desc);
1770 irq_settings_set_no_debug(desc);
1774 irq_settings_set_no_debug(desc);
1777 desc->istate |= IRQS_ONESHOT;
1781 irq_settings_set_no_balancing(desc);
1782 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1786 irq_settings_can_autoenable(desc)) {
1787 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1797 desc->depth = 1;
1802 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1812 irq_pm_install_action(desc, new);
1815 desc->irq_count = 0;
1816 desc->irqs_unhandled = 0;
1822 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1823 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1824 __enable_irq(desc);
1827 raw_spin_unlock_irqrestore(&desc->lock, flags);
1828 chip_bus_sync_unlock(desc);
1829 mutex_unlock(&desc->request_mutex);
1831 irq_setup_timings(desc, new);
1833 wake_up_and_wait_for_irq_thread_ready(desc, new);
1834 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
1836 register_irq_proc(irq, desc);
1852 raw_spin_unlock_irqrestore(&desc->lock, flags);
1854 if (!desc->action)
1855 irq_release_resources(desc);
1857 chip_bus_sync_unlock(desc);
1858 mutex_unlock(&desc->request_mutex);
1874 module_put(desc->owner);
1882 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1884 unsigned int irq = desc->irq_data.irq;
1890 mutex_lock(&desc->request_mutex);
1891 chip_bus_lock(desc);
1892 raw_spin_lock_irqsave(&desc->lock, flags);
1898 action_ptr = &desc->action;
1904 raw_spin_unlock_irqrestore(&desc->lock, flags);
1905 chip_bus_sync_unlock(desc);
1906 mutex_unlock(&desc->request_mutex);
1918 irq_pm_remove_action(desc, action);
1921 if (!desc->action) {
1922 irq_settings_clr_disable_unlazy(desc);
1924 irq_shutdown(desc);
1929 if (WARN_ON_ONCE(desc->affinity_hint))
1930 desc->affinity_hint = NULL;
1933 raw_spin_unlock_irqrestore(&desc->lock, flags);
1944 * The still held desc->request_mutex protects against a
1948 chip_bus_sync_unlock(desc);
1957 __synchronize_irq(desc);
1988 if (!desc->action) {
1993 chip_bus_lock(desc);
1998 raw_spin_lock_irqsave(&desc->lock, flags);
1999 irq_domain_deactivate_irq(&desc->irq_data);
2000 raw_spin_unlock_irqrestore(&desc->lock, flags);
2002 irq_release_resources(desc);
2003 chip_bus_sync_unlock(desc);
2004 irq_remove_timings(desc);
2007 mutex_unlock(&desc->request_mutex);
2009 irq_chip_pm_put(&desc->irq_data);
2010 module_put(desc->owner);
2033 struct irq_desc *desc = irq_to_desc(irq);
2037 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2041 if (WARN_ON(desc->affinity_notify))
2042 desc->affinity_notify = NULL;
2045 action = __free_irq(desc, dev_id);
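
__free_irq() unlinks the matching action under desc->lock, shuts the line down if it was the last one, then synchronizes and reaps the irq thread outside the lock; free_irq() wraps it and returns the devname. Teardown sketch (struct my_dev hypothetical):

    #include <linux/interrupt.h>
    #include <linux/slab.h>

    struct my_dev { int irq; };

    static void my_remove(struct my_dev *dev)
    {
            /*
             * After free_irq() returns, no handler for this dev_id is
             * still running, so dev may now be freed safely.
             */
            free_irq(dev->irq, dev);
            kfree(dev);
    }
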
2056 /* This function must be called with desc->lock held */
2057 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
2061 desc->istate &= ~IRQS_NMI;
2063 if (!WARN_ON(desc->action == NULL)) {
2064 irq_pm_remove_action(desc, desc->action);
2065 devname = desc->action->name;
2066 unregister_handler_proc(irq, desc->action);
2068 kfree(desc->action);
2069 desc->action = NULL;
2072 irq_settings_clr_disable_unlazy(desc);
2073 irq_shutdown_and_deactivate(desc);
2075 irq_release_resources(desc);
2077 irq_chip_pm_put(&desc->irq_data);
2078 module_put(desc->owner);
2085 struct irq_desc *desc = irq_to_desc(irq);
2089 if (!desc || WARN_ON(!irq_is_nmi(desc)))
2092 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2096 if (WARN_ON(desc->depth == 0))
2099 raw_spin_lock_irqsave(&desc->lock, flags);
2101 irq_nmi_teardown(desc);
2102 devname = __cleanup_nmi(irq, desc);
2104 raw_spin_unlock_irqrestore(&desc->lock, flags);
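
free_nmi() relies on __cleanup_nmi() above and WARNs if desc->depth is zero, i.e. the line must be disabled before it is freed. My reading of that check suggests a teardown order like this sketch; treat it as an assumption rather than a documented recipe:

    #include <linux/interrupt.h>

    static void my_teardown_nmi(unsigned int irq, void *dev)
    {
            disable_nmi_nosync(irq);    /* satisfies the depth check */
            free_nmi(irq, dev);         /* returns the devname */
    }
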
2156 struct irq_desc *desc;
2181 desc = irq_to_desc(irq);
2182 if (!desc)
2185 if (!irq_settings_can_request(desc) ||
2186 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2205 retval = irq_chip_pm_get(&desc->irq_data);
2211 retval = __setup_irq(irq, desc, action);
2214 irq_chip_pm_put(&desc->irq_data);
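
request_threaded_irq() above is also the backend of plain request_irq() (thread_fn NULL). For shared lines every requester must pass IRQF_SHARED plus a unique non-NULL dev_id, which is what the free path matches on. Sketch with a hypothetical handler:

    #include <linux/interrupt.h>

    static irqreturn_t my_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;     /* or IRQ_NONE if not ours */
    }

    static int my_attach(unsigned int irq, void *dev)
    {
            /* dev must be non-NULL: it identifies us among the sharers. */
            return request_irq(irq, my_handler, IRQF_SHARED, "my-dev", dev);
    }
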
2262 struct irq_desc *desc;
2268 desc = irq_to_desc(irq);
2269 if (!desc)
2272 if (irq_settings_is_nested_thread(desc)) {
2313 struct irq_desc *desc;
2330 desc = irq_to_desc(irq);
2332 if (!desc || (irq_settings_can_autoenable(desc) &&
2334 !irq_settings_can_request(desc) ||
2335 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2336 !irq_supports_nmi(desc))
2348 retval = irq_chip_pm_get(&desc->irq_data);
2352 retval = __setup_irq(irq, desc, action);
2356 raw_spin_lock_irqsave(&desc->lock, flags);
2359 desc->istate |= IRQS_NMI;
2360 retval = irq_nmi_setup(desc);
2362 __cleanup_nmi(irq, desc);
2363 raw_spin_unlock_irqrestore(&desc->lock, flags);
2367 raw_spin_unlock_irqrestore(&desc->lock, flags);
2372 irq_chip_pm_put(&desc->irq_data);
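
request_nmi() above refuses lines that would auto-enable, can be threaded, or cannot deliver NMIs. The bring-up below (IRQF_NO_AUTOEN at request time, then enable_irq()) is my reading of those checks, not a documented recipe, and whether a line can deliver NMIs at all is irqchip specific:

    #include <linux/interrupt.h>

    static irqreturn_t my_nmi_handler(int irq, void *dev_id)
    {
            /* NMI context: no sleeping, no regular spinlocks. */
            return IRQ_HANDLED;
    }

    static int my_setup_nmi(unsigned int irq, void *dev)
    {
            int ret;

            ret = request_nmi(irq, my_nmi_handler, IRQF_NO_AUTOEN,
                              "my-nmi", dev);
            if (ret)
                    return ret;     /* e.g. chip lacks NMI support */
            enable_irq(irq);
            return 0;
    }
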
2383 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2385 if (!desc)
2394 type = irqd_get_trigger_type(&desc->irq_data);
2399 ret = __irq_set_trigger(desc, type);
2407 irq_percpu_enable(desc, cpu);
2409 irq_put_desc_unlock(desc, flags);
2428 struct irq_desc *desc;
2432 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2433 if (!desc)
2436 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2437 irq_put_desc_unlock(desc, flags);
2447 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2449 if (!desc)
2452 irq_percpu_disable(desc, cpu);
2453 irq_put_desc_unlock(desc, flags);
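
enable_percpu_irq()/disable_percpu_irq() act only on the calling CPU (note the IRQ_GET_DESC_CHECK_PERCPU lookups above), so they are typically driven from CPU-hotplug callbacks. Sketch, with my_irq a hypothetical global; IRQ_TYPE_NONE keeps whatever trigger is already configured:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static unsigned int my_irq;

    static int my_cpu_starting(unsigned int cpu)
    {
            enable_percpu_irq(my_irq, IRQ_TYPE_NONE);
            return 0;
    }

    static int my_cpu_dying(unsigned int cpu)
    {
            disable_percpu_irq(my_irq);
            return 0;
    }
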
2467 struct irq_desc *desc = irq_to_desc(irq);
2473 if (!desc)
2476 raw_spin_lock_irqsave(&desc->lock, flags);
2478 action = desc->action;
2484 if (!cpumask_empty(desc->percpu_enabled)) {
2486 irq, cpumask_first(desc->percpu_enabled));
2491 desc->action = NULL;
2493 desc->istate &= ~IRQS_NMI;
2495 raw_spin_unlock_irqrestore(&desc->lock, flags);
2499 irq_chip_pm_put(&desc->irq_data);
2500 module_put(desc->owner);
2504 raw_spin_unlock_irqrestore(&desc->lock, flags);
2517 struct irq_desc *desc = irq_to_desc(irq);
2519 if (desc && irq_settings_is_per_cpu_devid(desc))
2537 struct irq_desc *desc = irq_to_desc(irq);
2539 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2542 chip_bus_lock(desc);
2544 chip_bus_sync_unlock(desc);
2550 struct irq_desc *desc = irq_to_desc(irq);
2552 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2555 if (WARN_ON(!irq_is_nmi(desc)))
2570 struct irq_desc *desc = irq_to_desc(irq);
2573 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2576 retval = irq_chip_pm_get(&desc->irq_data);
2580 retval = __setup_irq(irq, desc, act);
2583 irq_chip_pm_put(&desc->irq_data);
2610 struct irq_desc *desc;
2616 desc = irq_to_desc(irq);
2617 if (!desc || !irq_settings_can_request(desc) ||
2618 !irq_settings_is_per_cpu_devid(desc))
2633 retval = irq_chip_pm_get(&desc->irq_data);
2639 retval = __setup_irq(irq, desc, action);
2642 irq_chip_pm_put(&desc->irq_data);
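
__request_percpu_irq() above feeds __setup_irq() with a per-CPU dev_id; the flow handler later hands each invocation the calling CPU's slot of that per-CPU area. Sketch:

    #include <linux/interrupt.h>
    #include <linux/percpu.h>

    struct my_pcpu { u64 count; };
    static DEFINE_PER_CPU(struct my_pcpu, my_pcpu_state);

    static irqreturn_t my_percpu_handler(int irq, void *dev_id)
    {
            struct my_pcpu *p = dev_id;    /* this CPU's slot */

            p->count++;
            return IRQ_HANDLED;
    }

    static int my_setup_percpu(unsigned int irq)
    {
            /* Not auto-enabled: call enable_percpu_irq() on each CPU. */
            return request_percpu_irq(irq, my_percpu_handler, "my-percpu",
                                      &my_pcpu_state);
    }
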
2675 struct irq_desc *desc;
2682 desc = irq_to_desc(irq);
2684 if (!desc || !irq_settings_can_request(desc) ||
2685 !irq_settings_is_per_cpu_devid(desc) ||
2686 irq_settings_can_autoenable(desc) ||
2687 !irq_supports_nmi(desc))
2691 if (irq_is_nmi(desc))
2704 retval = irq_chip_pm_get(&desc->irq_data);
2708 retval = __setup_irq(irq, desc, action);
2712 raw_spin_lock_irqsave(&desc->lock, flags);
2713 desc->istate |= IRQS_NMI;
2714 raw_spin_unlock_irqrestore(&desc->lock, flags);
2719 irq_chip_pm_put(&desc->irq_data);
2742 struct irq_desc *desc;
2747 desc = irq_get_desc_lock(irq, &flags,
2749 if (!desc)
2752 if (WARN(!irq_is_nmi(desc),
2759 ret = irq_nmi_setup(desc);
2766 irq_put_desc_unlock(desc, flags);
2785 struct irq_desc *desc;
2789 desc = irq_get_desc_lock(irq, &flags,
2791 if (!desc)
2794 if (WARN_ON(!irq_is_nmi(desc)))
2797 irq_nmi_teardown(desc);
2799 irq_put_desc_unlock(desc, flags);
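
prepare_percpu_nmi()/teardown_percpu_nmi() above complete the per-CPU NMI API: after request_percpu_nmi(), each CPU prepares and enables the line locally. The hotplug-callback shape below follows how the arm64 PMU driver uses these calls, with my_nmi_irq hypothetical:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static unsigned int my_nmi_irq;

    static int my_nmi_cpu_starting(unsigned int cpu)
    {
            int ret = prepare_percpu_nmi(my_nmi_irq);

            if (ret)
                    return ret;
            enable_percpu_nmi(my_nmi_irq, IRQ_TYPE_NONE);
            return 0;
    }

    static int my_nmi_cpu_dying(unsigned int cpu)
    {
            disable_percpu_nmi(my_nmi_irq);
            teardown_percpu_nmi(my_nmi_irq);
            return 0;
    }
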
2842 struct irq_desc *desc;
2847 desc = irq_get_desc_buslock(irq, &flags, 0);
2848 if (!desc)
2851 data = irq_desc_get_irq_data(desc);
2855 irq_put_desc_busunlock(desc, flags);
2875 struct irq_desc *desc;
2881 desc = irq_get_desc_buslock(irq, &flags, 0);
2882 if (!desc)
2885 data = irq_desc_get_irq_data(desc);
2906 irq_put_desc_busunlock(desc, flags);
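
irq_get_irqchip_state()/irq_set_irqchip_state() above forward to the chip's irq_get/set_irqchip_state callbacks, walking the domain hierarchy to find a chip that implements them. A query sketch; not every irqchip supports this, hence the error check:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static bool my_irq_is_pending(unsigned int irq)
    {
            bool pending = false;

            if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
                    return false;   /* chip cannot report the state */
            return pending;
    }
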
2937 struct irq_desc *desc;
2941 desc = irq_to_desc(irq);
2942 if (desc)
2943 res = !!(desc->status_use_accessors & bitmask);