/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/jump_label.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices, users need to implement wakeup detection in
 *                their interrupt handlers.
 * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
 *                Users will enable it explicitly by enable_irq() or enable_nmi()
 *                later.
 * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
 *                depends on IRQF_PERCPU.
 * IRQF_COND_ONESHOT - Agree to do IRQF_ONESHOT if already set for a shared
 *                interrupt.
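 *
 * A typical combination for a threaded handler on a shared line is
 * IRQF_SHARED | IRQF_ONESHOT. A minimal sketch (my_check_irq(),
 * my_thread_fn() and my_dev are hypothetical driver names, not part
 * of this header):
 *
 *	ret = request_threaded_irq(irq, my_check_irq, my_thread_fn,
 *				   IRQF_SHARED | IRQF_ONESHOT,
 *				   "my-dev", my_dev);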
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_AUTOEN		0x00080000
#define IRQF_NO_DEBUG		0x00100000
#define IRQF_COND_ONESHOT	0x00200000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device (per-CPU variant)
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

/**
 * request_irq - Add a handler for an interrupt line
 * @irq:	The interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Primary handler for threaded interrupts.
 *		If NULL, the default primary handler is installed.
 * @flags:	Handling flags
 * @name:	Name of the device generating this interrupt
 * @dev:	A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
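 *
 * A minimal usage sketch (my_handler() and my_dev are hypothetical
 * driver names, not part of this header); the cookie passed here must
 * also be passed to free_irq() to release the line:
 *
 *	ret = request_irq(irq, my_handler, 0, "my-dev", my_dev);
 *	if (ret)
 *		return ret;
 *	...
 *	free_irq(irq, my_dev);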
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}

extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

extern int irq_inject_interrupt(unsigned int irq);

/* The following three functions are for core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change.
 *		This will be called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

#define IRQ_AFFINITY_MAX_SETS	4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and size
 *			of interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed:	1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
				     bool setaffinity);

/**
 * irq_update_affinity_hint - Update the affinity hint
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint, but does not change the affinity of the interrupt.
 */
static inline int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, false);
}

/**
 * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
 *			       cpumask to the interrupt
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint and if @m is not NULL it applies it as the
 * affinity of that interrupt.
 */
static inline int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, true);
}

/*
 * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
 * instead.
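 *
 * A migration sketch (illustrative only): a driver that used to call
 *
 *	irq_set_affinity_hint(irq, mask);
 *
 * should call irq_update_affinity_hint(irq, mask) if it only wants to
 * publish the hint, or irq_set_affinity_and_hint(irq, mask) if the
 * mask should also be applied as the interrupt's affinity.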
 */
static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return irq_set_affinity_and_hint(irq, m);
}

extern int irq_update_affinity_desc(unsigned int irq,
				    struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_update_affinity_hint(unsigned int irq,
					   const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_and_hint(unsigned int irq,
					    const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_update_affinity_desc(unsigned int irq,
					   struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs which know that a
 * particular irq context is disabled and is the only irq-context
 * user of a lock, so that it is safe to take the lock in the
 * irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
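 *
 * A usage sketch, assuming a hypothetical driver-private lock that is
 * only ever taken from this interrupt's handler or with the interrupt
 * disabled:
 *
 *	disable_irq_nosync_lockdep(irq);
 *	spin_lock(&mydev->lock);	(no spin_lock_irqsave() needed)
 *	...
 *	spin_unlock(&mydev->lock);
 *	enable_irq_lockdep(irq);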
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);

#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
#  define force_irqthreads()	(true)
# else
DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
#  define force_irqthreads()	(static_branch_unlikely(&force_irqthreads_key))
# endif
#else
#define force_irqthreads()	(false)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/*
 * Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/*
 * PLEASE avoid allocating new softirqs unless you _really_ need
 * high-frequency threaded job scheduling. For almost all purposes
 * tasklets are more than enough; e.g. all serial device BHs et
 * al. should be converted to tasklets, not to softirqs.
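 *
 * For reference, a vector is wired up by core code roughly as below
 * (a sketch of the existing tasklet vector, not a template for new
 * ones); open_softirq() registers the action and raise_softirq()
 * marks the vector pending on the local CPU:
 *
 *	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 *	...
 *	raise_softirq(TASKLET_SOFTIRQ);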
 */

enum {
	HI_SOFTIRQ = 0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably, RCU should always be the last softirq */

	NR_SOFTIRQS
};

/*
 * The following vectors can be safely ignored after ksoftirqd is parked:
 *
 * _ RCU:
 *	1) rcutree_migrate_callbacks() migrates the queue.
 *	2) rcutree_report_cpu_dead() reports the final quiescent states.
 *
 * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
 *
 * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
 */
#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
				   BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))


/*
 * Map softirq index to softirq name. Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/*
 * softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action {
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef CONFIG_PREEMPT_RT
extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
	do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/*
 * Tasklets --- multithreaded analogue of BHs.
 *
 * This API is deprecated. Please consider using threaded IRQs instead:
 * https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
 *
 * The main feature distinguishing them from generic softirqs: a given
 * tasklet runs on only one CPU at a time.
 *
 * The main feature distinguishing them from BHs: different tasklets
 * may be run simultaneously on different CPUs.
 *
 * Properties:
 * * If tasklet_schedule() is called, then the tasklet is guaranteed
 *   to be executed on some cpu at least once after this.
 * * If the tasklet is already scheduled, but its execution has not yet
 *   started, it will be executed only once.
 * * If this tasklet is already running on another CPU (or schedule is
 *   called from the tasklet itself), it is rescheduled for later.
 * * A tasklet is strictly serialized with respect to itself, but not
 *   with respect to other tasklets. If a client needs inter-tasklet
 *   synchronization, it must provide it itself, e.g. with spinlocks.
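 *
 * A minimal usage sketch (struct my_dev, its "tasklet" member and
 * my_tasklet_fn() are hypothetical):
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tasklet);
 *		...
 *	}
 *
 *	tasklet_setup(&dev->tasklet, my_tasklet_fn);
 *	tasklet_schedule(&dev->tasklet);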
 */

struct tasklet_struct {
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	bool use_callback;
	union {
		void (*func)(unsigned long data);
		void (*callback)(struct tasklet_struct *t);
	};
	unsigned long data;
};

#define DECLARE_TASKLET(name, _callback)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define DECLARE_TASKLET_DISABLED(name, _callback)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

#define DECLARE_TASKLET_OLD(name, _func)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.func = _func,					\
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.func = _func,					\
}

enum {
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);

#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

/*
 * Do not use in new code. Disabling tasklets from atomic contexts is
 * error prone and should be avoided.
 */
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_spin_wait(t);
	smp_mb();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
			  void (*callback)(struct tasklet_struct *));

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.
 * They are reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 *	1. clear and/or mask the device's internal interrupt.
 *	2. sti();
 *	3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 *	4. enable the device and cause it to trigger an interrupt.
 *	5. wait for the device to interrupt, using non-intrusive polling or a delay.
 *	6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 *	7. service the device to clear its pending interrupt.
 *	8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated IRQs.
 *
 * probe_irq_off() takes the mask as a parameter, and returns the irq
 * number which occurred, or zero if none occurred, or a negative irq
 * number if more than one irq occurred.
 */

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry	__section(".irqentry.text")
#endif

#define __softirq_entry	__section(".softirqentry.text")

#endif