1#ifndef __ASM_HARDIRQ_H
2#define __ASM_HARDIRQ_H
3
4#include <linux/config.h>
5#include <linux/threads.h>
6#include <linux/irq.h>
7
/* assembly code in softirq.h is sensitive to the offsets of these fields */
typedef struct {
	unsigned int __softirq_pending;	/* presumably a bitmask of pending softirqs — confirm against softirq code */
	unsigned int __local_irq_count;	/* hardirq nesting depth; bumped by irq_enter()/irq_exit() below */
	unsigned int __local_bh_count;	/* bottom-half nesting depth; read by in_interrupt() below */
	unsigned int __syscall_count;	/* NOTE(review): not referenced in this file; likely a per-CPU syscall counter */
	struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
	unsigned int __nmi_count;	/* arch dependent */
} ____cacheline_aligned irq_cpustat_t;
17
18#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
19
/*
 * Are we in an interrupt context? Either doing bottom half
 * or hardware interrupt processing?
 *
 * Implemented as a GCC statement expression so smp_processor_id()
 * is evaluated exactly once per use.
 */
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })

/* Nonzero iff this CPU is inside a hardware interrupt handler
 * (local_irq_count is raised/lowered by irq_enter()/irq_exit()). */
#define in_irq() (local_irq_count(smp_processor_id()) != 0)
28
#ifndef CONFIG_SMP

/*
 * Uniprocessor build: there is no cross-CPU global IRQ lock, so the
 * "lock" operations reduce to checks on the local nesting count and
 * the unlock is a no-op.
 */
#define hardirq_trylock(cpu)	(local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu)	do { } while (0)

/* Maintain the hardirq nesting depth consulted by in_irq()/in_interrupt(). */
#define irq_enter(cpu, irq)	(local_irq_count(cpu)++)
#define irq_exit(cpu, irq)	(local_irq_count(cpu)--)

/* On UP nothing can run concurrently with us; a compiler barrier suffices. */
#define synchronize_irq()	barrier()
38
39#else
40
41#include <asm/atomic.h>
42#include <asm/smp.h>
43
44extern unsigned char global_irq_holder;
45extern unsigned volatile long global_irq_lock; /* long for set_bit -RR */
46
47static inline int irqs_running (void)
48{
49	int i;
50
51	for (i = 0; i < smp_num_cpus; i++)
52		if (local_irq_count(i))
53			return 1;
54	return 0;
55}
56
57static inline void release_irqlock(int cpu)
58{
59	/* if we didn't own the irq lock, just ignore.. */
60	if (global_irq_holder == (unsigned char) cpu) {
61		global_irq_holder = NO_PROC_ID;
62		clear_bit(0,&global_irq_lock);
63	}
64}
65
66static inline void irq_enter(int cpu, int irq)
67{
68	++local_irq_count(cpu);
69
70	while (test_bit(0,&global_irq_lock)) {
71		cpu_relax();
72	}
73}
74
75static inline void irq_exit(int cpu, int irq)
76{
77	--local_irq_count(cpu);
78}
79
80static inline int hardirq_trylock(int cpu)
81{
82	return !local_irq_count(cpu) && !test_bit(0,&global_irq_lock);
83}
84
/* Unlock counterpart of hardirq_trylock(); nothing to release, so a no-op. */
#define hardirq_endlock(cpu)	do { } while (0)

/* SMP variant is out-of-line; defined elsewhere in the arch irq code. */
extern void synchronize_irq(void);
88
89#endif /* CONFIG_SMP */
90
91#endif /* __ASM_HARDIRQ_H */
92