/* hardirq.h: 64-bit Sparc hard IRQ support.
 *
 * Copyright (C) 1997, 1998 David S. Miller (davem@caip.rutgers.edu)
 */
5
6#ifndef __SPARC64_HARDIRQ_H
7#define __SPARC64_HARDIRQ_H
8
9#include <linux/config.h>
10#include <linux/threads.h>
11#include <linux/brlock.h>
12#include <linux/spinlock.h>
13
14/* entry.S is sensitive to the offsets of these fields */
15/* rtrap.S is sensitive to the size of this structure */
typedef struct {
	unsigned int __softirq_pending;	/* pending-softirq bitmask for this cpu */
	unsigned int __unused_1;	/* padding -- keeps later fields at the offsets asm expects */
#ifndef CONFIG_SMP
	unsigned int __local_irq_count;	/* UP: hard IRQ nesting depth of this (only) cpu */
#else
	unsigned int __unused_on_SMP;	/* DaveM says use brlock for SMP irq. KAO */
#endif
	unsigned int __local_bh_count;	/* bottom-half nesting depth */
	unsigned int __syscall_count;	/* NOTE(review): presumably a syscall statistics counter -- not referenced here */
        struct task_struct * __ksoftirqd_task;	/* per-cpu ksoftirqd kernel thread, waitqueue target for softirq work */
} ____cacheline_aligned irq_cpustat_t;
28
29#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
30/* Note that local_irq_count() is replaced by sparc64 specific version for SMP */
31
#ifndef CONFIG_SMP
/* UP: entering/leaving a hard IRQ handler just bumps the per-cpu
 * nesting count; the irq argument is evaluated (cast to void) only
 * to avoid unused-argument warnings. */
#define irq_enter(cpu, irq)	((void)(irq), local_irq_count(cpu)++)
#define irq_exit(cpu, irq)	((void)(irq), local_irq_count(cpu)--)
#else
/* SMP: the per-cpu hard IRQ count is kept in the global-IRQ brlock's
 * per-cpu read array, so irq_enter()/irq_exit() are exactly a brlock
 * read lock/unlock.  The generic local_irq_count() mapping from
 * irq_cpustat.h is replaced (note the #undef) to read that array. */
#undef local_irq_count
#define local_irq_count(cpu)	(__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
#define irq_enter(cpu, irq)	br_read_lock(BR_GLOBALIRQ_LOCK)
#define irq_exit(cpu, irq)	br_read_unlock(BR_GLOBALIRQ_LOCK)
#endif
41
/*
 * Are we in an interrupt context? Either doing bottom half
 * or hardware interrupt processing?
 * NOTE(review): smp_processor_id() is evaluated twice here; callers
 * are expected to be pinned to a cpu for the duration (era convention).
 */
#define in_interrupt() ((local_irq_count(smp_processor_id()) + \
		         local_bh_count(smp_processor_id())) != 0)

/* This tests only the local processors hw IRQ context disposition.  */
#define in_irq() (local_irq_count(smp_processor_id()) != 0)
51
#ifndef CONFIG_SMP

/* UP: the global IRQ "lock" degenerates to a check that we are not
 * already inside a hard IRQ handler; the cpu argument is evaluated
 * (cast to void) only to avoid unused-argument warnings. */
#define hardirq_trylock(cpu)	((void)(cpu), local_irq_count(smp_processor_id()) == 0)
#define hardirq_endlock(cpu)	do { (void)(cpu); } while(0)

/* UP: nothing to wait for -- a compiler barrier suffices. */
#define synchronize_irq()	barrier()
58
59#else /* (CONFIG_SMP) */
60
61static __inline__ int irqs_running(void)
62{
63	int i;
64
65	for (i = 0; i < smp_num_cpus; i++)
66		if (local_irq_count(cpu_logical_map(i)))
67			return 1;
68	return 0;
69}
70
71extern unsigned char global_irq_holder;
72
73static inline void release_irqlock(int cpu)
74{
75	/* if we didn't own the irq lock, just ignore... */
76	if(global_irq_holder == (unsigned char) cpu) {
77		global_irq_holder = NO_PROC_ID;
78		br_write_unlock(BR_GLOBALIRQ_LOCK);
79	}
80}
81
82static inline int hardirq_trylock(int cpu)
83{
84	spinlock_t *lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
85
86	return (!local_irq_count(cpu) && !spin_is_locked(lock));
87}
88
/* SMP: hardirq_trylock() only samples state and takes nothing, so
 * "ending the lock" is a no-op (cpu evaluated to silence warnings). */
#define hardirq_endlock(cpu)	do { (void)(cpu); } while (0)

/* Wait for all cpus to leave hard IRQ context; implemented elsewhere. */
extern void synchronize_irq(void);
92
93#endif /* CONFIG_SMP */
94
95#endif /* !(__SPARC64_HARDIRQ_H) */
96