#ifdef __KERNEL__
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

/*
 * ppc64 hard-IRQ accounting.
 *
 * Use a brlock for the global irq lock, based on sparc64.
 * Anton Blanchard <anton@au1.ibm.com>
 *
 * On SMP the per-cpu hardirq count is folded into the brlock's per-cpu
 * read-lock array (see the local_irq_count() redefinition below), so
 * irq_enter()/irq_exit() double as the brlock read lock/unlock.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/brlock.h>
#include <linux/spinlock.h>

/* Per-cpu interrupt bookkeeping; one cacheline per cpu to avoid bouncing. */
typedef struct {
	unsigned long __softirq_pending;
#ifndef CONFIG_SMP
	unsigned int __local_irq_count;
#else
	unsigned int __unused_on_SMP;	/* We use brlocks on SMP */
#endif
	unsigned int __local_bh_count;
	unsigned int __syscall_count;
	unsigned long __unused;
	struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
/* Note that local_irq_count() is replaced by ppc64 specific version for SMP */

#ifndef CONFIG_SMP
#define irq_enter(cpu)		(local_irq_count(cpu)++)
#define irq_exit(cpu)		(local_irq_count(cpu)--)
#else
/*
 * On SMP, reuse the brlock's per-cpu read-lock slot as the hardirq count:
 * taking the read lock in irq_enter() makes the slot non-zero, so
 * local_irq_count(cpu) != 0 exactly while cpu is in hardirq context.
 */
#undef local_irq_count
#define local_irq_count(cpu)	(__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
#define irq_enter(cpu)		br_read_lock(BR_GLOBALIRQ_LOCK)
#define irq_exit(cpu)		br_read_unlock(BR_GLOBALIRQ_LOCK)
#endif

/*
 * Are we in an interrupt context? Either doing bottom half
 * or hardware interrupt processing?
 */
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })

/* This tests only the local processors hw IRQ context disposition. */
#define in_irq() (local_irq_count(smp_processor_id()) != 0)

#ifndef CONFIG_SMP

/* UP: the global irq lock is free iff we are not in hardirq context. */
#define hardirq_trylock(cpu)	(local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu)	do { } while (0)

#define synchronize_irq()	barrier()

#else /* CONFIG_SMP */

/*
 * Return 1 if any cpu is currently in hardirq context (i.e. holds the
 * global-irq brlock for reading via irq_enter()), 0 otherwise.
 */
static __inline__ int irqs_running(void)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++)
		if (local_irq_count(cpu_logical_map(i)))
			return 1;
	return 0;
}

extern unsigned char global_irq_holder;

/*
 * Drop the global irq write lock if, and only if, @cpu is the holder.
 * Safe to call unconditionally on exit paths.
 */
static inline void release_irqlock(int cpu)
{
	/* if we didn't own the irq lock, just ignore... */
	if(global_irq_holder == (unsigned char) cpu) {
		global_irq_holder = NO_PROC_ID;
		br_write_unlock(BR_GLOBALIRQ_LOCK);
	}
}

/*
 * Non-blocking check that the global irq lock is available to @cpu:
 * we must not already be in hardirq context, and no writer may hold
 * the brlock. NOTE(review): this only peeks at the write-lock spinlock;
 * presumably the caller handles the inherent race - confirm at call sites.
 */
static inline int hardirq_trylock(int cpu)
{
	spinlock_t *lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;

	return (!local_irq_count(cpu) && !spin_is_locked(lock));
}

#define hardirq_endlock(cpu)	do { (void)(cpu); } while (0)

extern void synchronize_irq(void);

#endif /* CONFIG_SMP */

#endif /* __ASM_HARDIRQ_H */
#endif /* __KERNEL__ */