#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H

#include <asm/atomic.h>
#include <asm/hardirq.h>

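/*
 * Bottom halves are disabled by raising the per-CPU local_bh_count
 * (kept in irq_stat, see asm/hardirq.h) and re-enabled by dropping
 * it back to zero.  The barrier() keeps the compiler from moving
 * memory accesses out of the protected region: it sits after the
 * increment on disable and before the decrement on enable.
 */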
#define __cpu_bh_enable(cpu) \
		do { barrier(); local_bh_count(cpu)--; } while (0)
#define cpu_bh_disable(cpu) \
		do { local_bh_count(cpu)++; barrier(); } while (0)

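/*
 * The common entry points operate on the current CPU.  Note that
 * __local_bh_enable() only drops the count; it does not run any
 * softirqs that became pending in the meantime -- that is what
 * local_bh_enable() below is for.
 */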
#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
#define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())

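/*
 * Nonzero whenever bottom halves are disabled on this CPU.  Since
 * do_softirq() itself raises the count around handler execution,
 * this is also true while softirq handlers are actually running.
 */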
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)

/*
 * NOTE: this assembly code assumes:
 *
 *    (char *)&local_bh_count - 8 == (char *)&softirq_pending
 *
 * If you change the offsets in irq_stat then you have to
 * update this code as well.
 */
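/*
 * For reference, the start of irq_cpustat_t (asm/hardirq.h) is
 * expected to look roughly like the sketch below -- the field names
 * here are illustrative, check the real header before relying on them:
 *
 *	typedef struct {
 *		unsigned int __softirq_pending;		// offset 0
 *		unsigned int __local_irq_count;		// offset 4
 *		unsigned int __local_bh_count;		// offset 8
 *		...
 *	} irq_cpustat_t;
 *
 * so 8 bytes below &local_bh_count(cpu) lands on softirq_pending for
 * the same CPU, which is what the "cmpl $0, -8(%0)" below tests.
 *
 * The macro decrements the count (fast path) and, only if it hits
 * zero with softirqs pending, branches to an out-of-line stub (moved
 * off the hot path by LOCK_SECTION_START) that saves the
 * caller-clobbered registers %eax/%ecx/%edx, calls do_softirq(), and
 * jumps back.  That is why the asm can claim "no registers clobbered".
 */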
#define local_bh_enable()						\
do {									\
	unsigned int *ptr = &local_bh_count(smp_processor_id());	\
									\
	barrier();							\
	if (!--*ptr)							\
		__asm__ __volatile__ (					\
			"cmpl $0, -8(%0);"				\
			"jnz 2f;"					\
			"1:;"						\
									\
			LOCK_SECTION_START("")				\
			"2: pushl %%eax; pushl %%ecx; pushl %%edx;"	\
			"call %c1;"					\
			"popl %%edx; popl %%ecx; popl %%eax;"		\
			"jmp 1b;"					\
			LOCK_SECTION_END				\
									\
		: /* no output */					\
		: "r" (ptr), "i" (do_softirq)				\
		/* no registers clobbered */ );				\
} while (0)
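/*
 * Typical usage pattern (illustrative only):
 *
 *	local_bh_disable();
 *	... touch data shared with softirq handlers ...
 *	local_bh_enable();	// may run pending softirqs right here
 *
 * Pairing with __local_bh_enable() instead skips the pending check
 * and is meant for contexts, such as do_softirq() itself, that deal
 * with pending softirqs on their own.
 */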

#endif	/* __ASM_SOFTIRQ_H */