#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H

#include <asm/atomic.h>
#include <asm/hardirq.h>
#include <linux/stringify.h>

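/*
 * Bottom halves are disabled by raising a per-CPU nesting count and
 * re-enabled by dropping it.  The barrier()s stop the compiler from
 * moving protected code outside the disable/enable window.
 */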
#define __cpu_bh_enable(cpu) \
		do { barrier(); local_bh_count(cpu)--; } while (0)
#define cpu_bh_disable(cpu) \
		do { local_bh_count(cpu)++; barrier(); } while (0)

#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
#define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())

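/*
 * in_softirq() is simply "is local_bh_count raised?", so it is true
 * both while softirqs are being run and while bottom halves are
 * merely disabled.
 */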
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)

/*
 * NOTE: this assembly code assumes:
 *
 *    (char *)&local_bh_count - 8 == (char *)&softirq_pending
 *
 * If you change the offsets in irq_stat then you have to
 * update this code as well.
 */
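/*
 * Fast path: just drop the nesting count.  Only when it reaches zero
 * do we check softirq_pending (via the -8 offset above) and branch
 * out of line to run the pending softirqs.
 */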
#define local_bh_enable()						\
do {									\
	unsigned int *ptr = &local_bh_count(smp_processor_id());	\
									\
	barrier();							\
	if (!--*ptr)							\
		__asm__ __volatile__ (					\
			"cmpl $0, -8(%0);"				\
			"jnz 2f;"					\
			"1:;"						\
									\
			".subsection 1\n"				\
			".ifndef _text_lock_" __stringify(KBUILD_BASENAME) "\n" \
			"_text_lock_" __stringify(KBUILD_BASENAME) ":\n" \
			".endif\n"					\
			"2: call do_softirq_thunk;"			\
			"jmp 1b;"					\
			".subsection 0;"				\
									\
		: /* no output */					\
		: "r" (ptr)						\
		/* no registers clobbered */ );				\
} while (0)
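
/*
 * Typical usage: bracket per-CPU state that softirq handlers may
 * also touch.
 *
 *	local_bh_disable();
 *	... access the shared state ...
 *	local_bh_enable();	// may run pending softirqs here
 */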

#endif	/* __ASM_SOFTIRQ_H */