/*
 * <asm/smplock.h>
 *
 * i386 SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>

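/*
 * kernel_flag is the big kernel lock (BKL) itself.  The
 * spinlock_cacheline_t wrapper pads the lock out to its own
 * cacheline, so traffic on this heavily contended lock does not
 * false-share with neighbouring data.
 */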
extern spinlock_cacheline_t kernel_flag_cacheline;
#define kernel_flag kernel_flag_cacheline.lock

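/*
 * True while some CPU holds the big kernel lock.
 */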
#define kernel_locked()		spin_is_locked(&kernel_flag)

/*
 * Release the global kernel lock and the global interrupt lock.
 *
 * The BKL is dropped only if this task actually holds it
 * (lock_depth >= 0); lock_depth itself is deliberately left
 * untouched so reacquire_kernel_lock() can restore the lock to
 * the same nesting depth.  release_irqlock() gives back the
 * global interrupt lock if this CPU holds it, and __sti()
 * re-enables local interrupts.
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (task->lock_depth >= 0) \
		spin_unlock(&kernel_flag); \
	release_irqlock(cpu); \
	__sti(); \
} while (0)

/*
 * Re-acquire the kernel lock on behalf of a task that held it
 * when it was scheduled away (lock_depth is still >= 0).
 */
#define reacquire_kernel_lock(task) \
do { \
	if (task->lock_depth >= 0) \
		spin_lock(&kernel_flag); \
} while (0)
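
/*
 * Usage sketch (not part of this header): the 2.4 scheduler drops
 * the BKL across a context switch and takes it back for the
 * incoming task, roughly:
 *
 *	release_kernel_lock(prev, this_cpu);
 *	...select the next task and switch to it...
 *	reacquire_kernel_lock(current);
 *
 * Because lock_depth survives the switch, the task resumes holding
 * the BKL at the same nesting depth it had before sleeping.
 */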

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPUs.
 */
static __inline__ void lock_kernel(void)
{
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
}
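
/*
 * Every task starts with lock_depth == -1 ("no lock", set at fork
 * time), so the first lock_kernel() brings the depth to 0 and takes
 * the spinlock; nested calls merely bump the counter, and
 * unlock_kernel() below releases the spinlock only on the matching
 * 0 -> -1 transition.
 */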

static __inline__ void unlock_kernel(void)
{
	if (current->lock_depth < 0)
		out_of_line_bug();
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
}
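
/*
 * Nesting sketch (hypothetical caller, not from this file):
 *
 *	lock_kernel();		// depth -1 -> 0: takes kernel_flag
 *	lock_kernel();		// depth  0 -> 1: already held, just counts
 *	unlock_kernel();	// depth  1 -> 0: still held
 *	unlock_kernel();	// depth  0 -> -1: drops kernel_flag
 *
 * An unlock_kernel() with the depth already negative is unbalanced
 * and triggers out_of_line_bug().
 */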