/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* The one big kernel lock (BKL), shared by all CPUs. */
extern spinlock_t kernel_flag;

/* Non-zero if the big kernel lock is currently held. */
#define kernel_locked()		spin_is_locked(&kernel_flag)

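/*
 * Illustrative sketch, not part of this header: kernel_locked() is
 * useful for debugging checks, e.g. warning when a path that must
 * not hold the BKL finds it taken (the caller shown is hypothetical):
 *
 *	if (kernel_locked())
 *		printk(KERN_WARNING "BKL held in lockless path\n");
 */
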
/*
 * Release the global kernel lock and the global interrupt lock,
 * e.g. when scheduling away from a task.
 */
static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
{
	if (task->lock_depth >= 0)	/* does this task hold the BKL? */
		spin_unlock(&kernel_flag);
	release_irqlock(cpu);		/* drop the global IRQ lock if this CPU holds it */
	__sti();			/* re-enable local interrupts */
}

/*
 * Re-acquire the kernel lock when the task runs again; lock_depth
 * was left untouched by release_kernel_lock(), so it still tells us
 * whether this task held the BKL when it was switched out.
 */
static __inline__ void reacquire_kernel_lock(struct task_struct *task)
{
	if (task->lock_depth >= 0)
		spin_lock(&kernel_flag);
}
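
/*
 * Illustrative sketch, not part of this header: the scheduler is the
 * intended caller of the pair above, dropping the BKL while a task is
 * switched out and retaking it when the task runs again. Simplified
 * for illustration (the real schedule() does much more):
 *
 *	asmlinkage void schedule(void)
 *	{
 *		struct task_struct *prev = current;
 *		int this_cpu = prev->processor;
 *
 *		release_kernel_lock(prev, this_cpu);
 *		... pick the next task and context-switch to it ...
 *		reacquire_kernel_lock(current);
 *	}
 */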

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need
 * to worry about other CPUs.
 */
static __inline__ void lock_kernel(void)
{
	if (!++current->lock_depth)	/* depth was -1: first acquisition */
		spin_lock(&kernel_flag);
}

static __inline__ void unlock_kernel(void)
{
	if (--current->lock_depth < 0)	/* back to -1: last release */
		spin_unlock(&kernel_flag);
}
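
/*
 * Illustrative sketch, not part of this header: the BKL is recursive
 * per task. lock_depth starts at -1, so only the outermost
 * lock_kernel() takes the spinlock and only the outermost
 * unlock_kernel() drops it (do_something() is hypothetical):
 *
 *	void do_something(void)
 *	{
 *		lock_kernel();		depth -1 -> 0, spinlock taken
 *		lock_kernel();		depth  0 -> 1, already ours
 *		...
 *		unlock_kernel();	depth  1 -> 0, still held
 *		unlock_kernel();	depth  0 -> -1, spinlock dropped
 *	}
 */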