1#ifndef __I386_MMU_CONTEXT_H
2#define __I386_MMU_CONTEXT_H
3
4#include <linux/config.h>
5#include <asm/desc.h>
6#include <asm/atomic.h>
7#include <asm/pgalloc.h>
8
/*
 * Tearing down an mm's architecture context is a no-op on i386:
 * nothing was allocated per-mm here, and the LDT referenced by
 * mm->context.segments is not unloaded at this point.
 * possibly do the LDT unload here?
 */
#define destroy_context(mm)		do { } while(0)
/* No per-arch state to set up for a new mm; expands to 0 (success). */
#define init_new_context(tsk,mm)	0
14
#ifdef CONFIG_SMP

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
	/*
	 * Downgrade this CPU's TLB state from OK to LAZY so the lazy-TLB
	 * handling in switch_mm / the flush-IPI path can treat it specially.
	 * A CPU that is not currently in the OK state is left untouched.
	 */
	if (cpu_tlbstate[cpu].state != TLBSTATE_OK)
		return;
	cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
}
#else
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
	/* UP build: no cross-CPU flush IPIs exist, so lazy-TLB tracking
	 * is unnecessary and this is a no-op. */
}
#endif
27
/*
 * Switch this CPU's address space from 'prev' to 'next'.
 *
 * The statement order below is deliberate and race-sensitive: 'cpu' is
 * cleared from prev's cpu_vm_mask before the page tables change, and
 * set in next's mask before %cr3 is loaded, so the TLB-flush IPI
 * senders always see a consistent picture.  Do not reorder.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
	if (prev != next) {
		/* stop flush ipis for the previous mm */
		clear_bit(cpu, &prev->cpu_vm_mask);
		/*
		 * Re-load LDT if necessary
		 */
		if (prev->context.segments != next->context.segments)
			load_LDT(next);
#ifdef CONFIG_SMP
		/* Mark our TLB state valid for 'next' before advertising
		 * ourselves in its cpu_vm_mask below. */
		cpu_tlbstate[cpu].state = TLBSTATE_OK;
		cpu_tlbstate[cpu].active_mm = next;
#endif
		set_bit(cpu, &next->cpu_vm_mask);
		/* Record that this CPU now has a current LDT for 'next'. */
		set_bit(cpu, &next->context.cpuvalid);
		/* Re-load page tables */
		load_cr3(next->pgd);
	}
#ifdef CONFIG_SMP
	else {
		/* prev == next: we may be re-entering from lazy-TLB mode. */
		cpu_tlbstate[cpu].state = TLBSTATE_OK;
		if(cpu_tlbstate[cpu].active_mm != next)
			out_of_line_bug();
		if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload %cr3.
			 */
			load_cr3(next->pgd);
		}
		/* If our LDT went stale while we were lazy (cpuvalid bit
		 * was cleared elsewhere), reload it now. */
		if (!test_and_set_bit(cpu, &next->context.cpuvalid))
			load_LDT(next);
	}
#endif
}
63
/*
 * Make 'next' the active address space on the current CPU.  This is
 * just switch_mm with no associated task (tsk == NULL), for callers
 * that adopt an mm outside a normal task context switch.
 */
#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL,smp_processor_id())
66
67#endif
68