#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

/*
 * possibly do the LDT unload here?
 */
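/*
 * init_new_context() sets up the per-mm LDT bookkeeping for a new
 * address space and destroy_context() frees it again; on x86-64 both
 * live in arch/x86_64/kernel/ldt.c.
 */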
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

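/*
 * Called when the scheduler switches to a kernel thread: the CPU keeps
 * the previous mm loaded and is marked TLBSTATE_LAZY, so a later flush
 * IPI can simply drop this CPU from the mm via leave_mm() instead of
 * flushing it over and over.
 */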
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}

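/*
 * Point %cr3 at the physical address of the given page-global directory.
 * Writing %cr3 flushes all non-global TLB entries; the "memory" clobber
 * keeps the compiler from reordering page-table stores across the switch.
 */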
static inline void load_cr3(pgd_t *pgd)
{
	asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory");
}

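/*
 * Switch the active address space: unsubscribe this CPU from prev's
 * flush IPIs, publish next as the active mm in the PDA, reload %cr3
 * and, if it differs, the LDT. The SMP else-branch below handles
 * re-entering an mm that this CPU previously left lazily.
 *
 * Caller-side sketch, simplified from context_switch() in kernel/sched.c
 * (kernel threads have no mm of their own and stay lazy):
 *
 *	if (unlikely(!next->mm)) {
 *		next->active_mm = prev->active_mm;
 *		enter_lazy_tlb(prev->active_mm, next);
 *	} else
 *		switch_mm(prev->active_mm, next->mm, next);
 */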
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		write_pda(mmu_state, TLBSTATE_OK);
		write_pda(active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);
		load_cr3(next->pgd);

		if (unlikely(next->context.ldt != prev->context.ldt))
			load_LDT_nolock(&next->context, cpu);
	}
#ifdef CONFIG_SMP
	else {
		write_pda(mmu_state, TLBSTATE_OK);
		if (read_pda(active_mm) != next)
			out_of_line_bug();
		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure we don't run on freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context, cpu);
		}
	}
#endif
}

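/*
 * Called on the exec path: reset the user %gs (via load_gs_index(0))
 * and %fs selectors so TLS selectors belonging to the old program
 * image cannot leak into the new one.
 */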
#define deactivate_mm(tsk,mm)	do { \
	load_gs_index(0); \
	asm volatile("movl %0,%%fs"::"r"(0)); \
} while(0)

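/*
 * exec installs its new mm through activate_mm(); there is no incoming
 * task to hand down, hence the NULL tsk argument to switch_mm() (which
 * ignores it anyway).
 */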
#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)

#endif