/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/config.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * For the fast TLB miss handlers we currently keep a per-CPU array of
 * pointers to the current pgd; the processor id is also stuffed into
 * the context register.  This should be changed to take the processor
 * id from current->processor, with current kept in watchhi/watchlo,
 * freeing the context register to map the page tables contiguously.
 */
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
	pgd_current[smp_processor_id()] = (unsigned long)(pgd)

/*
 * Wrapped in do { } while (0) so both statements stay together when
 * the macro is used in an unbraced if/else body.
 */
#define TLBMISS_HANDLER_SETUP() \
	do { \
		write_c0_context(((long)(&pgd_current[smp_processor_id()])) << 23); \
		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \
	} while (0)
extern unsigned long pgd_current[];

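/*
 * Consumer-side sketch (hedged; the real TLB refill handler is
 * hand-written assembly elsewhere in the port): storing the pointer
 * shifted left by 23 places it in the PTEBase field of c0_context,
 * clear of the BadVPN2 bits the hardware deposits on a miss, so the
 * handler can recover the pgd_current slot with a single right shift,
 * roughly:
 *
 *	pgd_t *pgd = *(pgd_t **)(read_c0_context() >> 23);
 */
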
#define cpu_context(cpu, mm)	((mm)->context[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

#define ASID_INC	0x1
#define ASID_MASK	0xff
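
/*
 * Example (illustrative, not from the original source): with
 * ASID_MASK == 0xff, a context value of 0x31a on some CPU splits into
 * software version 0x300 and hardware ASID 0x1a; cpu_asid() extracts
 * the low eight bits, which are all that ever reaches c0_entryhi.
 */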

/*
 * MIPS has nothing to set up when a kernel thread borrows an mm in
 * lazy TLB mode, so this hook is empty.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk,
	unsigned cpu)
{
}

/*
 * All the upper bits left unused by the hardware are treated as a
 * software extension of the ASID: a per-CPU version number.
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
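
/*
 * Worked arithmetic (illustrative): with ASID_MASK == 0xff,
 * ASID_VERSION_MASK is ~0xffUL and ASID_FIRST_VERSION is 0x100.
 * Incrementing past hardware ASID 0xff carries into the version bits;
 * if the whole counter ever wraps to zero it is bumped back to 0x100,
 * so a context of 0 always means "no ASID allocated yet".
 */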

static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
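
/*
 * Worked trace (illustrative): if asid_cache(cpu) is 0x1ff, the
 * increment gives 0x200 and the low eight bits are zero, so the TLB
 * and icache are flushed and this mm receives ASID 0x00 of version
 * 0x200.  Every context whose version bits still read 0x100 is now
 * stale and will be reallocated by its next switch_mm().
 */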

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++)
		cpu_context(i, mm) = 0;
	return 0;
}
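
/*
 * A context of zero never matches the version bits of any live
 * asid_cache(), so the first switch_mm() of the new mm on each CPU is
 * guaranteed to allocate a fresh ASID via get_new_mmu_context().
 */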

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk, unsigned cpu)
{
	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);

	write_c0_entryhi(cpu_context(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
}
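
/*
 * Example of the version check (illustrative): with asid_cache(cpu)
 * == 0x300, a context of 0x2ff XORs to 0x1ff and masking with
 * ASID_VERSION_MASK leaves 0x100, so the stale ASID is replaced;
 * a context of 0x305 XORs to 0x005, passes the check, and its ASID
 * 0x05 is written to c0_entryhi unchanged.
 */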

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.  Nothing to do here: init_new_context()
 * allocates nothing, and stale per-CPU context values are recycled
 * naturally by the ASID version scheme.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID.  */
	get_new_mmu_context(next, smp_processor_id());

	write_c0_entryhi(cpu_context(smp_processor_id(), next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
}
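
/*
 * Caller-side sketch (hedged; the generic code lives outside this
 * header): exec_mmap() in fs/exec.c does roughly
 *
 *	activate_mm(active_mm, mm);
 *
 * while the scheduler's context-switch path calls
 *
 *	switch_mm(oldmm, mm, next, this_cpu);
 *
 * which is why only activate_mm() forces a fresh ASID unconditionally:
 * a brand-new address space cannot have any useful TLB entries yet.
 */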

#endif /* _ASM_MMU_CONTEXT_H */