/*
 * Low level TLB handling.
 *
 * Copyright (C) 2000-2003, Axis Communications AB.
 *
 * Authors:   Bjorn Wesen <bjornw@axis.com>
 *            Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/arch/hwregs/asm/mmu_defs_asm.h>
#include <asm/arch/hwregs/supp_reg.h>

#define UPDATE_TLB_SEL_IDX(val)					\
do {								\
	unsigned long tlb_sel;					\
								\
	tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val);	\
	SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel);			\
} while (0)

#define UPDATE_TLB_HILO(tlb_hi, tlb_lo)		\
do {						\
	SUPP_REG_WR(RW_MM_TLB_HI, tlb_hi);	\
	SUPP_REG_WR(RW_MM_TLB_LO, tlb_lo);	\
} while (0)
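
/*
 * Typical usage, as in the flush routines below: select the MMU bank
 * with SUPP_BANK_SEL(), pick an entry with UPDATE_TLB_SEL_IDX() and
 * then rewrite it with UPDATE_TLB_HILO().
 */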

/*
 * The TLB can host up to 256 different mm contexts at the same time. The
 * running context is found in the PID register. Each TLB entry contains a
 * page_id that has to match the PID register to give a hit. page_id_map
 * keeps track of which mm is assigned to which page_id, making sure it is
 * known when to invalidate TLB entries.
 *
 * The last page_id is never used for a running context; it serves as an
 * invalid page_id so that it is possible to make TLB entries that will
 * never match.
 *
 * Note: the flushes need to be atomic, otherwise an interrupt handler that
 * uses vmalloc'ed memory might cause a TLB load in the middle of a flush.
 */
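
/*
 * For reference, a rough sketch of how an invalidating tlb_hi value is
 * composed by the flush routines below. The pid field sits in the low
 * eight bits (hence the "tlb_hi & 0xff" comparison against page_id),
 * while the vpn field holds the virtual page number:
 *
 *	mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID)
 *		   | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, vpn);
 */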

/* Flush all TLB entries. */
void
__flush_tlb_all(void)
{
	int i;
	int mmu;
	unsigned long flags;
	unsigned long mmu_tlb_hi;
	unsigned long mmu_tlb_sel;

	/*
	 * Mask the index with 0xf when it is used as the vpn, so that
	 * similar TLB entries aren't written into the same 4-way entry
	 * group.
	 */
	local_irq_save(flags);

	for (mmu = 1; mmu <= 2; mmu++) {
		SUPP_BANK_SEL(mmu); /* Select the MMU */
		for (i = 0; i < NUM_TLB_ENTRIES; i++) {
			/* Store invalid entry */
			mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i);

			mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID)
				    | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf));

			SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel);
			SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi);
			SUPP_REG_WR(RW_MM_TLB_LO, 0);
		}
	}

	local_irq_restore(flags);
}

/* Flush an entire user address space. */
void
__flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int mmu;
	unsigned long flags;
	unsigned long page_id;
	unsigned long tlb_hi;
	unsigned long mmu_tlb_hi;

	page_id = mm->context.page_id;

	if (page_id == NO_CONTEXT)
		return;

	/* Mark the TLB entries that match the page_id as invalid. */
	local_irq_save(flags);

	for (mmu = 1; mmu <= 2; mmu++) {
		SUPP_BANK_SEL(mmu);
		for (i = 0; i < NUM_TLB_ENTRIES; i++) {
			UPDATE_TLB_SEL_IDX(i);

			/* Get the page_id */
			SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);

			/* Check if the page_id matches. */
			if ((tlb_hi & 0xff) == page_id) {
				mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid,
				                        INVALID_PAGEID)
				            | REG_FIELD(mmu, rw_mm_tlb_hi, vpn,
				                        i & 0xf));

				UPDATE_TLB_HILO(mmu_tlb_hi, 0);
			}
		}
	}

	local_irq_restore(flags);
}

/* Invalidate a single page. */
void
__flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int i;
	int mmu;
	unsigned long page_id;
	unsigned long flags;
	unsigned long tlb_hi;
	unsigned long mmu_tlb_hi;

	page_id = vma->vm_mm->context.page_id;

	if (page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK;

	/*
	 * Invalidate those TLB entries that match both the mm context and the
	 * requested virtual address.
	 */
	local_irq_save(flags);

	for (mmu = 1; mmu <= 2; mmu++) {
		SUPP_BANK_SEL(mmu);
		for (i = 0; i < NUM_TLB_ENTRIES; i++) {
			UPDATE_TLB_SEL_IDX(i);
			SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);

			/* Check if the page_id and address match. */
			if (((tlb_hi & 0xff) == page_id) &&
			    ((tlb_hi & PAGE_MASK) == addr)) {
				mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid,
				                       INVALID_PAGEID) | addr;

				UPDATE_TLB_HILO(mmu_tlb_hi, 0);
			}
		}
	}

	local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
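	/*
	 * No page_id is allocated here; NO_CONTEXT just marks the mm as
	 * having no TLB context yet. A real page_id is expected to be
	 * picked up via get_mmu_context() when the mm is switched to in
	 * switch_mm() below.
	 */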
	mm->context.page_id = NO_CONTEXT;
	return 0;
}

static DEFINE_SPINLOCK(mmu_context_lock);

/* Called in schedule() just before actually doing the switch_to. */
void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	/* Make sure there is an MMU context. */
	spin_lock(&mmu_context_lock);
	get_mmu_context(next);
	cpu_set(cpu, next->cpu_vm_mask);
	spin_unlock(&mmu_context_lock);

	/*
	 * Remember the pgd for the fault handlers. Keep a separate copy of
	 * it because current and active_mm might be invalid at points where
	 * there is still a need to dereference the pgd.
	 */
	per_cpu(current_pgd, cpu) = next->pgd;

	/* Switch context in the MMU. */
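	/*
	 * The thread's TLS value is carried in the same special register as
	 * the page_id (presumably in the bits above the eight page_id bits),
	 * so it is OR'ed into the PID write when thread info is available.
	 */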
	if (tsk && task_thread_info(tsk))
		SPEC_REG_WR(SPEC_REG_PID, next->context.page_id |
					  task_thread_info(tsk)->tls);
	else
		SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
}