/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "as-layout.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"

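/*
 * Add a host mmap operation to the ops buffer.  If the last queued op
 * is an mmap of the region immediately below this one, with the same
 * protection, the same fd, and a contiguous file offset, just extend
 * it.  If the buffer is full, flush it through do_ops first.
 */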
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if(*index != -1){
		last = &ops[*index];
		if((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)){
			last->u.mmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MMAP,
					       .u = { .mmap = {
						       .addr	= virt,
						       .len	= len,
						       .prot	= prot,
						       .fd	= fd,
						       .offset	= offset }
					       } });
	return ret;
}

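/*
 * Add a host munmap operation, extending the last queued op if it
 * unmaps the region immediately below this one, and flushing the ops
 * buffer through do_ops when it fills up.
 */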
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)){
			last->u.munmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MUNMAP,
					       .u = { .munmap = {
						       .addr	= addr,
						       .len	= len } } });
	return ret;
}

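/*
 * Add a host mprotect operation, extending the last queued op if it
 * covers the region immediately below this one with the same
 * protection, and flushing the ops buffer through do_ops when it
 * fills up.
 */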
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.prot == prot)){
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MPROTECT,
					       .u = { .mprotect = {
						       .addr	= addr,
						       .len	= len,
						       .prot	= prot } } });
	return ret;
}

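/*
 * Round n up to the next inc boundary (inc must be a power of two);
 * if n is already aligned, this advances it by a full inc.
 */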
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

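/*
 * Scan the ptes covering [addr, end).  Host access is derived from
 * the pte - a pte that isn't young gets neither read nor write
 * access, and a pte that isn't dirty loses write access.  Pages
 * marked as needing a new mapping are remapped (or unmapped if no
 * longer present), pages needing only a protection change are
 * mprotected, and every pte is marked up to date.
 */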
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if(!pte_young(*pte)){
			r = 0;
			w = 0;
		} else if(!pte_dirty(*pte)){
			w = 0;
		}
		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if(force || pte_newpage(*pte)){
			if(pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, ops, op_index,
					       last_op, mmu, flush, do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
					      last_op, mmu, flush, do_ops);
		}
		else if(pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
					   last_op, mmu, flush, do_ops);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
	return ret;
}

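/*
 * Walk the pmds covering [addr, end).  A missing pmd marked as
 * needing a new page (or a forced flush) unmaps its entire range on
 * the host; present pmds are handed down to update_pte_range.
 */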
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if(!pmd_present(*pmd)){
			if(force || pmd_newpage(*pmd)){
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pmd++, addr = next, ((addr != end) && !ret));
	return ret;
}

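/*
 * Walk the puds covering [addr, end), unmapping the ranges under
 * missing puds and descending into update_pmd_range for present
 * ones.
 */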
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if(!pud_present(*pud)){
			if(force || pud_newpage(*pud)){
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pud++, addr = next, ((addr != end) && !ret));
	return ret;
}

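/*
 * Walk the page tables for [start_addr, end_addr) and queue the host
 * mmap/munmap/mprotect operations needed to bring the host address
 * space into sync with them, flushing the queue through the
 * mode-specific do_ops callback.  If anything fails, the current
 * process is killed, since its host address space may no longer
 * match its page tables.
 */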
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *pgd;
	union mm_context *mmu = &mm->context;
	struct host_vm_op ops[1];
	unsigned long addr = start_addr, next;
	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
	void *flush = NULL;

	ops[0].type = NONE;
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if(!pgd_present(*pgd)){
			if(force || pgd_newpage(*pgd)){
				ret = add_munmap(addr, next - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, ops, last_op,
					    &op_index, force, mmu, &flush,
					    do_ops);
	} while (pgd++, addr = next, ((addr != end_addr) && !ret));

	if(!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if(ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}

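/*
 * Flush a kernel address range by walking init_mm's page tables
 * directly - ranges under missing page table entries marked as
 * needing a new page are unmapped on the host, new pages are
 * remapped, and protection changes are applied.  Returns nonzero if
 * any host mapping was changed.
 */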
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr, PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n", -err);
			if(pte_present(*pte))
				map_memory(addr, pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return(updated);
}

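/*
 * Out-of-line versions of the page table accessors, for callers that
 * can't use the inline versions directly.
 */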
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}

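/* Return the pte mapping addr in task's address space. */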
pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}

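/*
 * The flush entry points below dispatch to the tt- or skas-mode
 * implementation via CHOOSE_MODE.
 */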
void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}