/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/mmu.h"
#include "mem_user.h"
#include "mem.h"
#include "skas.h"
#include "os.h"
#include "tlb.h"

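/* Apply a batch of address space changes (mmap/munmap/mprotect) to the
 * host process identified by mmu->skas.id.  The loop stops at the first
 * failing operation and returns its error; "finished" and "flush" are
 * passed through so the os layer can batch the host system calls.
 */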
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
		  int finished, void **flush)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for(i = 0; i <= last && !ret; i++){
		op = &ops[i];
		switch(op->type){
		case MMAP:
			ret = map(&mmu->skas.id, op->u.mmap.addr,
				  op->u.mmap.len, op->u.mmap.prot,
				  op->u.mmap.fd, op->u.mmap.offset, finished,
				  flush);
			break;
		case MUNMAP:
			ret = unmap(&mmu->skas.id, op->u.munmap.addr,
				    op->u.munmap.len, finished, flush);
			break;
		case MPROTECT:
			ret = protect(&mmu->skas.id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, flush);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}

extern int proc_mm;

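/* Update the host mappings covering [start_addr, end_addr) to match the
 * page tables of "mm".  Without host /proc/mm support, the stub pages sit
 * at CONFIG_STUB_START in the process address space and must stay mapped,
 * so the range is clamped to end at CONFIG_STUB_START.
 */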
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	if(!proc_mm && (end_addr > CONFIG_STUB_START))
		end_addr = CONFIG_STUB_START;

	fix_range_common(mm, start_addr, end_addr, force, do_ops);
}

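/* Flush a single page of the kernel address space. */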
void __flush_tlb_one_skas(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

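/* A vma with no mm refers to the kernel address space, which is flushed
 * against the kernel page tables; any other range is fixed up against
 * its own mm.
 */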
void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	if(vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}

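/* Flush the entire address space: all of userspace when the host has
 * /proc/mm, otherwise everything below the stub pages.
 */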
void flush_tlb_mm_skas(struct mm_struct *mm)
{
	unsigned long end;

	/* Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if(atomic_read(&mm->mm_users) == 0)
		return;

	end = proc_mm ? task_size : CONFIG_STUB_START;
	fix_range(mm, 0, end, 0);
}

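/* Remap every VMA of the current address space on the host.  The force
 * argument makes fix_range redo the mappings whether or not the ptes are
 * marked as changed.
 */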
void force_flush_all_skas(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while(vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}

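/* Single-page flush: walk the page tables by hand, compute the host
 * protection from the pte, and issue one map, unmap, or protect call for
 * the page.  A failure at any point is fatal, since the host mapping can
 * no longer be kept in sync with the page tables.
 */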
void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	pgd = pgd_offset(mm, address);
	if(!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if(!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if(!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

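	/* Emulate the accessed and dirty bits: a pte that is not young gets
	 * no access on the host, so the next reference faults and marks it
	 * young; a pte that is not dirty stays read-only, so the next write
	 * faults and marks it dirty.
	 */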
	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.skas.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if(pte_newpage(*pte)){
		if(pte_present(*pte)){
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if(pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if(err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}