/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/sched.h"
#include "linux/list.h"
#include "linux/spinlock.h"
#include "linux/slab.h"
#include "linux/errno.h"
#include "linux/mm.h"
#include "asm/current.h"
#include "asm/segment.h"
#include "asm/mmu.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/ldt.h"
#include "os.h"
#include "skas.h"

extern int __syscall_stub_start;

/* Map a single, read-only stub page at the userspace address proc,
 * backed by the kernel page at kernel, allocating any intermediate
 * page table levels that are missing.
 */
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/* There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.  To
	 * avoid having to know where the stack is, or whether the process
	 * mapped something at the top of its address space for some other
	 * reason, we set TASK_SIZE to end at the start of the last page
	 * table.  This keeps exit_mmap off the last page, but introduces a
	 * leak of that page.  So, we hang onto it here and free it in
	 * destroy_context_skas.
	 */
	mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

	/* Unwind in the reverse order of allocation - a pte failure frees
	 * the pmd and then falls through to free the pud as well.
	 */
 out_pte:
	pmd_free(pmd);
 out_pmd:
	pud_free(pud);
 out:
	return -ENOMEM;
}
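
/* Create the host address space for a new mm.  When a stub is needed
 * (skas0 mode), allocate a zeroed page for the stub's stack and map the
 * stub code and data pages at CONFIG_STUB_CODE and CONFIG_STUB_DATA.
 * The host context is then either a new /proc/mm file descriptor or the
 * pid of a freshly forked host process, and the LDT is inherited from
 * the parent mm when there is one.
 */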
int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
{
	struct mmu_context_skas *from_mm = NULL;
	struct mmu_context_skas *to_mm = &mm->context.skas;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	if(skas_needs_stub){
		stack = get_zeroed_page(GFP_KERNEL);
		if(stack == 0)
			goto out;

		/* This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm.nr_ptes to
		 * be accurate.
		 */
		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

		ret = init_stub_pte(mm, CONFIG_STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if(ret)
			goto out_free;

		ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
		if(ret)
			goto out_free;

		/* The stub pte page is freed by destroy_context_skas rather
		 * than exit_mmap, so keep it out of the page table
		 * accounting.
		 */
		mm->nr_ptes--;
	}

	to_mm->id.stack = stack;
	if(current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context.skas;

	if(proc_mm){
		ret = new_mm(stack);
		if(ret < 0){
			printk("init_new_context_skas - new_mm failed, "
			       "errno = %d\n", ret);
			goto out_free;
		}
		to_mm->id.u.mm_fd = ret;
	}
	else {
		if(from_mm)
			to_mm->id.u.pid = copy_context_skas0(stack,
							     from_mm->id.u.pid);
		else
			to_mm->id.u.pid = start_userspace(stack);
	}

	ret = init_new_ldt(to_mm, from_mm);
	if(ret < 0){
		printk("init_new_context_skas - init_new_ldt failed, "
		       "errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if(to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}

/* Tear down the host side of an exiting mm - close the /proc/mm file
 * descriptor or kill the host process, then free the stub stack and the
 * leaked page table pages described in init_stub_pte.
 */
void destroy_context_skas(struct mm_struct *mm)
{
	struct mmu_context_skas *mmu = &mm->context.skas;

	if(proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else
		os_kill_ptraced_process(mmu->id.u.pid, 1);

	if(!proc_mm || !ptrace_faultinfo){
		free_page(mmu->id.stack);
		pte_lock_deinit(virt_to_page(mmu->last_page_table));
		pte_free_kernel((pte_t *) mmu->last_page_table);
		dec_zone_page_state(virt_to_page(mmu->last_page_table),
				    NR_PAGETABLE);
#ifdef CONFIG_3_LEVEL_PGTABLES
		pmd_free((pmd_t *) mmu->last_pmd);
#endif
	}
}