/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/arch/x86/vdso/
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>

#include "vextern.h"		/* Just for VMAGIC.  */
#undef VEXTERN

unsigned int __read_mostly vdso_enabled = 1;

extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

static struct page **vdso_pages;
static unsigned vdso_size;

/*
 * Each exported vDSO variable slot is expected to still hold the VMAGIC
 * sentinel placed there when the vDSO image was built (see vextern.h).
 * If it does not, the symbol offsets and the image are out of sync, so
 * disable the vDSO rather than patch a random location.
 */
static inline void *var_ref(void *p, char *name)
{
	if (*(void **)p != (void *)VMAGIC) {
		printk("VDSO: variable %s broken\n", name);
		vdso_enabled = 0;
	}
	return p;
}

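/*
 * Overview (editor's note): init_vdso_vars() copies the kernel-embedded
 * vDSO image (vdso_start..vdso_end) into freshly allocated pages, maps
 * those pages contiguously with vmap() so the image can be sanity-checked
 * and patched, then drops the temporary mapping.  The pages themselves
 * live on in vdso_pages[] and are mapped into each process later.
 */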
static int __init init_vdso_vars(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;
	char *vbase;

	vdso_size = npages << PAGE_SHIFT;
	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!vdso_pages)
		goto oom;
	for (i = 0; i < npages; i++) {
		struct page *p;
		p = alloc_page(GFP_KERNEL);
		if (!p)
			goto oom;
		vdso_pages[i] = p;
		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
	}

	vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
	if (!vbase)
		goto oom;

	if (memcmp(vbase, "\177ELF", 4)) {
		printk("VDSO: I'm broken; not ELF\n");
		vdso_enabled = 0;
	}

#define VEXTERN(x) \
	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
#include "vextern.h"
#undef VEXTERN
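	/*
	 * Expansion sketch (editor's note): for a hypothetical entry
	 * VEXTERN(foo) in vextern.h, the #include above emits
	 *
	 *	*(typeof(__foo) **) var_ref(VDSO64_SYMBOL(vbase, foo), "foo") = &__foo;
	 *
	 * i.e. it looks up the slot for "foo" in the vmap'ed vDSO image,
	 * lets var_ref() verify the VMAGIC sentinel, and patches the slot
	 * to point at the kernel's __foo.
	 */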
	vunmap(vbase);
	return 0;

 oom:
	printk("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
subsys_initcall(init_vdso_vars);

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory make sure it is still in the same PTE as the stack top.
 * This doesn't give that many random bits.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;
	return addr;
}
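/*
 * Worked example (editor's note, using the x86-64 constants PAGE_SHIFT = 12,
 * PTRS_PER_PTE = 512, PMD_SIZE = 2 MB): offset is a random page count in
 * [0, 511], so the candidate address lies at most 511 * 4 KB above the
 * stack top, and the clamp against "end" keeps the whole mapping below the
 * next 2 MB boundary.  That is at most 9 bits of randomness, which is what
 * the comment above the function concedes.
 */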

/*
 * Set up a VMA at program startup for the vsyscall page.
 * Not called for compat tasks.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, vdso_size);
	addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;

	ret = install_special_mapping(mm, addr, vdso_size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      vdso_pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

	/* Success: fall through with ret == 0. */
up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
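/*
 * Editor's note: the address cached in mm->context.vdso is what the ELF
 * loader later hands to userspace as the AT_SYSINFO_EHDR auxv entry,
 * which is how the dynamic linker locates the vDSO image.
 */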

static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
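/*
 * Usage note (editor's addition): booting with "vdso=0" clears
 * vdso_enabled, so arch_setup_additional_pages() becomes a no-op and new
 * processes get no vDSO mapping; any nonzero value re-enables it.
 */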