/*
 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
#include "linux/errno.h"
#include "linux/spinlock.h"
#include "asm/uaccess.h"
#include "asm/smp.h"
#include "asm/ldt.h"
#include "asm/unistd.h"
#include "choose-mode.h"
#include "kern.h"
#include "mode_kern.h"
#include "os.h"

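/*
 * modify_ldt here is the host syscall wrapper; the function codes follow
 * the i386 ABI: 0 reads the LDT, 2 reads the default LDT, and 1/0x11
 * write a single entry (0x11 being the newer interface that preserves
 * the "useable" bit).
 */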
extern int modify_ldt(int func, void *ptr, unsigned long bytecount);

#ifdef CONFIG_MODE_TT

static long do_modify_ldt_tt(int func, void __user *ptr,
			     unsigned long bytecount)
{
	struct user_desc info;
	int res = 0;
	void *buf = NULL;
	void *p = NULL; /* What we pass to the host. */

	switch(func){
	case 1:
	case 0x11: /* write_ldt */
		/* Do this check now to avoid overflows. */
		if(bytecount != sizeof(struct user_desc)){
			res = -EINVAL;
			goto out;
		}

		if(copy_from_user(&info, ptr, sizeof(info))){
			res = -EFAULT;
			goto out;
		}

		p = &info;
		break;
	case 0:
	case 2: /* read_ldt */

		/* Using the on-stack info avoids a kmalloc in the write
		 * case; reads still need a buffer of the caller's size. */
		buf = kmalloc(bytecount, GFP_KERNEL);
		if(buf == NULL){
			res = -ENOMEM;
			goto out;
		}
		p = buf;
		break;
	default:
		res = -ENOSYS;
		goto out;
	}

	res = modify_ldt(func, p, bytecount);
	if(res < 0)
		goto out;

	switch(func){
	case 0:
	case 2:
		/* modify_ldt was a read and returned the number of bytes
		 * read. */
		if(copy_to_user(ptr, p, res))
			res = -EFAULT;
		break;
	}

out:
	kfree(buf);
	return res;
}

#endif

#ifdef CONFIG_MODE_SKAS

#include "skas.h"
#include "skas_ptrace.h"
#include "asm/mmu_context.h"
#include "proc_mm.h"

long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
		     void **addr, int done)
{
	long res;

	if(proc_mm){
		/* This is special handling for the case that the mm to
		 * modify isn't current->active_mm.
		 * If this is called directly by modify_ldt,
		 *     (current->active_mm->context.skas.u == mm_idp)
		 * will be true, so no call to switch_mm_skas(mm_idp) is
		 * done. If this is called from init_new_ldt or PTRACE_LDT,
		 * mm_idp won't belong to current->active_mm, but to
		 * child->mm, so we need to switch the child's mm into our
		 * userspace, then switch back later.
		 *
		 * Note: I'm unsure whether interrupts should be disabled
		 * here.
		 */
		if(!current->active_mm || current->active_mm == &init_mm ||
		   mm_idp != &current->active_mm->context.skas.id)
			switch_mm_skas(mm_idp);
	}

	if(ptrace_ldt) {
		struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
			.func = func,
			.ptr = desc,
			.bytecount = sizeof(*desc)};
		u32 cpu;
		int pid;

		if(!proc_mm)
			pid = mm_idp->u.pid;
		else {
			cpu = get_cpu();
			pid = userspace_pid[cpu];
		}

		res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);

		if(proc_mm)
			put_cpu();
	}
	else {
		void *stub_addr;
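		/* Pass the descriptor through the stub's data area, with
		 * its size rounded up to a whole number of longs. */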
		res = syscall_stub_data(mm_idp, (unsigned long *)desc,
					(sizeof(*desc) + sizeof(long) - 1) &
					    ~(sizeof(long) - 1),
					addr, &stub_addr);
		if(!res){
			unsigned long args[] = { func,
						 (unsigned long)stub_addr,
						 sizeof(*desc),
						 0, 0, 0 };
			res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
					       0, addr, done);
		}
	}

	if(proc_mm){
		/* This is the second part of the special handling that
		 * makes implementing PTRACE_LDT possible: switch back to
		 * the mm we came from.
		 */
		if(current->active_mm && current->active_mm != &init_mm &&
		   mm_idp != &current->active_mm->context.skas.id)
			switch_mm_skas(&current->active_mm->context.skas.id);
	}

	return res;
}

static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
{
	int res, n;
	struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
			.func = 0,
			.bytecount = bytecount,
			.ptr = kmalloc(bytecount, GFP_KERNEL)};
	u32 cpu;

	if(ptrace_ldt.ptr == NULL)
		return -ENOMEM;

	/* This is called from sys_modify_ldt only, so userspace_pid gives
	 * us the right process to trace.
	 */
	cpu = get_cpu();
	res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
	put_cpu();
	if(res < 0)
		goto out;

	n = copy_to_user(ptr, ptrace_ldt.ptr, res);
	if(n != 0)
		res = -EFAULT;

out:
	kfree(ptrace_ldt.ptr);

	return res;
}

/*
 * In skas mode, we hold our own LDT data in UML.
 * Thus, the code implementing do_modify_ldt_skas
 * is very similar to (and mostly stolen from) the sys_modify_ldt
 * implementation in arch/i386/kernel/ldt.c.
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - do_modify_ldt_skas
 */

static int read_ldt(void __user * ptr, unsigned long bytecount)
{
	int i, err = 0;
	unsigned long size;
	uml_ldt_t * ldt = &current->mm->context.skas.ldt;

	if(!ldt->entry_count)
		goto out;
	if(bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	err = bytecount;

	if(ptrace_ldt){
		return read_ldt_from_host(ptr, bytecount);
	}
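
	/* The shadow LDT lives either directly in the context (up to
	 * LDT_DIRECT_ENTRIES entries) or in an array of whole pages; copy
	 * out of whichever representation is currently in use. */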
	down(&ldt->semaphore);
	if(ldt->entry_count <= LDT_DIRECT_ENTRIES){
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if(size > bytecount)
			size = bytecount;
		if(copy_to_user(ptr, ldt->u.entries, size))
			err = -EFAULT;
		bytecount -= size;
		ptr += size;
	}
	else {
		for(i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		    i++){
			size = PAGE_SIZE;
			if(size > bytecount)
				size = bytecount;
			if(copy_to_user(ptr, ldt->u.pages[i], size)){
				err = -EFAULT;
				break;
			}
			bytecount -= size;
			ptr += size;
		}
	}
	up(&ldt->semaphore);

	if(bytecount == 0 || err == -EFAULT)
		goto out;

	if(clear_user(ptr, bytecount))
		err = -EFAULT;

out:
	return err;
}

static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
	int err;

	if(bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;

	err = bytecount;
	/* UML doesn't support lcall7 and lcall27, so we don't really have
	 * a default LDT; instead, we emulate an empty LDT of the common
	 * host default LDT size.
	 */
	if(clear_user(ptr, bytecount))
		err = -EFAULT;

	return err;
}

static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
	uml_ldt_t * ldt = &current->mm->context.skas.ldt;
	struct mm_id * mm_idp = &current->mm->context.skas.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;

	err = -EINVAL;
	if(bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if(copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	err = -EINVAL;
	if(ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
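	/* contents == 3 is not a valid segment type; as in the i386
	 * write_ldt, it is rejected by the legacy interface (func == 1)
	 * and otherwise only accepted for a not-present entry. */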
	if(ldt_info.contents == 3){
		if(func == 1)
			goto out;
		if(ldt_info.seg_not_present == 0)
			goto out;
	}

	if(!ptrace_ldt)
		down(&ldt->semaphore);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if(err)
		goto out_unlock;
	else if(ptrace_ldt) {
		/* With PTRACE_LDT available, this is used as a flag only */
		ldt->entry_count = 1;
		goto out;
	}

	if(ldt_info.entry_number >= ldt->entry_count &&
	   ldt_info.entry_number >= LDT_DIRECT_ENTRIES){
		for(i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		    i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		    i++){
			if(i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if(!ldt->u.pages[i]){
				err = -ENOMEM;
				/* Undo the change in the host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				goto out_unlock;
			}
			if(i == 0) {
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if(ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

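	/* Locate the shadow copy of the descriptor, either in the direct
	 * array or in the page holding its entry number. */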
	if(ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	if(ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	   (func == 1 || LDT_empty(&ldt_info))){
		ldt_p->a = 0;
		ldt_p->b = 0;
	}
	else{
		if(func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;

out_unlock:
	up(&ldt->semaphore);
out:
	return err;
}

static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch(func){
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
	case 0x11:
		ret = write_ldt(ptr, bytecount, func);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	}
	return ret;
}

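/*
 * host_ldt_entries points to a -1 terminated list of the host LDT slots
 * that are in use and must be cleared in new address spaces; dummy_list
 * covers the common case where only entry 0 needs clearing.
 */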
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short * host_ldt_entries = NULL;

static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry * ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	if(host_ldt_entries != NULL){
		spin_unlock(&host_ldt_lock);
		return;
	}
	host_ldt_entries = dummy_list+1;

	spin_unlock(&host_ldt_lock);

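	/* Compute the smallest order such that 2^order pages cover
	 * LDT_PAGES_MAX pages. */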
	for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++);

	ldt = (struct ldt_entry *)
	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if(ldt == NULL) {
		printk("ldt_get_host_info: couldn't allocate buffer for host "
		       "ldt\n");
		return;
	}

	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
	if(ret < 0) {
		printk("ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if(ret == 0) {
		/* default_ldt is active; simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	for(i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++){
		if(ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	if(size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
	else {
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if(tmp == NULL) {
			printk("ldt_get_host_info: couldn't allocate host ldt "
			       "list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){
		if(ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}

long init_new_ldt(struct mmu_context_skas * new_mm,
		  struct mmu_context_skas * from_mm)
{
	struct user_desc desc;
	short * num_p;
	int i;
	long page, err=0;
	void *addr = NULL;
	struct proc_mm_op copy;

	if(!ptrace_ldt)
		init_MUTEX(&new_mm->ldt.semaphore);

	if(!from_mm){
		memset(&desc, 0, sizeof(desc));
		/*
		 * We have to initialize a clean ldt.
		 */
		if(proc_mm) {
			/*
			 * If the new mm was created using proc_mm, the
			 * host's default LDT is currently assigned, which
			 * normally contains the call-gates for lcall7 and
			 * lcall27. To remove these gates, we simply write
			 * an empty entry as number 0 to the host.
			 */
			err = write_ldt_entry(&new_mm->id, 1, &desc,
					      &addr, 1);
		}
		else{
			/*
			 * Now we try to retrieve info about the LDT we
			 * inherited from the host. All LDT entries found
			 * will be reset in the following loop.
			 */
			ldt_get_host_info();
			for(num_p=host_ldt_entries; *num_p != -1; num_p++){
				desc.entry_number = *num_p;
				err = write_ldt_entry(&new_mm->id, 1, &desc,
						      &addr, *(num_p + 1) == -1);
				if(err)
					break;
			}
		}
		new_mm->ldt.entry_count = 0;

		goto out;
	}

	if(proc_mm){
		/* We have a valid from_mm, so we now have to copy the LDT
		 * of from_mm to new_mm, because using proc_mm a new mm
		 * with an empty/default LDT was created in new_mm().
		 */
		copy = ((struct proc_mm_op) { .op	= MM_COPY_SEGMENTS,
					      .u	=
					      { .copy_segments =
							from_mm->id.u.mm_fd } } );
		i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
		if(i != sizeof(copy))
			printk("new_mm : /proc/mm copy_segments failed, "
			       "err = %d\n", -i);
	}

	if(!ptrace_ldt) {
		/* Our local LDT is used to supply the data for
		 * modify_ldt(READLDT) if PTRACE_LDT isn't available,
		 * because then we have to use the syscall stub for
		 * modify_ldt, which can't handle a big read buffer of up
		 * to 64kB.
		 */
		down(&from_mm->ldt.semaphore);
		if(from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES){
			memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
			       sizeof(new_mm->ldt.u.entries));
		}
		else{
			i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
			while(i-- > 0){
				page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
				if(!page){
					err = -ENOMEM;
					break;
				}
				new_mm->ldt.u.pages[i] =
					(struct ldt_entry *) page;
				memcpy(new_mm->ldt.u.pages[i],
				       from_mm->ldt.u.pages[i], PAGE_SIZE);
			}
		}
		new_mm->ldt.entry_count = from_mm->ldt.entry_count;
		up(&from_mm->ldt.semaphore);
	}

out:
	return err;
}

void free_ldt(struct mmu_context_skas * mm)
{
	int i;

	if(!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES){
		i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while(i-- > 0)
			free_page((long) mm->ldt.u.pages[i]);
	}
	mm->ldt.entry_count = 0;
}
#endif

int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
				ptr, bytecount);
}
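
/*
 * For reference, a sketch of how userspace exercises this path; this is
 * not part of UML itself, just an illustration of the i386 modify_ldt
 * ABI handled above (func 0x11 writes one descriptor, func 0 reads the
 * table back; some_base and buf are placeholders):
 *
 *	struct user_desc ud = {
 *		.entry_number	= 0,
 *		.base_addr	= (unsigned long) some_base,
 *		.limit		= 0xfffff,
 *		.seg_32bit	= 1,
 *		.limit_in_pages	= 1,
 *		.useable	= 1,
 *	};
 *	syscall(__NR_modify_ldt, 0x11, &ud, sizeof(ud));
 *	syscall(__NR_modify_ldt, 0, buf, sizeof(buf));
 */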