/*-
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/linux/linux_machdep.c 166007 2007-01-14 16:20:37Z netchild $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/unistd.h>
#include <sys/wait.h>

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_emul.h>

#include <i386/include/pcb.h>		/* needed for pcb definition in linux_set_thread_area */

#include "opt_posix.h"

extern struct sysentvec elf32_freebsd_sysvec;	/* defined in i386/i386/elf_machdep.c */

struct l_descriptor {
	l_uint		entry_number;
	l_ulong		base_addr;
	l_uint		limit;
	l_uint		seg_32bit:1;
	l_uint		contents:2;
	l_uint		read_exec_only:1;
	l_uint		limit_in_pages:1;
	l_uint		seg_not_present:1;
	l_uint		useable:1;
};

struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval	*timeout;
};

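/*
 * Translate the sigaltstack flags (SS_ONSTACK/SS_DISABLE) between their
 * Linux and native FreeBSD encodings.
 */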
int
linux_to_bsd_sigaltstack(int lsa)
{
	int bsa = 0;

	if (lsa & LINUX_SS_DISABLE)
		bsa |= SS_DISABLE;
	if (lsa & LINUX_SS_ONSTACK)
		bsa |= SS_ONSTACK;
	return (bsa);
}

int
bsd_to_linux_sigaltstack(int bsa)
{
	int lsa = 0;

	if (bsa & SS_DISABLE)
		lsa |= LINUX_SS_DISABLE;
	if (bsa & SS_ONSTACK)
		lsa |= LINUX_SS_ONSTACK;
	return (lsa);
}

int
linux_execve(struct thread *td, struct linux_execve_args *args)
{
	int error;
	char *newpath;
	struct image_args eargs;

	LCONVPATHEXIST(td, args->path, &newpath);

#ifdef DEBUG
	if (ldebug(execve))
		printf(ARGS(execve, "%s"), newpath);
#endif

	error = exec_copyin_args(&eargs, newpath, UIO_SYSSPACE,
	    args->argp, args->envp);
	free(newpath, M_TEMP);
	if (error == 0)
		error = kern_execve(td, &eargs, NULL);
	if (error == 0)
		/*
		 * A Linux process can exec a FreeBSD one.  Don't attempt
		 * to create emuldata for such a process using
		 * linux_proc_init; doing so panics on a KASSERT because
		 * such a process has p->p_emuldata == NULL.
		 */
		if (td->td_proc->p_sysent == &elf_linux_sysvec)
			error = linux_proc_init(td, 0, 0);
	return (error);
}

struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};

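/*
 * Linux funnels the SysV IPC family through a single ipc(2) syscall;
 * demultiplex on the low 16 bits of args->what and hand off to the
 * corresponding linux_* handler with a per-call argument structure.
 */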
int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {
		struct linux_semop_args a;

		a.semid = args->arg1;
		a.tsops = args->ptr;
		a.nsops = args->arg2;
		return (linux_semop(td, &a));
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		return (linux_semget(td, &a));
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;
		int error;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		error = copyin(args->ptr, &a.arg, sizeof(a.arg));
		if (error)
			return (error);
		return (linux_semctl(td, &a));
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = args->ptr;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		return (linux_msgsnd(td, &a));
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;
			int error;

			if (args->ptr == NULL)
				return (EINVAL);
			error = copyin(args->ptr, &tmp, sizeof(tmp));
			if (error)
				return (error);
			a.msgp = tmp.msgp;
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = args->ptr;
			a.msgtyp = args->arg5;
		}
		return (linux_msgrcv(td, &a));
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		return (linux_msgget(td, &a));
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_msgctl(td, &a));
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;

		a.shmid = args->arg1;
		a.shmaddr = args->ptr;
		a.shmflg = args->arg2;
		a.raddr = (l_ulong *)args->arg3;
		return (linux_shmat(td, &a));
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = args->ptr;
		return (linux_shmdt(td, &a));
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		return (linux_shmget(td, &a));
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_shmctl(td, &a));
	}
	default:
		break;
	}

	return (EINVAL);
}

int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

#ifdef DEBUG
	if (ldebug(old_select))
		printf(ARGS(old_select, "%p"), args->ptr);
#endif

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.nfds = linux_args.nfds;
	newsel.readfds = linux_args.readfds;
	newsel.writefds = linux_args.writefds;
	newsel.exceptfds = linux_args.exceptfds;
	newsel.timeout = linux_args.timeout;
	return (linux_select(td, &newsel));
}

int
linux_fork(struct thread *td, struct linux_fork_args *args)
{
	int error;

#ifdef DEBUG
	if (ldebug(fork))
		printf(ARGS(fork, ""));
#endif

	if ((error = fork(td, (struct fork_args *)args)) != 0)
		return (error);

	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);

	return (0);
}

int
linux_vfork(struct thread *td, struct linux_vfork_args *args)
{
	int error;
	struct proc *p2;

#ifdef DEBUG
	if (ldebug(vfork))
		printf(ARGS(vfork, ""));
#endif

	/* exclude RFPPWAIT */
	if ((error = fork1(td, RFFDG | RFPROC | RFMEM, 0, &p2)) != 0)
		return (error);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	/* Are we the child? */
	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);
	/* Wait for the child to exit, i.e., emulate vfork. */
	PROC_LOCK(p2);
	p2->p_flag |= P_PPWAIT;
	while (p2->p_flag & P_PPWAIT)
		msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	return (0);
}

int
linux_clone(struct thread *td, struct linux_clone_args *args)
{
	int error, ff = RFPROC | RFSTOPPED;
	struct proc *p2;
	struct thread *td2;
	int exit_signal;
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(clone)) {
		printf(ARGS(clone, "flags %x, stack %x, parent tid: %x, child tid: %x"),
		    (unsigned int)args->flags, (unsigned int)args->stack,
		    (unsigned int)args->parent_tidptr, (unsigned int)args->child_tidptr);
	}
#endif

	exit_signal = args->flags & 0x000000ff;
	if (!LINUX_SIG_VALID(exit_signal) && exit_signal != 0)
		return (EINVAL);

	if (exit_signal <= LINUX_SIGTBLSZ)
		exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)];

	if (args->flags & CLONE_VM)
		ff |= RFMEM;
	if (args->flags & CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	/*
	 * XXX: In Linux, sharing of fs info (chroot/cwd/umask) and of open
	 * files is independent.  In FreeBSD both live in one structure,
	 * but in practice this causes no problems because both flags are
	 * usually set together.
	 */
	if (!(args->flags & (CLONE_FILES | CLONE_FS)))
		ff |= RFFDG;

	/*
	 * Attempt to detect when linux_clone(2) is used for creating
	 * kernel threads.  Unfortunately, despite the existence of the
	 * CLONE_THREAD flag, the version of the linuxthreads package used
	 * in most popular distros as of the beginning of 2005 doesn't make
	 * any use of it.  Therefore, this detection relies entirely on the
	 * empirical observation that linuxthreads sets a certain
	 * combination of flags, so that we can make a more or less precise
	 * detection and notify the FreeBSD kernel that several processes
	 * are in fact part of the same threading group, and that special
	 * treatment is necessary for signal delivery between those
	 * processes and for fd locking.
	 */
	if ((args->flags & 0xffffff00) == THREADING_FLAGS)
		ff |= RFTHREAD;

	error = fork1(td, ff, 0, &p2);
	if (error)
		return (error);

	/* create the emuldata */
	error = linux_proc_init(td, p2->p_pid, args->flags);
	/* reference it - no need to check this */
	em = em_find(p2, EMUL_DOLOCK);
	KASSERT(em != NULL, ("clone: emuldata not found.\n"));
	/* and adjust it */
	if (args->flags & CLONE_PARENT_SETTID) {
		if (args->parent_tidptr == NULL) {
			EMUL_UNLOCK(&emul_lock);
			return (EINVAL);
		}
		error = copyout(&p2->p_pid, args->parent_tidptr, sizeof(p2->p_pid));
		if (error) {
			EMUL_UNLOCK(&emul_lock);
			return (error);
		}
	}

	if (args->flags & (CLONE_PARENT|CLONE_THREAD)) {
		sx_xlock(&proctree_lock);
		PROC_LOCK(p2);
		proc_reparent(p2, td->td_proc->p_pptr);
		PROC_UNLOCK(p2);
		sx_xunlock(&proctree_lock);
	}

	if (args->flags & CLONE_THREAD) {
		/*
		 * XXX: Linux mangles pgrp and pptr somehow.
		 * I think it might be this, but I am not sure.
		 */
#ifdef notyet
		PROC_LOCK(p2);
		p2->p_pgrp = td->td_proc->p_pgrp;
		PROC_UNLOCK(p2);
#endif
		exit_signal = 0;
	}

	if (args->flags & CLONE_CHILD_SETTID)
		em->child_set_tid = args->child_tidptr;
	else
		em->child_set_tid = NULL;

	if (args->flags & CLONE_CHILD_CLEARTID)
		em->child_clear_tid = args->child_tidptr;
	else
		em->child_clear_tid = NULL;

	EMUL_UNLOCK(&emul_lock);

	PROC_LOCK(p2);
	p2->p_sigparent = exit_signal;
	PROC_UNLOCK(p2);
	td2 = FIRST_THREAD_IN_PROC(p2);
	/*
	 * If stack == NULL we are supposed to COW the calling process's
	 * stack.  That is what normal fork() does, so we just leave the
	 * tf_esp argument intact.
	 */
	if (args->stack)
		td2->td_frame->tf_esp = (unsigned int)args->stack;

	if (args->flags & CLONE_SETTLS) {
		struct l_user_desc info;
		int idx;
		int a[2];
		struct segment_descriptor sd;

		error = copyin((void *)td->td_frame->tf_esi, &info,
		    sizeof(struct l_user_desc));
		if (error)
			return (error);

		idx = info.entry_number;

		/*
		 * It looks like we're getting the idx we returned
		 * in the set_thread_area() syscall.
		 */
		if (idx != 6 && idx != 3)
			return (EINVAL);

		/* This doesn't happen in practice. */
		if (idx == 6) {
			/* We might copy out the entry_number as 3. */
			info.entry_number = 3;
			error = copyout(&info, (void *)td->td_frame->tf_esi,
			    sizeof(struct l_user_desc));
			if (error)
				return (error);
		}

		a[0] = LDT_entry_a(&info);
		a[1] = LDT_entry_b(&info);

		memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
		if (ldebug(clone))
			printf("Segment created in clone with CLONE_SETTLS: "
			    "lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, "
			    "type: %i, dpl: %i, p: %i, xx: %i, def32: %i, "
			    "gran: %i\n", sd.sd_lobase, sd.sd_hibase,
			    sd.sd_lolimit, sd.sd_hilimit, sd.sd_type,
			    sd.sd_dpl, sd.sd_p, sd.sd_xx, sd.sd_def32,
			    sd.sd_gran);
#endif

		/* set %gs */
		td2->td_pcb->pcb_gsd = sd;
		td2->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
	}

#ifdef DEBUG
	if (ldebug(clone))
		printf(LMSG("clone: successful rfork to %ld, stack %p sig = %d"),
		    (long)p2->p_pid, args->stack, exit_signal);
#endif

	/*
	 * Make this runnable after we are finished with it.
	 */
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td2);
	setrunqueue(td2, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	td->td_retval[0] = p2->p_pid;
	td->td_retval[1] = 0;

	if (args->flags & CLONE_VFORK) {
		/* Wait for the child to exit, i.e., emulate vfork. */
		PROC_LOCK(p2);
		p2->p_flag |= P_PPWAIT;
		while (p2->p_flag & P_PPWAIT)
			msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
		PROC_UNLOCK(p2);
	}

	return (0);
}

/* XXX move */
struct l_mmap_argv {
	l_caddr_t	addr;
	l_int		len;
	l_int		prot;
	l_int		flags;
	l_int		fd;
	l_int		pos;
};

#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)

static int linux_mmap_common(struct thread *, struct l_mmap_argv *);

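/*
 * linux_mmap2() receives its arguments directly in registers and takes the
 * file offset in units of pages, while the old linux_mmap() reads an
 * argument block from user space with a byte offset; both are funneled
 * into linux_mmap_common().
 */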
int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{
	struct l_mmap_argv linux_args;

#ifdef DEBUG
	if (ldebug(mmap2))
		printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)args->addr, args->len, args->prot,
		    args->flags, args->fd, args->pgoff);
#endif

	linux_args.addr = (l_caddr_t)args->addr;
	linux_args.len = args->len;
	linux_args.prot = args->prot;
	linux_args.flags = args->flags;
	linux_args.fd = args->fd;
	linux_args.pos = args->pgoff * PAGE_SIZE;

	return (linux_mmap_common(td, &linux_args));
}

int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
	int error;
	struct l_mmap_argv linux_args;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(mmap))
		printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)linux_args.addr, linux_args.len, linux_args.prot,
		    linux_args.flags, linux_args.fd, linux_args.pos);
#endif

	return (linux_mmap_common(td, &linux_args));
}

static int
linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args)
{
	struct proc *p = td->td_proc;
	struct mmap_args /* {
		caddr_t addr;
		size_t len;
		int prot;
		int flags;
		int fd;
		long pad;
		off_t pos;
	} */ bsd_args;
	int error;
	struct file *fp;

	error = 0;
	bsd_args.flags = 0;
	fp = NULL;

	/*
	 * Linux mmap(2):
	 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
	 */
	if (! ((linux_args->flags & LINUX_MAP_SHARED) ^
	    (linux_args->flags & LINUX_MAP_PRIVATE)))
		return (EINVAL);

	if (linux_args->flags & LINUX_MAP_SHARED)
		bsd_args.flags |= MAP_SHARED;
	if (linux_args->flags & LINUX_MAP_PRIVATE)
		bsd_args.flags |= MAP_PRIVATE;
	if (linux_args->flags & LINUX_MAP_FIXED)
		bsd_args.flags |= MAP_FIXED;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.flags |= MAP_ANON;
	else
		bsd_args.flags |= MAP_NOSYNC;
	if (linux_args->flags & LINUX_MAP_GROWSDOWN) {
		bsd_args.flags |= MAP_STACK;

		/*
		 * The Linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the initial BOS, and as len the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, linuxthreads has an implicit internal
		 * limit on stack size of STACK_SIZE; it is just not
		 * enforced explicitly in Linux.  But here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It then maps the top SGROWSIZ bytes,
		 * and autogrows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		/* This gives us TOS */
		bsd_args.addr = linux_args->addr + linux_args->len;

		if (bsd_args.addr > p->p_vmspace->vm_maxsaddr) {
			/*
			 * Some linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			PROC_LOCK(p);
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    lim_cur(p, RLIMIT_STACK);
			PROC_UNLOCK(p);
		}

		/* This gives us our maximum stack size */
		if (linux_args->len > STACK_SIZE - GUARD_SIZE)
			bsd_args.len = linux_args->len;
		else
			bsd_args.len = STACK_SIZE - GUARD_SIZE;

		/*
		 * This gives us a new BOS.  If we're using VM_STACK, then
		 * mmap will just map the top SGROWSIZ bytes, and let
		 * the stack grow down to the limit at BOS.  If we're
		 * not using VM_STACK we map the full stack, since we
		 * don't have a way to autogrow it.
		 */
		bsd_args.addr -= bsd_args.len;
	} else {
		bsd_args.addr = linux_args->addr;
		bsd_args.len = linux_args->len;
	}

	bsd_args.prot = linux_args->prot;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.fd = -1;
	else {
		/*
		 * Linux follows Solaris mmap(2) description:
		 * The file descriptor fildes is opened with
		 * read permission, regardless of the
		 * protection options specified.
		 * If PROT_WRITE is specified, the application
		 * must have opened the file descriptor
		 * fildes with write permission unless
		 * MAP_PRIVATE is specified in the flag
		 * argument as described below.
		 */

		if ((error = fget(td, linux_args->fd, &fp)) != 0)
			return (error);
		if (fp->f_type != DTYPE_VNODE) {
			fdrop(fp, td);
			return (EINVAL);
		}

		/* Linux mmap() just fails for O_WRONLY files */
		if (! (fp->f_flag & FREAD)) {
			fdrop(fp, td);
			return (EACCES);
		}

		bsd_args.fd = linux_args->fd;
		fdrop(fp, td);
	}
	bsd_args.pos = linux_args->pos;
	bsd_args.pad = 0;

#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n",
		    __func__,
		    (void *)bsd_args.addr, bsd_args.len, bsd_args.prot,
		    bsd_args.flags, bsd_args.fd, (int)bsd_args.pos);
#endif
	error = mmap(td, &bsd_args);
#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s() return: 0x%x (0x%08x)\n",
		    __func__, error, (u_int)td->td_retval[0]);
#endif
	return (error);
}

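/*
 * FreeBSD pipe(2) returns the two descriptors in td_retval[0]/[1]; Linux
 * expects them stored through the user pointer instead, with the original
 * %edx (td_retval[1]) preserved across the call.
 */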
int
linux_pipe(struct thread *td, struct linux_pipe_args *args)
{
	int error;
	int reg_edx;

#ifdef DEBUG
	if (ldebug(pipe))
		printf(ARGS(pipe, "*"));
#endif

	reg_edx = td->td_retval[1];
	error = pipe(td, 0);
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	error = copyout(td->td_retval, args->pipefds, 2 * sizeof(int));
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	td->td_retval[1] = reg_edx;
	td->td_retval[0] = 0;
	return (0);
}

int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
	int error;
	struct i386_ioperm_args iia;

	iia.start = args->start;
	iia.length = args->length;
	iia.enable = args->enable;
	mtx_lock(&Giant);
	error = i386_set_ioperm(td, &iia);
	mtx_unlock(&Giant);
	return (error);
}

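/*
 * iopl(2): map the requested Linux I/O privilege level (0-3) onto the
 * PSL_IOPL bits of the thread's %eflags.
 */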
int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}

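/*
 * modify_ldt(2): func 0 reads the current LDT into the user buffer, while
 * funcs 1 and 0x11 install a single descriptor converted from the Linux
 * l_descriptor layout via i386_set_ldt().
 */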
int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
	int error;
	struct i386_ldt_args ldt;
	struct l_descriptor ld;
	union descriptor desc;

	if (uap->ptr == NULL)
		return (EINVAL);

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt.start = 0;
		ldt.descs = uap->ptr;
		ldt.num = uap->bytecount / sizeof(union descriptor);
		mtx_lock(&Giant);
		error = i386_get_ldt(td, &ldt);
		td->td_retval[0] *= sizeof(union descriptor);
		mtx_unlock(&Giant);
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt.start = ld.entry_number;
		ldt.descs = &desc;
		ldt.num = 1;
		desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
			(ld.contents << 2);
		desc.sd.sd_dpl = 3;
		desc.sd.sd_p = (ld.seg_not_present ^ 1);
		desc.sd.sd_xx = 0;
		desc.sd.sd_def32 = ld.seg_32bit;
		desc.sd.sd_gran = ld.limit_in_pages;
		mtx_lock(&Giant);
		error = i386_set_ldt(td, &ldt, &desc);
		mtx_unlock(&Giant);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == EOPNOTSUPP) {
		printf("linux: modify_ldt needs kernel option USER_LDT\n");
		error = ENOSYS;
	}

	return (error);
}

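/*
 * Old-style sigaction(2) passes an l_osigaction_t with a single-word signal
 * mask; convert to and from the full l_sigaction_t used by
 * linux_do_sigaction().
 */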
int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t act, oact;
	int error;

#ifdef DEBUG
	if (ldebug(sigaction))
		printf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

	if (args->nsa != NULL) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		act.lsa_handler = osa.lsa_handler;
		act.lsa_flags = osa.lsa_flags;
		act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(act.lsa_mask);
		act.lsa_mask.__bits[0] = osa.lsa_mask;
	}

	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);

	if (args->osa != NULL && !error) {
		osa.lsa_handler = oact.lsa_handler;
		osa.lsa_flags = oact.lsa_flags;
		osa.lsa_restorer = oact.lsa_restorer;
		osa.lsa_mask = oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
	sigset_t sigmask;
	l_sigset_t mask;

#ifdef DEBUG
	if (ldebug(sigsuspend))
		printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
#endif

	LINUX_SIGEMPTYSET(mask);
	mask.__bits[0] = args->mask;
	linux_to_bsd_sigset(&mask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
{
	l_sigset_t lmask;
	sigset_t sigmask;
	int error;

#ifdef DEBUG
	if (ldebug(rt_sigsuspend))
		printf(ARGS(rt_sigsuspend, "%p, %d"),
		    (void *)uap->newset, uap->sigsetsize);
#endif

	if (uap->sigsetsize != sizeof(l_sigset_t))
		return (EINVAL);

	error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
	if (error)
		return (error);

	linux_to_bsd_sigset(&lmask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
	struct proc *p = td->td_proc;
	sigset_t sigmask;

#ifdef DEBUG
	if (ldebug(pause))
		printf(ARGS(pause, ""));
#endif

	PROC_LOCK(p);
	sigmask = td->td_sigmask;
	PROC_UNLOCK(p);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t lss;
	int error;

#ifdef DEBUG
	if (ldebug(sigaltstack))
		printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

	if (uap->uss != NULL) {
		error = copyin(uap->uss, &lss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = lss.ss_sp;
		ss.ss_size = lss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
	}
	error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
	    (uap->uoss != NULL) ? &oss : NULL);
	if (!error && uap->uoss != NULL) {
		lss.ss_sp = oss.ss_sp;
		lss.ss_size = oss.ss_size;
		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}

int
linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
{
	struct ftruncate_args sa;

#ifdef DEBUG
	if (ldebug(ftruncate64))
		printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
		    (intmax_t)args->length);
#endif

	sa.fd = args->fd;
	sa.pad = 0;
	sa.length = args->length;
	return ftruncate(td, &sa);
}

int
linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	int a[2];
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
		    info.entry_number, info.base_addr, info.limit,
		    info.seg_32bit, info.contents, info.read_exec_only,
		    info.limit_in_pages, info.seg_not_present, info.useable);
#endif

	idx = info.entry_number;
	/*
	 * Semantics of the Linux version: every thread in the system has
	 * an array of 3 TLS descriptors.  The 1st is GLIBC TLS, the 2nd is
	 * WINE, the 3rd is unknown.  This syscall loads one of the
	 * selected TLS descriptors with a value and also loads GDT
	 * descriptors 6, 7 and 8 with the content of the per-thread
	 * descriptors.
	 *
	 * Semantics of the FreeBSD version: I think we can ignore that
	 * Linux has 3 per-thread descriptors and use just the 1st one.
	 * The tls_array[] is used only in the set/get_thread_area()
	 * syscalls and for loading the GDT descriptors.  In FreeBSD we use
	 * just one GDT descriptor for TLS, so we will load just one.
	 *
	 * XXX: This doesn't work when a user-space process tries to use
	 * more than one TLS segment; a comment in the Linux sources says
	 * wine might do that.
	 */

	/*
	 * We support just GLIBC TLS now.
	 * We should let 3 proceed as well because we use this segment, so
	 * if code does two subsequent calls it should succeed.
	 */
	if (idx != 6 && idx != -1 && idx != 3)
		return (EINVAL);

	/*
	 * We have to copy out the GDT entry we use.
	 * FreeBSD uses GDT entry #3 for storing %gs, so load that.
	 * XXX: What if a userspace program doesn't check this value and
	 * tries to use 6, 7 or 8?
	 */
	idx = info.entry_number = 3;
	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (error);

	if (LDT_empty(&info)) {
		a[0] = 0;
		a[1] = 0;
	} else {
		a[0] = LDT_entry_a(&info);
		a[1] = LDT_entry_b(&info);
	}

	memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf("Segment created in set_thread_area: "
		    "lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, "
		    "type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n",
		    sd.sd_lobase, sd.sd_hibase, sd.sd_lolimit, sd.sd_hilimit,
		    sd.sd_type, sd.sd_dpl, sd.sd_p, sd.sd_xx, sd.sd_def32,
		    sd.sd_gran);
#endif

	/* This is taken from the i386 version of cpu_set_user_tls(). */
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	load_gs(GSEL(GUGS_SEL, SEL_UPL));
	critical_exit();

	return (0);
}

int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	struct l_desc_struct desc;
	struct segment_descriptor sd;

#ifdef DEBUG
	if (ldebug(get_thread_area))
		printf(ARGS(get_thread_area, "%p"), args->desc);
#endif

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	idx = info.entry_number;
	/* XXX: I am not sure if we want 3 to be allowed too. */
	if (idx != 6 && idx != 3)
		return (EINVAL);

	idx = 3;

	memset(&info, 0, sizeof(info));

	sd = PCPU_GET(fsgs_gdt)[1];

	memcpy(&desc, &sd, sizeof(desc));

	info.entry_number = idx;
	info.base_addr = GET_BASE(&desc);
	info.limit = GET_LIMIT(&desc);
	info.seg_32bit = GET_32BIT(&desc);
	info.contents = GET_CONTENTS(&desc);
	info.read_exec_only = !GET_WRITABLE(&desc);
	info.limit_in_pages = GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !GET_PRESENT(&desc);
	info.useable = GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}

/* copied from kern/kern_time.c */
int
linux_timer_create(struct thread *td, struct linux_timer_create_args *args)
{
	return ktimer_create(td, (struct ktimer_create_args *) args);
}

int
linux_timer_settime(struct thread *td, struct linux_timer_settime_args *args)
{
	return ktimer_settime(td, (struct ktimer_settime_args *) args);
}

int
linux_timer_gettime(struct thread *td, struct linux_timer_gettime_args *args)
{
	return ktimer_gettime(td, (struct ktimer_gettime_args *) args);
}

int
linux_timer_getoverrun(struct thread *td, struct linux_timer_getoverrun_args *args)
{
	return ktimer_getoverrun(td, (struct ktimer_getoverrun_args *) args);
}

int
linux_timer_delete(struct thread *td, struct linux_timer_delete_args *args)
{
	return ktimer_delete(td, (struct ktimer_delete_args *) args);
}

/* XXX: This won't work with a module - convert it. */
int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_open(td, (struct kmq_open_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_unlink(td, (struct kmq_unlink_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_timedsend(td, (struct kmq_timedsend_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_timedreceive(td, (struct kmq_timedreceive_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_notify(td, (struct kmq_notify_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_setattr(td, (struct kmq_setattr_args *) args);
#else
	return (ENOSYS);
#endif
}