/*-
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/linux/linux_machdep.c 166944 2007-02-24 16:49:25Z netchild $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <sys/sched.h>

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_emul.h>

#include <i386/include/pcb.h>			/* needed for pcb definition in linux_set_thread_area */

#include "opt_posix.h"

extern struct sysentvec elf32_freebsd_sysvec;	/* defined in i386/i386/elf_machdep.c */

struct l_descriptor {
	l_uint		entry_number;
	l_ulong		base_addr;
	l_uint		limit;
	l_uint		seg_32bit:1;
	l_uint		contents:2;
	l_uint		read_exec_only:1;
	l_uint		limit_in_pages:1;
	l_uint		seg_not_present:1;
	l_uint		useable:1;
};

struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval	*timeout;
};

int
linux_to_bsd_sigaltstack(int lsa)
{
	int bsa = 0;

	if (lsa & LINUX_SS_DISABLE)
		bsa |= SS_DISABLE;
	if (lsa & LINUX_SS_ONSTACK)
		bsa |= SS_ONSTACK;
	return (bsa);
}

int
bsd_to_linux_sigaltstack(int bsa)
{
	int lsa = 0;

	if (bsa & SS_DISABLE)
		lsa |= LINUX_SS_DISABLE;
	if (bsa & SS_ONSTACK)
		lsa |= LINUX_SS_ONSTACK;
	return (lsa);
}

int
linux_execve(struct thread *td, struct linux_execve_args *args)
{
	int error;
	char *newpath;
	struct image_args eargs;

	LCONVPATHEXIST(td, args->path, &newpath);

#ifdef DEBUG
	if (ldebug(execve))
		printf(ARGS(execve, "%s"), newpath);
#endif

	error = exec_copyin_args(&eargs, newpath, UIO_SYSSPACE,
	    args->argp, args->envp);
	free(newpath, M_TEMP);
	if (error == 0)
		error = kern_execve(td, &eargs, NULL);
	if (error == 0)
		/*
		 * A Linux process may exec a FreeBSD binary.  Don't attempt
		 * to create emuldata for such a process via linux_proc_init;
		 * that would panic on a KASSERT because such a process has
		 * p->p_emuldata == NULL.
		 */
		if (td->td_proc->p_sysent == &elf_linux_sysvec)
			error = linux_proc_init(td, 0, 0);
	return (error);
}

struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};

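/*
 * Linux ipc(2) multiplexes the System V IPC calls through one system call:
 * the low 16 bits of args->what select the operation, while the upper 16
 * bits carry a version number (used below in LINUX_MSGRCV to distinguish
 * the old struct-pointer calling convention from the newer direct one).
 */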
int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {
		struct linux_semop_args a;

		a.semid = args->arg1;
		a.tsops = args->ptr;
		a.nsops = args->arg2;
		return (linux_semop(td, &a));
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		return (linux_semget(td, &a));
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;
		int error;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		error = copyin(args->ptr, &a.arg, sizeof(a.arg));
		if (error)
			return (error);
		return (linux_semctl(td, &a));
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = args->ptr;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		return (linux_msgsnd(td, &a));
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;
			int error;

			if (args->ptr == NULL)
				return (EINVAL);
			error = copyin(args->ptr, &tmp, sizeof(tmp));
			if (error)
				return (error);
			a.msgp = tmp.msgp;
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = args->ptr;
			a.msgtyp = args->arg5;
		}
		return (linux_msgrcv(td, &a));
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		return (linux_msgget(td, &a));
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_msgctl(td, &a));
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;

		a.shmid = args->arg1;
		a.shmaddr = args->ptr;
		a.shmflg = args->arg2;
		a.raddr = (l_ulong *)args->arg3;
		return (linux_shmat(td, &a));
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = args->ptr;
		return (linux_shmdt(td, &a));
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		return (linux_shmget(td, &a));
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_shmctl(td, &a));
	}
	default:
		break;
	}

	return (EINVAL);
}

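/*
 * The old select(2) entry point passes a single pointer to a block holding
 * the five select() arguments; unpack it and hand the call to linux_select().
 */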
int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

#ifdef DEBUG
	if (ldebug(old_select))
		printf(ARGS(old_select, "%p"), args->ptr);
#endif

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.nfds = linux_args.nfds;
	newsel.readfds = linux_args.readfds;
	newsel.writefds = linux_args.writefds;
	newsel.exceptfds = linux_args.exceptfds;
	newsel.timeout = linux_args.timeout;
	return (linux_select(td, &newsel));
}

int
linux_fork(struct thread *td, struct linux_fork_args *args)
{
	int error;
	struct proc *p2;
	struct thread *td2;

#ifdef DEBUG
	if (ldebug(fork))
		printf(ARGS(fork, ""));
#endif

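	/*
	 * The child is created RFSTOPPED so that the Linux emulation data
	 * can be set up before it is allowed to run.
	 */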
	if ((error = fork1(td, RFFDG | RFPROC | RFSTOPPED, 0, &p2)) != 0)
		return (error);

	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}

	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);

	td2 = FIRST_THREAD_IN_PROC(p2);

	/*
	 * Make this runnable after we are finished with it.
	 */
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td2);
	sched_add(td2, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	return (0);
}

int
linux_vfork(struct thread *td, struct linux_vfork_args *args)
{
	int error;
	struct proc *p2;
	struct thread *td2;

#ifdef DEBUG
	if (ldebug(vfork))
		printf(ARGS(vfork, ""));
#endif

	/* exclude RFPPWAIT */
	if ((error = fork1(td, RFFDG | RFPROC | RFMEM | RFSTOPPED, 0, &p2)) != 0)
		return (error);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	/* Are we the child? */
	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);

	PROC_LOCK(p2);
	p2->p_flag |= P_PPWAIT;
	PROC_UNLOCK(p2);

	td2 = FIRST_THREAD_IN_PROC(p2);

	/*
	 * Make this runnable after we are finished with it.
	 */
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td2);
	sched_add(td2, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	/* wait for the child to exit, i.e., emulate vfork */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	return (0);
}

int
linux_clone(struct thread *td, struct linux_clone_args *args)
{
	int error, ff = RFPROC | RFSTOPPED;
	struct proc *p2;
	struct thread *td2;
	int exit_signal;
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(clone)) {
		printf(ARGS(clone, "flags %x, stack %x, parent tid: %x, child tid: %x"),
		    (unsigned int)args->flags, (unsigned int)args->stack,
		    (unsigned int)args->parent_tidptr, (unsigned int)args->child_tidptr);
	}
#endif

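	/*
	 * The low byte of the clone flags names the signal the child sends
	 * to its parent on exit; validate it and translate it to the
	 * corresponding FreeBSD signal number.
	 */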
	exit_signal = args->flags & 0x000000ff;
	if (!LINUX_SIG_VALID(exit_signal) && exit_signal != 0)
		return (EINVAL);

	if (exit_signal <= LINUX_SIGTBLSZ)
		exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)];

	if (args->flags & CLONE_VM)
		ff |= RFMEM;
	if (args->flags & CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	/*
	 * XXX: In Linux, sharing of fs info (chroot/cwd/umask) and of open
	 * files is independent.  In FreeBSD both live in one structure, but
	 * in practice this causes no problems because the two flags are
	 * usually set together.
	 */
	if (!(args->flags & (CLONE_FILES | CLONE_FS)))
		ff |= RFFDG;

	/*
	 * Attempt to detect when linux_clone(2) is used for creating
	 * kernel threads.  Unfortunately, despite the existence of the
	 * CLONE_THREAD flag, the version of the linuxthreads package used
	 * in the most popular distros as of the beginning of 2005 doesn't
	 * make any use of it.  Therefore this detection relies on the
	 * empirical observation that linuxthreads sets a certain
	 * combination of flags, so that we can detect it more or less
	 * precisely and notify the FreeBSD kernel that several processes
	 * are in fact part of the same threading group, so that special
	 * treatment is applied to signal delivery between those processes
	 * and to fd locking.
	 */
	if ((args->flags & 0xffffff00) == THREADING_FLAGS)
		ff |= RFTHREAD;

	if (args->flags & CLONE_PARENT_SETTID)
		if (args->parent_tidptr == NULL)
			return (EINVAL);

	error = fork1(td, ff, 0, &p2);
	if (error)
		return (error);

	if (args->flags & (CLONE_PARENT|CLONE_THREAD)) {
		sx_xlock(&proctree_lock);
		PROC_LOCK(p2);
		proc_reparent(p2, td->td_proc->p_pptr);
		PROC_UNLOCK(p2);
		sx_xunlock(&proctree_lock);
	}

	/* create the emuldata */
	error = linux_proc_init(td, p2->p_pid, args->flags);
	/* reference it - no need to check this */
	em = em_find(p2, EMUL_DOLOCK);
	KASSERT(em != NULL, ("clone: emuldata not found.\n"));
	/* and adjust it */

	if (args->flags & CLONE_THREAD) {
		/*
		 * XXX: Linux mangles pgrp and pptr somehow.  I think it
		 * might be this, but I am not sure.
		 */
#ifdef notyet
		PROC_LOCK(p2);
		p2->p_pgrp = td->td_proc->p_pgrp;
		PROC_UNLOCK(p2);
#endif
		exit_signal = 0;
	}

	if (args->flags & CLONE_CHILD_SETTID)
		em->child_set_tid = args->child_tidptr;
	else
		em->child_set_tid = NULL;

	if (args->flags & CLONE_CHILD_CLEARTID)
		em->child_clear_tid = args->child_tidptr;
	else
		em->child_clear_tid = NULL;

	EMUL_UNLOCK(&emul_lock);

	if (args->flags & CLONE_PARENT_SETTID) {
		error = copyout(&p2->p_pid, args->parent_tidptr, sizeof(p2->p_pid));
		if (error)
			printf(LMSG("copyout failed!"));
	}

	PROC_LOCK(p2);
	p2->p_sigparent = exit_signal;
	PROC_UNLOCK(p2);
	td2 = FIRST_THREAD_IN_PROC(p2);
	/*
	 * In the case of stack == NULL we are supposed to COW the calling
	 * process's stack.  This is what a normal fork() does, so we just
	 * keep the tf_esp arg intact.
	 */
	if (args->stack)
		td2->td_frame->tf_esp = (unsigned int)args->stack;

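	/*
	 * With CLONE_SETTLS the new thread's TLS descriptor is passed in
	 * %esi as a struct l_user_desc; build an i386 segment descriptor
	 * from it and install it as the child's %gs.
	 */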
	if (args->flags & CLONE_SETTLS) {
		struct l_user_desc info;
		int idx;
		int a[2];
		struct segment_descriptor sd;

		error = copyin((void *)td->td_frame->tf_esi, &info, sizeof(struct l_user_desc));
		if (error) {
			printf(LMSG("copyin failed!"));
		} else {

			idx = info.entry_number;

			/*
			 * Looks like we're getting the idx we returned
			 * in the set_thread_area() syscall.
			 */
			if (idx != 6 && idx != 3) {
				printf(LMSG("resetting idx!"));
				idx = 3;
			}

			/* this doesn't happen in practice */
			if (idx == 6) {
				/* we might copy out the entry_number as 3 */
				info.entry_number = 3;
				error = copyout(&info, (void *) td->td_frame->tf_esi, sizeof(struct l_user_desc));
				if (error)
					printf(LMSG("copyout failed!"));
			}

			a[0] = LDT_entry_a(&info);
			a[1] = LDT_entry_b(&info);

			memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
			if (ldebug(clone))
				printf("Segment created in clone with CLONE_SETTLS: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n",
				    sd.sd_lobase, sd.sd_hibase, sd.sd_lolimit,
				    sd.sd_hilimit, sd.sd_type, sd.sd_dpl,
				    sd.sd_p, sd.sd_xx, sd.sd_def32, sd.sd_gran);
#endif

			/* set %gs */
			td2->td_pcb->pcb_gsd = sd;
			td2->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
		}
	}

#ifdef DEBUG
	if (ldebug(clone))
		printf(LMSG("clone: successful rfork to %ld, stack %p sig = %d"),
		    (long)p2->p_pid, args->stack, exit_signal);
#endif
	if (args->flags & CLONE_VFORK) {
		PROC_LOCK(p2);
		p2->p_flag |= P_PPWAIT;
		PROC_UNLOCK(p2);
	}

	/*
	 * Make this runnable after we are finished with it.
	 */
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td2);
	sched_add(td2, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	td->td_retval[0] = p2->p_pid;
	td->td_retval[1] = 0;

	if (args->flags & CLONE_VFORK) {
		/* wait for the child to exit, i.e., emulate vfork */
		PROC_LOCK(p2);
		while (p2->p_flag & P_PPWAIT)
			msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
		PROC_UNLOCK(p2);
	}

	return (0);
}

#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)

static int linux_mmap_common(struct thread *, struct l_mmap_argv *);

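/*
 * linux_mmap2() receives its arguments directly from the system call and
 * takes the file offset in units of pages; linux_mmap() takes a pointer to
 * an argument block in user space and a byte offset.  Both are funneled
 * into linux_mmap_common().
 */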
int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{
	struct l_mmap_argv linux_args;

#ifdef DEBUG
	if (ldebug(mmap2))
		printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)args->addr, args->len, args->prot,
		    args->flags, args->fd, args->pgoff);
#endif

	linux_args.addr = args->addr;
	linux_args.len = args->len;
	linux_args.prot = args->prot;
	linux_args.flags = args->flags;
	linux_args.fd = args->fd;
	linux_args.pgoff = args->pgoff * PAGE_SIZE;

	return (linux_mmap_common(td, &linux_args));
}

int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
	int error;
	struct l_mmap_argv linux_args;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(mmap))
		printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)linux_args.addr, linux_args.len, linux_args.prot,
		    linux_args.flags, linux_args.fd, linux_args.pgoff);
#endif

	return (linux_mmap_common(td, &linux_args));
}

static int
linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args)
{
	struct proc *p = td->td_proc;
	struct mmap_args /* {
		caddr_t addr;
		size_t len;
		int prot;
		int flags;
		int fd;
		long pad;
		off_t pos;
	} */ bsd_args;
	int error;
	struct file *fp;

	error = 0;
	bsd_args.flags = 0;
	fp = NULL;

	/*
	 * Linux mmap(2):
	 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
	 */
	if (! ((linux_args->flags & LINUX_MAP_SHARED) ^
	    (linux_args->flags & LINUX_MAP_PRIVATE)))
		return (EINVAL);

	if (linux_args->flags & LINUX_MAP_SHARED)
		bsd_args.flags |= MAP_SHARED;
	if (linux_args->flags & LINUX_MAP_PRIVATE)
		bsd_args.flags |= MAP_PRIVATE;
	if (linux_args->flags & LINUX_MAP_FIXED)
		bsd_args.flags |= MAP_FIXED;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.flags |= MAP_ANON;
	else
		bsd_args.flags |= MAP_NOSYNC;
	if (linux_args->flags & LINUX_MAP_GROWSDOWN)
		bsd_args.flags |= MAP_STACK;

	/*
	 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
	 * on Linux/i386. We do this to ensure maximum compatibility.
	 * Linux/ia64 does the same in i386 emulation mode.
	 */
	bsd_args.prot = linux_args->prot;
	if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
		bsd_args.prot |= PROT_READ | PROT_EXEC;

	if (linux_args->fd != -1) {
		/*
		 * Linux follows Solaris mmap(2) description:
		 * The file descriptor fildes is opened with
		 * read permission, regardless of the
		 * protection options specified.
		 */

		if ((error = fget(td, linux_args->fd, &fp)) != 0)
			return (error);
		if (fp->f_type != DTYPE_VNODE) {
			fdrop(fp, td);
			return (EINVAL);
		}

		/* Linux mmap() just fails for O_WRONLY files */
		if (!(fp->f_flag & FREAD)) {
			fdrop(fp, td);
			return (EACCES);
		}

		fdrop(fp, td);
	}
	bsd_args.fd = linux_args->fd;

	if (linux_args->flags & LINUX_MAP_GROWSDOWN) {
		/*
		 * The Linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the initial BOS, and as len, the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, Linux threads have an implicit internal
		 * limit to stack size of STACK_SIZE.  It's just not
		 * enforced explicitly in Linux.  But, here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It then maps the top SGROWSIZ bytes,
		 * and auto grows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		if ((caddr_t)PTRIN(linux_args->addr) + linux_args->len >
		    p->p_vmspace->vm_maxsaddr) {
			/*
			 * Some Linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			PROC_LOCK(p);
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    lim_cur(p, RLIMIT_STACK);
			PROC_UNLOCK(p);
		}

		/* This gives us our maximum stack size */
		if (linux_args->len > STACK_SIZE - GUARD_SIZE)
			bsd_args.len = linux_args->len;
		else
			bsd_args.len  = STACK_SIZE - GUARD_SIZE;

		/*
		 * This gives us a new BOS.  If we're using VM_STACK, then
		 * mmap will just map the top SGROWSIZ bytes, and let
		 * the stack grow down to the limit at BOS.  If we're
		 * not using VM_STACK we map the full stack, since we
		 * don't have a way to autogrow it.
		 */
		bsd_args.addr = (caddr_t)PTRIN(linux_args->addr) -
		    bsd_args.len;
	} else {
		bsd_args.addr = (caddr_t)PTRIN(linux_args->addr);
		bsd_args.len  = linux_args->len;
	}
	bsd_args.pos = linux_args->pgoff;
	bsd_args.pad = 0;

#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n",
		    __func__,
		    (void *)bsd_args.addr, bsd_args.len, bsd_args.prot,
		    bsd_args.flags, bsd_args.fd, (int)bsd_args.pos);
#endif
	error = mmap(td, &bsd_args);
#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s() return: 0x%x (0x%08x)\n",
			__func__, error, (u_int)td->td_retval[0]);
#endif
	return (error);
}

int
linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
{
	struct mprotect_args bsd_args;

	bsd_args.addr = uap->addr;
	bsd_args.len = uap->len;
	bsd_args.prot = uap->prot;
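	/*
	 * As in linux_mmap_common(), any of PROT_READ, PROT_WRITE or
	 * PROT_EXEC implies PROT_READ and PROT_EXEC on Linux/i386.
	 */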
	if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
		bsd_args.prot |= PROT_READ | PROT_EXEC;
	return (mprotect(td, &bsd_args));
}

int
linux_pipe(struct thread *td, struct linux_pipe_args *args)
{
	int error;
	int reg_edx;

#ifdef DEBUG
	if (ldebug(pipe))
		printf(ARGS(pipe, "*"));
#endif

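	/*
	 * The native pipe(2) returns the two descriptors in td_retval[0]
	 * and td_retval[1]; Linux expects them to be written to the user
	 * array instead, so save and restore %edx (td_retval[1]) around
	 * the call and copy the descriptors out.
	 */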
	reg_edx = td->td_retval[1];
	error = pipe(td, 0);
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	error = copyout(td->td_retval, args->pipefds, 2*sizeof(int));
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	td->td_retval[1] = reg_edx;
	td->td_retval[0] = 0;
	return (0);
}

int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
	int error;
	struct i386_ioperm_args iia;

	iia.start = args->start;
	iia.length = args->length;
	iia.enable = args->enable;
	mtx_lock(&Giant);
	error = i386_set_ioperm(td, &iia);
	mtx_unlock(&Giant);
	return (error);
}

int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
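	/*
	 * PSL_IOPL is the two-bit I/O privilege level field in %eflags;
	 * args->level * (PSL_IOPL / 3) scales the requested level (0-3)
	 * into that field.
	 */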
	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}

int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
	int error;
	struct i386_ldt_args ldt;
	struct l_descriptor ld;
	union descriptor desc;

	if (uap->ptr == NULL)
		return (EINVAL);

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt.start = 0;
		ldt.descs = uap->ptr;
		ldt.num = uap->bytecount / sizeof(union descriptor);
		mtx_lock(&Giant);
		error = i386_get_ldt(td, &ldt);
		td->td_retval[0] *= sizeof(union descriptor);
		mtx_unlock(&Giant);
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt.start = ld.entry_number;
		ldt.descs = &desc;
		ldt.num = 1;
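		/*
		 * Translate the Linux descriptor layout into an i386
		 * segment descriptor: the limit and base are split into
		 * their low/high parts and the type field is built from
		 * read_exec_only and contents.
		 */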
		desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
			(ld.contents << 2);
		desc.sd.sd_dpl = 3;
		desc.sd.sd_p = (ld.seg_not_present ^ 1);
		desc.sd.sd_xx = 0;
		desc.sd.sd_def32 = ld.seg_32bit;
		desc.sd.sd_gran = ld.limit_in_pages;
		mtx_lock(&Giant);
		error = i386_set_ldt(td, &ldt, &desc);
		mtx_unlock(&Giant);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == EOPNOTSUPP) {
		printf("linux: modify_ldt needs kernel option USER_LDT\n");
		error = ENOSYS;
	}

	return (error);
}

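/*
 * Old-style Linux sigaction(2) with a single-word signal mask.  Convert the
 * l_osigaction_t from user space into the full l_sigaction_t expected by
 * linux_do_sigaction() and convert the result back on the way out.
 */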
int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t act, oact;
	int error;

#ifdef DEBUG
	if (ldebug(sigaction))
		printf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

	if (args->nsa != NULL) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		act.lsa_handler = osa.lsa_handler;
		act.lsa_flags = osa.lsa_flags;
		act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(act.lsa_mask);
		act.lsa_mask.__bits[0] = osa.lsa_mask;
	}

	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);

	if (args->osa != NULL && !error) {
		osa.lsa_handler = oact.lsa_handler;
		osa.lsa_flags = oact.lsa_flags;
		osa.lsa_restorer = oact.lsa_restorer;
		osa.lsa_mask = oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
	sigset_t sigmask;
	l_sigset_t mask;

#ifdef DEBUG
	if (ldebug(sigsuspend))
		printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
#endif

	LINUX_SIGEMPTYSET(mask);
	mask.__bits[0] = args->mask;
	linux_to_bsd_sigset(&mask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
{
	l_sigset_t lmask;
	sigset_t sigmask;
	int error;

#ifdef DEBUG
	if (ldebug(rt_sigsuspend))
		printf(ARGS(rt_sigsuspend, "%p, %d"),
		    (void *)uap->newset, uap->sigsetsize);
#endif

	if (uap->sigsetsize != sizeof(l_sigset_t))
		return (EINVAL);

	error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
	if (error)
		return (error);

	linux_to_bsd_sigset(&lmask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
	struct proc *p = td->td_proc;
	sigset_t sigmask;

#ifdef DEBUG
	if (ldebug(pause))
		printf(ARGS(pause, ""));
#endif

	PROC_LOCK(p);
	sigmask = td->td_sigmask;
	PROC_UNLOCK(p);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t lss;
	int error;

#ifdef DEBUG
	if (ldebug(sigaltstack))
		printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

	if (uap->uss != NULL) {
		error = copyin(uap->uss, &lss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = lss.ss_sp;
		ss.ss_size = lss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
	}
	error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
	    (uap->uoss != NULL) ? &oss : NULL);
	if (!error && uap->uoss != NULL) {
		lss.ss_sp = oss.ss_sp;
		lss.ss_size = oss.ss_size;
		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}

int
linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
{
	struct ftruncate_args sa;

#ifdef DEBUG
	if (ldebug(ftruncate64))
		printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
		    (intmax_t)args->length);
#endif

	sa.fd = args->fd;
	sa.pad = 0;
	sa.length = args->length;
	return ftruncate(td, &sa);
}

int
linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	int a[2];
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
		    info.entry_number,
		    info.base_addr,
		    info.limit,
		    info.seg_32bit,
		    info.contents,
		    info.read_exec_only,
		    info.limit_in_pages,
		    info.seg_not_present,
		    info.useable);
#endif

	idx = info.entry_number;
	/*
	 * Semantics of the Linux version: every thread in the system has an
	 * array of 3 TLS descriptors.  The 1st is GLIBC TLS, the 2nd is
	 * WINE, the 3rd is unknown.  This syscall loads one of the selected
	 * TLS descriptors with a value and also loads GDT descriptors 6, 7
	 * and 8 with the content of the per-thread descriptors.
	 *
	 * Semantics of the FreeBSD version: I think we can ignore that Linux
	 * has 3 per-thread descriptors and use just the 1st one.  The
	 * tls_array[] is used only in the set/get_thread_area() syscalls and
	 * for loading the GDT descriptors.  In FreeBSD we use just one GDT
	 * descriptor for TLS, so we will load just one.
	 *
	 * XXX: this doesn't work when a user space process tries to use more
	 * than one TLS segment.  A comment in the Linux sources says wine
	 * might do this.
	 */

	/*
	 * We support just GLIBC TLS now.
	 * We should let 3 proceed as well because we use this segment, so
	 * if code does two subsequent calls it should succeed.
	 */
	if (idx != 6 && idx != -1 && idx != 3)
		return (EINVAL);

	/*
	 * We have to copy out the GDT entry we use.
	 * FreeBSD uses GDT entry #3 for storing %gs, so load that.
	 *
	 * XXX: what if a user space program doesn't check this value and
	 * tries to use 6, 7 or 8?
	 */
	idx = info.entry_number = 3;
	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (error);

	if (LDT_empty(&info)) {
		a[0] = 0;
		a[1] = 0;
	} else {
		a[0] = LDT_entry_a(&info);
		a[1] = LDT_entry_b(&info);
	}

	memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf("Segment created in set_thread_area: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n",
		    sd.sd_lobase, sd.sd_hibase, sd.sd_lolimit, sd.sd_hilimit,
		    sd.sd_type, sd.sd_dpl, sd.sd_p, sd.sd_xx, sd.sd_def32,
		    sd.sd_gran);
#endif

	/* this is taken from the i386 version of cpu_set_user_tls() */
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	load_gs(GSEL(GUGS_SEL, SEL_UPL));
	critical_exit();

	return (0);
}

int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{

	struct l_user_desc info;
	int error;
	int idx;
	struct l_desc_struct desc;
	struct segment_descriptor sd;

#ifdef DEBUG
	if (ldebug(get_thread_area))
		printf(ARGS(get_thread_area, "%p"), args->desc);
#endif

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	idx = info.entry_number;
	/* XXX: I am not sure if we want 3 to be allowed too. */
	if (idx != 6 && idx != 3)
		return (EINVAL);

	idx = 3;

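	/*
	 * Read back the descriptor currently installed in the %gs slot of
	 * the GDT and convert it to the Linux l_user_desc layout.
	 */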
	memset(&info, 0, sizeof(info));

	sd = PCPU_GET(fsgs_gdt)[1];

	memcpy(&desc, &sd, sizeof(desc));

	info.entry_number = idx;
	info.base_addr = GET_BASE(&desc);
	info.limit = GET_LIMIT(&desc);
	info.seg_32bit = GET_32BIT(&desc);
	info.contents = GET_CONTENTS(&desc);
	info.read_exec_only = !GET_WRITABLE(&desc);
	info.limit_in_pages = GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !GET_PRESENT(&desc);
	info.useable = GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}

/* copied from kern/kern_time.c */
int
linux_timer_create(struct thread *td, struct linux_timer_create_args *args)
{
	return ktimer_create(td, (struct ktimer_create_args *) args);
}

int
linux_timer_settime(struct thread *td, struct linux_timer_settime_args *args)
{
	return ktimer_settime(td, (struct ktimer_settime_args *) args);
}

int
linux_timer_gettime(struct thread *td, struct linux_timer_gettime_args *args)
{
	return ktimer_gettime(td, (struct ktimer_gettime_args *) args);
}

int
linux_timer_getoverrun(struct thread *td, struct linux_timer_getoverrun_args *args)
{
	return ktimer_getoverrun(td, (struct ktimer_getoverrun_args *) args);
}

int
linux_timer_delete(struct thread *td, struct linux_timer_delete_args *args)
{
	return ktimer_delete(td, (struct ktimer_delete_args *) args);
}

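/*
 * The POSIX message queue wrappers below forward to the native kmq_*()
 * syscalls when the kernel is built with the P1003_1B_MQUEUE option and
 * return ENOSYS otherwise.
 */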
/* XXX: this won't work with a module - convert it */
int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_open(td, (struct kmq_open_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_unlink(td, (struct kmq_unlink_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_timedsend(td, (struct kmq_timedsend_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_timedreceive(td, (struct kmq_timedreceive_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_notify(td, (struct kmq_notify_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_setattr(td, (struct kmq_setattr_args *) args);
#else
	return (ENOSYS);
#endif
}