/*-
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/linux/linux_machdep.c 166727 2007-02-15 00:54:40Z jkim $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <sys/sched.h>

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_emul.h>

#include <i386/include/pcb.h>			/* needed for pcb definition in linux_set_thread_area */

#include "opt_posix.h"

extern struct sysentvec elf32_freebsd_sysvec;	/* defined in i386/i386/elf_machdep.c */

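/*
 * Layout of the descriptor that userland passes to modify_ldt(2); it
 * mirrors Linux's struct user_desc (entry number plus the unpacked
 * segment attribute bit-fields).
 */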
struct l_descriptor {
	l_uint		entry_number;
	l_ulong		base_addr;
	l_uint		limit;
	l_uint		seg_32bit:1;
	l_uint		contents:2;
	l_uint		read_exec_only:1;
	l_uint		limit_in_pages:1;
	l_uint		seg_not_present:1;
	l_uint		useable:1;
};

struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval	*timeout;
};

int
linux_to_bsd_sigaltstack(int lsa)
{
	int bsa = 0;

	if (lsa & LINUX_SS_DISABLE)
		bsa |= SS_DISABLE;
	if (lsa & LINUX_SS_ONSTACK)
		bsa |= SS_ONSTACK;
	return (bsa);
}

int
bsd_to_linux_sigaltstack(int bsa)
{
	int lsa = 0;

	if (bsa & SS_DISABLE)
		lsa |= LINUX_SS_DISABLE;
	if (bsa & SS_ONSTACK)
		lsa |= LINUX_SS_ONSTACK;
	return (lsa);
}

int
linux_execve(struct thread *td, struct linux_execve_args *args)
{
	int error;
	char *newpath;
	struct image_args eargs;

	LCONVPATHEXIST(td, args->path, &newpath);

#ifdef DEBUG
	if (ldebug(execve))
		printf(ARGS(execve, "%s"), newpath);
#endif

	error = exec_copyin_args(&eargs, newpath, UIO_SYSSPACE,
	    args->argp, args->envp);
	free(newpath, M_TEMP);
	if (error == 0)
		error = kern_execve(td, &eargs, NULL);
	if (error == 0)
		/*
		 * A Linux process can exec a FreeBSD one.  Do not attempt
		 * to create emuldata for such a process with
		 * linux_proc_init(); that would trip a KASSERT panic
		 * because such a process has p->p_emuldata == NULL.
		 */
		if (td->td_proc->p_sysent == &elf_linux_sysvec)
			error = linux_proc_init(td, 0, 0);
	return (error);
}

struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};

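/*
 * Linux/i386 multiplexes all of the SysV IPC system calls through this
 * single ipc(2) entry point: the low 16 bits of "what" select the
 * operation and arg1..arg5/ptr carry its arguments, while the upper 16
 * bits carry a calling-convention version (used below by LINUX_MSGRCV
 * to decide how msgp/msgtyp are passed).  As a rough, illustrative
 * sketch of how a Linux libc typically encodes two of the calls (not
 * something this code relies on):
 *
 *	semop(semid, sops, nsops)   -> ipc(SEMOP, semid, nsops, 0, sops)
 *	shmat(shmid, addr, shmflg)  -> ipc(SHMAT, shmid, shmflg, &raddr, addr)
 *
 * Each case simply repacks those arguments into the corresponding
 * linux_*_args structure and calls the regular handler.
 */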
int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {
		struct linux_semop_args a;

		a.semid = args->arg1;
		a.tsops = args->ptr;
		a.nsops = args->arg2;
		return (linux_semop(td, &a));
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		return (linux_semget(td, &a));
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;
		int error;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		error = copyin(args->ptr, &a.arg, sizeof(a.arg));
		if (error)
			return (error);
		return (linux_semctl(td, &a));
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = args->ptr;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		return (linux_msgsnd(td, &a));
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;
			int error;

			if (args->ptr == NULL)
				return (EINVAL);
			error = copyin(args->ptr, &tmp, sizeof(tmp));
			if (error)
				return (error);
			a.msgp = tmp.msgp;
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = args->ptr;
			a.msgtyp = args->arg5;
		}
		return (linux_msgrcv(td, &a));
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		return (linux_msgget(td, &a));
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_msgctl(td, &a));
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;

		a.shmid = args->arg1;
		a.shmaddr = args->ptr;
		a.shmflg = args->arg2;
		a.raddr = (l_ulong *)args->arg3;
		return (linux_shmat(td, &a));
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = args->ptr;
		return (linux_shmdt(td, &a));
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		return (linux_shmget(td, &a));
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_shmctl(td, &a));
	}
	default:
		break;
	}

	return (EINVAL);
}

int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

#ifdef DEBUG
	if (ldebug(old_select))
		printf(ARGS(old_select, "%p"), args->ptr);
#endif

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.nfds = linux_args.nfds;
	newsel.readfds = linux_args.readfds;
	newsel.writefds = linux_args.writefds;
	newsel.exceptfds = linux_args.exceptfds;
	newsel.timeout = linux_args.timeout;
	return (linux_select(td, &newsel));
}

int
linux_fork(struct thread *td, struct linux_fork_args *args)
{
	int error;
	struct proc *p2;
	struct thread *td2;

#ifdef DEBUG
	if (ldebug(fork))
		printf(ARGS(fork, ""));
#endif

	if ((error = fork1(td, RFFDG | RFPROC | RFSTOPPED, 0, &p2)) != 0)
		return (error);

	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}

	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);

	td2 = FIRST_THREAD_IN_PROC(p2);

	/*
	 * Make this runnable after we are finished with it.
	 */
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td2);
	sched_add(td2, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	return (0);
}

int
linux_vfork(struct thread *td, struct linux_vfork_args *args)
{
	int error;
	struct proc *p2;
	struct thread *td2;

#ifdef DEBUG
	if (ldebug(vfork))
		printf(ARGS(vfork, ""));
#endif

	/* Exclude RFPPWAIT. */
	if ((error = fork1(td, RFFDG | RFPROC | RFMEM | RFSTOPPED, 0, &p2)) != 0)
		return (error);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	/* Are we the child? */
	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);

	PROC_LOCK(p2);
	p2->p_flag |= P_PPWAIT;
	PROC_UNLOCK(p2);

	td2 = FIRST_THREAD_IN_PROC(p2);

	/*
	 * Make this runnable after we are finished with it.
	 */
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td2);
	sched_add(td2, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	/* Wait for the child to exit, i.e. emulate vfork. */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	return (0);
}

int
linux_clone(struct thread *td, struct linux_clone_args *args)
{
	int error, ff = RFPROC | RFSTOPPED;
	struct proc *p2;
	struct thread *td2;
	int exit_signal;
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(clone)) {
		printf(ARGS(clone, "flags %x, stack %x, parent tid: %x, child tid: %x"),
		    (unsigned int)args->flags, (unsigned int)args->stack,
		    (unsigned int)args->parent_tidptr, (unsigned int)args->child_tidptr);
	}
#endif

	exit_signal = args->flags & 0x000000ff;
	if (!LINUX_SIG_VALID(exit_signal) && exit_signal != 0)
		return (EINVAL);

	if (exit_signal <= LINUX_SIGTBLSZ)
		exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)];

	if (args->flags & CLONE_VM)
		ff |= RFMEM;
	if (args->flags & CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	/*
	 * XXX: In Linux, sharing of fs info (chroot/cwd/umask) and of open
	 * files are independent of each other.  In FreeBSD both live in one
	 * structure, but in practice this causes no problems because the
	 * two flags are usually set together.
	 */
	if (!(args->flags & (CLONE_FILES | CLONE_FS)))
		ff |= RFFDG;

	/*
	 * Attempt to detect when linux_clone(2) is used for creating
	 * threads.  Unfortunately, despite the existence of the
	 * CLONE_THREAD flag, the version of the linuxthreads package used
	 * in the most popular distros as of the beginning of 2005 does not
	 * make any use of it.  The detection therefore relies entirely on
	 * the empirical observation that linuxthreads sets a certain
	 * combination of flags, which allows a reasonably precise detection
	 * and lets us notify the FreeBSD kernel that several processes are
	 * in fact part of the same threading group, so that special
	 * treatment is applied to signal delivery between those processes
	 * and to fd locking.
	 */
	if ((args->flags & 0xffffff00) == THREADING_FLAGS)
		ff |= RFTHREAD;

	error = fork1(td, ff, 0, &p2);
	if (error)
		return (error);

	if (args->flags & (CLONE_PARENT|CLONE_THREAD)) {
		sx_xlock(&proctree_lock);
		PROC_LOCK(p2);
		proc_reparent(p2, td->td_proc->p_pptr);
		PROC_UNLOCK(p2);
		sx_xunlock(&proctree_lock);
	}

	/* Create the emuldata. */
	error = linux_proc_init(td, p2->p_pid, args->flags);
	/* Reference it - no need to check this. */
	em = em_find(p2, EMUL_DOLOCK);
	KASSERT(em != NULL, ("clone: emuldata not found.\n"));
	/* ... and adjust it. */
	if (args->flags & CLONE_PARENT_SETTID) {
		if (args->parent_tidptr == NULL) {
			EMUL_UNLOCK(&emul_lock);
			return (EINVAL);
		}
		error = copyout(&p2->p_pid, args->parent_tidptr, sizeof(p2->p_pid));
		if (error) {
			EMUL_UNLOCK(&emul_lock);
			return (error);
		}
	}

	if (args->flags & CLONE_THREAD) {
		/*
		 * XXX: Linux mangles pgrp and pptr somehow.
		 * I think it might be this, but I am not sure.
		 */
#ifdef notyet
		PROC_LOCK(p2);
		p2->p_pgrp = td->td_proc->p_pgrp;
		PROC_UNLOCK(p2);
#endif
		exit_signal = 0;
	}

	if (args->flags & CLONE_CHILD_SETTID)
		em->child_set_tid = args->child_tidptr;
	else
		em->child_set_tid = NULL;

	if (args->flags & CLONE_CHILD_CLEARTID)
		em->child_clear_tid = args->child_tidptr;
	else
		em->child_clear_tid = NULL;

	EMUL_UNLOCK(&emul_lock);

	PROC_LOCK(p2);
	p2->p_sigparent = exit_signal;
	PROC_UNLOCK(p2);
	td2 = FIRST_THREAD_IN_PROC(p2);
	/*
	 * If stack == NULL we are supposed to COW the calling process's
	 * stack.  This is what a normal fork() does, so we just leave the
	 * tf_esp arg intact.
	 */
	if (args->stack)
		td2->td_frame->tf_esp = (unsigned int)args->stack;

	if (args->flags & CLONE_SETTLS) {
		struct l_user_desc info;
		int idx;
		int a[2];
		struct segment_descriptor sd;

		error = copyin((void *)td->td_frame->tf_esi, &info, sizeof(struct l_user_desc));
		if (error)
			return (error);

		idx = info.entry_number;

		/*
		 * It looks like we are getting the index we returned
		 * from the set_thread_area() syscall.
		 */
		if (idx != 6 && idx != 3)
			return (EINVAL);

		/* This does not happen in practice. */
		if (idx == 6) {
			/* We might copy out the entry_number as 3. */
			info.entry_number = 3;
			error = copyout(&info, (void *) td->td_frame->tf_esi, sizeof(struct l_user_desc));
			if (error)
				return (error);
		}

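		/*
		 * LDT_entry_a()/LDT_entry_b() pack the Linux user_desc
		 * fields into the low and high 32-bit words of an i386
		 * segment descriptor; the memcpy() below then reinterprets
		 * that pair as a struct segment_descriptor for %gs.
		 */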
		a[0] = LDT_entry_a(&info);
		a[1] = LDT_entry_b(&info);

		memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
	if (ldebug(clone))
		printf("Segment created in clone with CLONE_SETTLS: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n", sd.sd_lobase,
			sd.sd_hibase,
			sd.sd_lolimit,
			sd.sd_hilimit,
			sd.sd_type,
			sd.sd_dpl,
			sd.sd_p,
			sd.sd_xx,
			sd.sd_def32,
			sd.sd_gran);
#endif

		/* set %gs */
		td2->td_pcb->pcb_gsd = sd;
		td2->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
	}

#ifdef DEBUG
	if (ldebug(clone))
		printf(LMSG("clone: successful rfork to %ld, stack %p sig = %d"),
		    (long)p2->p_pid, args->stack, exit_signal);
#endif
	if (args->flags & CLONE_VFORK) {
		PROC_LOCK(p2);
		p2->p_flag |= P_PPWAIT;
		PROC_UNLOCK(p2);
	}

	/*
	 * Make this runnable after we are finished with it.
	 */
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td2);
	sched_add(td2, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	td->td_retval[0] = p2->p_pid;
	td->td_retval[1] = 0;

	if (args->flags & CLONE_VFORK) {
		/* Wait for the child to exit, i.e. emulate vfork. */
		PROC_LOCK(p2);
		while (p2->p_flag & P_PPWAIT)
			msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
		PROC_UNLOCK(p2);
	}

	return (0);
}

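/*
 * Stack geometry used below when emulating MAP_GROWSDOWN mappings.
 * STACK_SIZE corresponds to the implicit per-thread stack limit that
 * linuxthreads assumes (see the discussion in linux_mmap_common()), and
 * GUARD_SIZE is the slice that is effectively left out of the mapping
 * as a guard area.
 */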
#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)

static int linux_mmap_common(struct thread *, struct l_mmap_argv *);

int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{
	struct l_mmap_argv linux_args;

#ifdef DEBUG
	if (ldebug(mmap2))
		printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)args->addr, args->len, args->prot,
		    args->flags, args->fd, args->pgoff);
#endif

	linux_args.addr = args->addr;
	linux_args.len = args->len;
	linux_args.prot = args->prot;
	linux_args.flags = args->flags;
	linux_args.fd = args->fd;
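	/*
	 * mmap2(2) passes the file offset in page-sized units rather than
	 * bytes (which is what lets 32-bit callers reach offsets beyond
	 * 2 GB); convert it to a byte offset for the common code.
	 */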
	linux_args.pgoff = args->pgoff * PAGE_SIZE;

	return (linux_mmap_common(td, &linux_args));
}

int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
	int error;
	struct l_mmap_argv linux_args;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(mmap))
		printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)linux_args.addr, linux_args.len, linux_args.prot,
		    linux_args.flags, linux_args.fd, linux_args.pgoff);
#endif

	return (linux_mmap_common(td, &linux_args));
}

static int
linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args)
{
	struct proc *p = td->td_proc;
	struct mmap_args /* {
		caddr_t addr;
		size_t len;
		int prot;
		int flags;
		int fd;
		long pad;
		off_t pos;
	} */ bsd_args;
	int error;
	struct file *fp;

	error = 0;
	bsd_args.flags = 0;
	fp = NULL;

	/*
	 * Linux mmap(2):
	 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE.
	 */
	if (! ((linux_args->flags & LINUX_MAP_SHARED) ^
	    (linux_args->flags & LINUX_MAP_PRIVATE)))
		return (EINVAL);

	if (linux_args->flags & LINUX_MAP_SHARED)
		bsd_args.flags |= MAP_SHARED;
	if (linux_args->flags & LINUX_MAP_PRIVATE)
		bsd_args.flags |= MAP_PRIVATE;
	if (linux_args->flags & LINUX_MAP_FIXED)
		bsd_args.flags |= MAP_FIXED;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.flags |= MAP_ANON;
	else
		bsd_args.flags |= MAP_NOSYNC;
	if (linux_args->flags & LINUX_MAP_GROWSDOWN)
		bsd_args.flags |= MAP_STACK;

	/*
	 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
	 * on Linux/i386.  We do this to ensure maximum compatibility.
	 * Linux/ia64 does the same in i386 emulation mode.
	 */
	bsd_args.prot = linux_args->prot;
	if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
		bsd_args.prot |= PROT_READ | PROT_EXEC;

	if (linux_args->fd != -1) {
		/*
		 * Linux follows the Solaris mmap(2) description:
		 * the file descriptor fildes is opened with
		 * read permission, regardless of the
		 * protection options specified.
		 */

		if ((error = fget(td, linux_args->fd, &fp)) != 0)
			return (error);
		if (fp->f_type != DTYPE_VNODE) {
			fdrop(fp, td);
			return (EINVAL);
		}

		/* Linux mmap() just fails for O_WRONLY files. */
		if (!(fp->f_flag & FREAD)) {
			fdrop(fp, td);
			return (EACCES);
		}

		fdrop(fp, td);
	}
	bsd_args.fd = linux_args->fd;

	if (linux_args->flags & LINUX_MAP_GROWSDOWN) {
		/*
		 * The Linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the initial BOS, and as len, the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, linuxthreads has an implicit internal
		 * limit on the stack size of STACK_SIZE.  It is just not
		 * enforced explicitly in Linux.  But, here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It then maps the top SGROWSIZ bytes,
		 * and autogrows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		if ((caddr_t)PTRIN(linux_args->addr) + linux_args->len >
		    p->p_vmspace->vm_maxsaddr) {
			/*
			 * Some Linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			PROC_LOCK(p);
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    lim_cur(p, RLIMIT_STACK);
			PROC_UNLOCK(p);
		}

		/* This gives us our maximum stack size. */
		if (linux_args->len > STACK_SIZE - GUARD_SIZE)
			bsd_args.len = linux_args->len;
		else
			bsd_args.len = STACK_SIZE - GUARD_SIZE;

		/*
		 * This gives us a new BOS.  If we're using VM_STACK, then
		 * mmap will just map the top SGROWSIZ bytes, and let
		 * the stack grow down to the limit at BOS.  If we're
		 * not using VM_STACK we map the full stack, since we
		 * don't have a way to autogrow it.
		 */
		bsd_args.addr = (caddr_t)PTRIN(linux_args->addr) -
		    bsd_args.len;
	} else {
		bsd_args.addr = (caddr_t)PTRIN(linux_args->addr);
		bsd_args.len  = linux_args->len;
	}
	bsd_args.pos = linux_args->pgoff;
	bsd_args.pad = 0;

#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n",
		    __func__,
		    (void *)bsd_args.addr, bsd_args.len, bsd_args.prot,
		    bsd_args.flags, bsd_args.fd, (int)bsd_args.pos);
#endif
	error = mmap(td, &bsd_args);
#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s() return: 0x%x (0x%08x)\n",
			__func__, error, (u_int)td->td_retval[0]);
#endif
	return (error);
}

int
linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
{
	struct mprotect_args bsd_args;

	bsd_args.addr = uap->addr;
	bsd_args.len = uap->len;
	bsd_args.prot = uap->prot;
	if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
		bsd_args.prot |= PROT_READ | PROT_EXEC;
	return (mprotect(td, &bsd_args));
}

int
linux_pipe(struct thread *td, struct linux_pipe_args *args)
{
	int error;
	int reg_edx;

#ifdef DEBUG
	if (ldebug(pipe))
		printf(ARGS(pipe, "*"));
#endif

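	/*
	 * FreeBSD's pipe() returns the two descriptors in
	 * td_retval[0]/td_retval[1] (%eax/%edx on i386), while the Linux
	 * syscall returns them through the user pointer and its calling
	 * convention preserves %edx, so save and restore the caller's
	 * %edx value around the call.
	 */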
	reg_edx = td->td_retval[1];
	error = pipe(td, 0);
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	error = copyout(td->td_retval, args->pipefds, 2*sizeof(int));
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	td->td_retval[1] = reg_edx;
	td->td_retval[0] = 0;
	return (0);
}

int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
	int error;
	struct i386_ioperm_args iia;

	iia.start = args->start;
	iia.length = args->length;
	iia.enable = args->enable;
	mtx_lock(&Giant);
	error = i386_set_ioperm(td, &iia);
	mtx_unlock(&Giant);
	return (error);
}

int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
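	/*
	 * PSL_IOPL is the two-bit I/O privilege level field in %eflags;
	 * PSL_IOPL / 3 is its low bit, so multiplying by the requested
	 * level (0-3) scales the level into that field.
	 */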
	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}

int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
	int error;
	struct i386_ldt_args ldt;
	struct l_descriptor ld;
	union descriptor desc;

	if (uap->ptr == NULL)
		return (EINVAL);

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt.start = 0;
		ldt.descs = uap->ptr;
		ldt.num = uap->bytecount / sizeof(union descriptor);
		mtx_lock(&Giant);
		error = i386_get_ldt(td, &ldt);
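		/*
		 * i386_get_ldt() reports the number of descriptors read;
		 * Linux's read_ldt returns a byte count, so scale it.
		 */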
		td->td_retval[0] *= sizeof(union descriptor);
		mtx_unlock(&Giant);
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt.start = ld.entry_number;
		ldt.descs = &desc;
		ldt.num = 1;
		desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
			(ld.contents << 2);
		desc.sd.sd_dpl = 3;
		desc.sd.sd_p = (ld.seg_not_present ^ 1);
		desc.sd.sd_xx = 0;
		desc.sd.sd_def32 = ld.seg_32bit;
		desc.sd.sd_gran = ld.limit_in_pages;
		mtx_lock(&Giant);
		error = i386_set_ldt(td, &ldt, &desc);
		mtx_unlock(&Giant);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == EOPNOTSUPP) {
		printf("linux: modify_ldt needs kernel option USER_LDT\n");
		error = ENOSYS;
	}

	return (error);
}

int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t act, oact;
	int error;

#ifdef DEBUG
	if (ldebug(sigaction))
		printf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

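	/*
	 * This is the old-style sigaction(2) entry point: it takes the
	 * small l_osigaction_t layout, so convert to and from the
	 * l_sigaction_t form that linux_do_sigaction() works with.
	 */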
	if (args->nsa != NULL) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		act.lsa_handler = osa.lsa_handler;
		act.lsa_flags = osa.lsa_flags;
		act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(act.lsa_mask);
		act.lsa_mask.__bits[0] = osa.lsa_mask;
	}

	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);

	if (args->osa != NULL && !error) {
		osa.lsa_handler = oact.lsa_handler;
		osa.lsa_flags = oact.lsa_flags;
		osa.lsa_restorer = oact.lsa_restorer;
		osa.lsa_mask = oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
	sigset_t sigmask;
	l_sigset_t mask;

#ifdef DEBUG
	if (ldebug(sigsuspend))
		printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
#endif

	LINUX_SIGEMPTYSET(mask);
	mask.__bits[0] = args->mask;
	linux_to_bsd_sigset(&mask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
{
	l_sigset_t lmask;
	sigset_t sigmask;
	int error;

#ifdef DEBUG
	if (ldebug(rt_sigsuspend))
		printf(ARGS(rt_sigsuspend, "%p, %d"),
		    (void *)uap->newset, uap->sigsetsize);
#endif

	if (uap->sigsetsize != sizeof(l_sigset_t))
		return (EINVAL);

	error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
	if (error)
		return (error);

	linux_to_bsd_sigset(&lmask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
	struct proc *p = td->td_proc;
	sigset_t sigmask;

#ifdef DEBUG
	if (ldebug(pause))
		printf(ARGS(pause, ""));
#endif

	PROC_LOCK(p);
	sigmask = td->td_sigmask;
	PROC_UNLOCK(p);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t lss;
	int error;

#ifdef DEBUG
	if (ldebug(sigaltstack))
		printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

	if (uap->uss != NULL) {
		error = copyin(uap->uss, &lss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = lss.ss_sp;
		ss.ss_size = lss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
	}
	error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
	    (uap->uoss != NULL) ? &oss : NULL);
	if (!error && uap->uoss != NULL) {
		lss.ss_sp = oss.ss_sp;
		lss.ss_size = oss.ss_size;
		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}

int
linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
{
	struct ftruncate_args sa;

#ifdef DEBUG
	if (ldebug(ftruncate64))
		printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
		    (intmax_t)args->length);
#endif

	sa.fd = args->fd;
	sa.pad = 0;
	sa.length = args->length;
	return ftruncate(td, &sa);
}

int
linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	int a[2];
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
		    info.entry_number,
		    info.base_addr,
		    info.limit,
		    info.seg_32bit,
		    info.contents,
		    info.read_exec_only,
		    info.limit_in_pages,
		    info.seg_not_present,
		    info.useable);
#endif

	idx = info.entry_number;
	/*
	 * Semantics of the Linux version: every thread in the system has an
	 * array of 3 TLS descriptors.  The 1st is the GLIBC TLS, the 2nd is
	 * WINE, the 3rd is unknown.  This syscall loads one of the selected
	 * TLS descriptors with a value and also loads GDT descriptors 6, 7
	 * and 8 with the content of the per-thread descriptors.
	 *
	 * Semantics of the FreeBSD version: I think we can ignore that Linux
	 * has 3 per-thread descriptors and use just the 1st one.  The
	 * tls_array[] is used only in the set/get_thread_area() syscalls and
	 * for loading the GDT descriptors.  In FreeBSD we use just one GDT
	 * descriptor for TLS, so we will load just one.
	 *
	 * XXX: This does not work when a userspace process tries to use more
	 * than one TLS segment; a comment in the Linux sources says Wine
	 * might do that.
	 */

	/*
	 * We support just the GLIBC TLS for now.  We should let 3 proceed as
	 * well, because we use this segment, so if code makes two subsequent
	 * calls it should succeed.
	 */
	if (idx != 6 && idx != -1 && idx != 3)
		return (EINVAL);

	/*
	 * We have to copy out the GDT entry we use.  FreeBSD uses GDT entry
	 * #3 for storing %gs, so load that.
	 *
	 * XXX: What if the userspace program does not check this value and
	 * tries to use 6, 7 or 8?
	 */
	idx = info.entry_number = 3;
	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (error);

	if (LDT_empty(&info)) {
		a[0] = 0;
		a[1] = 0;
	} else {
		a[0] = LDT_entry_a(&info);
		a[1] = LDT_entry_b(&info);
	}

	memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf("Segment created in set_thread_area: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n",
		    sd.sd_lobase,
		    sd.sd_hibase,
		    sd.sd_lolimit,
		    sd.sd_hilimit,
		    sd.sd_type,
		    sd.sd_dpl,
		    sd.sd_p,
		    sd.sd_xx,
		    sd.sd_def32,
		    sd.sd_gran);
#endif

	/* This is taken from the i386 version of cpu_set_user_tls(). */
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	load_gs(GSEL(GUGS_SEL, SEL_UPL));
	critical_exit();

	return (0);
}

int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{

	struct l_user_desc info;
	int error;
	int idx;
	struct l_desc_struct desc;
	struct segment_descriptor sd;

#ifdef DEBUG
	if (ldebug(get_thread_area))
		printf(ARGS(get_thread_area, "%p"), args->desc);
#endif

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	idx = info.entry_number;
	/* XXX: I am not sure if we want 3 to be allowed too. */
	if (idx != 6 && idx != 3)
		return (EINVAL);

	idx = 3;

	memset(&info, 0, sizeof(info));

	sd = PCPU_GET(fsgs_gdt)[1];

	memcpy(&desc, &sd, sizeof(desc));

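	/*
	 * Unpack the raw segment descriptor back into the Linux user_desc
	 * fields for the caller.
	 */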
	info.entry_number = idx;
	info.base_addr = GET_BASE(&desc);
	info.limit = GET_LIMIT(&desc);
	info.seg_32bit = GET_32BIT(&desc);
	info.contents = GET_CONTENTS(&desc);
	info.read_exec_only = !GET_WRITABLE(&desc);
	info.limit_in_pages = GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !GET_PRESENT(&desc);
	info.useable = GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}

/* copied from kern/kern_time.c */
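/*
 * The wrappers below simply cast the Linux argument structures to their
 * native ktimer_*() counterparts and forward the call, relying on the
 * argument layouts matching.
 */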
int
linux_timer_create(struct thread *td, struct linux_timer_create_args *args)
{
	return ktimer_create(td, (struct ktimer_create_args *) args);
}

int
linux_timer_settime(struct thread *td, struct linux_timer_settime_args *args)
{
	return ktimer_settime(td, (struct ktimer_settime_args *) args);
}

int
linux_timer_gettime(struct thread *td, struct linux_timer_gettime_args *args)
{
	return ktimer_gettime(td, (struct ktimer_gettime_args *) args);
}

int
linux_timer_getoverrun(struct thread *td, struct linux_timer_getoverrun_args *args)
{
	return ktimer_getoverrun(td, (struct ktimer_getoverrun_args *) args);
}

int
linux_timer_delete(struct thread *td, struct linux_timer_delete_args *args)
{
	return ktimer_delete(td, (struct ktimer_delete_args *) args);
}

/* XXX: this won't work when built as a module - convert it */
int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_open(td, (struct kmq_open_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_unlink(td, (struct kmq_unlink_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_timedsend(td, (struct kmq_timedsend_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_timedreceive(td, (struct kmq_timedreceive_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_notify(td, (struct kmq_notify_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_setattr(td, (struct kmq_setattr_args *) args);
#else
	return (ENOSYS);
#endif
}