/*-
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/linux/linux_machdep.c 163536 2006-10-20 10:09:40Z netchild $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/unistd.h>
#include <sys/wait.h>

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_emul.h>

#include <i386/include/pcb.h>			/* needed for pcb definition in linux_set_thread_area */

#include "opt_posix.h"

extern struct sysentvec elf32_freebsd_sysvec;	/* defined in i386/i386/elf_machdep.c */

struct l_descriptor {
	l_uint		entry_number;
	l_ulong		base_addr;
	l_uint		limit;
	l_uint		seg_32bit:1;
	l_uint		contents:2;
	l_uint		read_exec_only:1;
	l_uint		limit_in_pages:1;
	l_uint		seg_not_present:1;
	l_uint		useable:1;
};

struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval	*timeout;
};

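/*
 * Translate sigaltstack(2) flag values between their Linux (LINUX_SS_*)
 * and FreeBSD (SS_*) encodings; used by linux_sigaltstack() below.
 */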
int
linux_to_bsd_sigaltstack(int lsa)
{
	int bsa = 0;

	if (lsa & LINUX_SS_DISABLE)
		bsa |= SS_DISABLE;
	if (lsa & LINUX_SS_ONSTACK)
		bsa |= SS_ONSTACK;
	return (bsa);
}

int
bsd_to_linux_sigaltstack(int bsa)
{
	int lsa = 0;

	if (bsa & SS_DISABLE)
		lsa |= LINUX_SS_DISABLE;
	if (bsa & SS_ONSTACK)
		lsa |= LINUX_SS_ONSTACK;
	return (lsa);
}

int
linux_execve(struct thread *td, struct linux_execve_args *args)
{
	int error;
	char *newpath;
	struct image_args eargs;

	LCONVPATHEXIST(td, args->path, &newpath);

#ifdef DEBUG
	if (ldebug(execve))
		printf(ARGS(execve, "%s"), newpath);
#endif

	error = exec_copyin_args(&eargs, newpath, UIO_SYSSPACE,
	    args->argp, args->envp);
	free(newpath, M_TEMP);
	if (error == 0)
		/*
		 * A Linux process can exec a FreeBSD binary.  Do not attempt
		 * to create emuldata for such a process using
		 * linux_proc_init(); that leads to a KASSERT panic because
		 * such a process has p->p_emuldata == NULL.
		 */
		if (td->td_proc->p_sysent == &elf_linux_sysvec)
			error = linux_proc_init(td, 0, 0);
	return (error);
}

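/*
 * Linux multiplexes all System V IPC calls through the single ipc(2)
 * syscall; the low 16 bits of 'what' select the operation and the
 * remaining arguments are forwarded to the corresponding handler.
 */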
struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};

int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {
		struct linux_semop_args a;

		a.semid = args->arg1;
		a.tsops = args->ptr;
		a.nsops = args->arg2;
		return (linux_semop(td, &a));
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		return (linux_semget(td, &a));
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;
		int error;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		error = copyin(args->ptr, &a.arg, sizeof(a.arg));
		if (error)
			return (error);
		return (linux_semctl(td, &a));
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = args->ptr;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		return (linux_msgsnd(td, &a));
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;
			int error;

			if (args->ptr == NULL)
				return (EINVAL);
			error = copyin(args->ptr, &tmp, sizeof(tmp));
			if (error)
				return (error);
			a.msgp = tmp.msgp;
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = args->ptr;
			a.msgtyp = args->arg5;
		}
		return (linux_msgrcv(td, &a));
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		return (linux_msgget(td, &a));
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_msgctl(td, &a));
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;

		a.shmid = args->arg1;
		a.shmaddr = args->ptr;
		a.shmflg = args->arg2;
		a.raddr = (l_ulong *)args->arg3;
		return (linux_shmat(td, &a));
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = args->ptr;
		return (linux_shmdt(td, &a));
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		return (linux_shmget(td, &a));
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_shmctl(td, &a));
	}
	default:
		break;
	}

	return (EINVAL);
}

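/*
 * The old Linux select(2) entry point passes a pointer to a block of
 * arguments in user space instead of passing them individually; copy
 * the block in and hand it to linux_select().
 */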
int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

#ifdef DEBUG
	if (ldebug(old_select))
		printf(ARGS(old_select, "%p"), args->ptr);
#endif

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.nfds = linux_args.nfds;
	newsel.readfds = linux_args.readfds;
	newsel.writefds = linux_args.writefds;
	newsel.exceptfds = linux_args.exceptfds;
	newsel.timeout = linux_args.timeout;
	return (linux_select(td, &newsel));
}

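/*
 * fork(2): use the native fork and then set up the Linux emulation
 * data (emuldata) for the new process.
 */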
int
linux_fork(struct thread *td, struct linux_fork_args *args)
{
	int error;

#ifdef DEBUG
	if (ldebug(fork))
		printf(ARGS(fork, ""));
#endif

	if ((error = fork(td, (struct fork_args *)args)) != 0)
		return (error);

	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);

	return (0);
}

int
linux_vfork(struct thread *td, struct linux_vfork_args *args)
{
	int error;
	struct proc *p2;

#ifdef DEBUG
	if (ldebug(vfork))
		printf(ARGS(vfork, ""));
#endif

	/* Exclude RFPPWAIT. */
	if ((error = fork1(td, RFFDG | RFPROC | RFMEM, 0, &p2)) != 0)
		return (error);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	/* Are we the child? */
	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);
	/* Wait for the child to exit, i.e., emulate vfork(). */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	return (0);
}

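/*
 * clone(2): map the CLONE_* flags onto the corresponding rfork flags,
 * create the child via fork1(), set up its emuldata, TID pointers and
 * (optionally) its TLS segment, and only then make it runnable.
 */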
int
linux_clone(struct thread *td, struct linux_clone_args *args)
{
	int error, ff = RFPROC | RFSTOPPED;
	struct proc *p2;
	struct thread *td2;
	int exit_signal;
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(clone)) {
		printf(ARGS(clone, "flags %x, stack %x, parent tid: %x, child tid: %x"),
		    (unsigned int)args->flags, (unsigned int)args->stack,
		    (unsigned int)args->parent_tidptr, (unsigned int)args->child_tidptr);
	}
#endif

	exit_signal = args->flags & 0x000000ff;
	if (!LINUX_SIG_VALID(exit_signal) && exit_signal != 0)
		return (EINVAL);

	if (exit_signal <= LINUX_SIGTBLSZ)
		exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)];

	if (args->flags & CLONE_VM)
		ff |= RFMEM;
	if (args->flags & CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	/*
	 * XXX: In Linux, sharing of fs info (chroot/cwd/umask) and of open
	 * files are independent of each other.  In FreeBSD both live in a
	 * single structure, but in practice this causes no problems because
	 * the two flags are usually set together.
	 */
	if (!(args->flags & (CLONE_FILES | CLONE_FS)))
		ff |= RFFDG;

	/*
	 * Attempt to detect when linux_clone(2) is used for creating
	 * kernel threads.  Unfortunately, despite the existence of the
	 * CLONE_THREAD flag, the version of the linuxthreads package used
	 * in most popular distros as of the beginning of 2005 does not
	 * make any use of it.  This detection therefore relies entirely on
	 * the empirical observation that linuxthreads sets a certain
	 * combination of flags, which lets us detect the case fairly
	 * precisely and notify the FreeBSD kernel that several processes
	 * are in fact part of the same threading group, so that special
	 * treatment is applied for signal delivery between those processes
	 * and for fd locking.
	 */
	if ((args->flags & 0xffffff00) == THREADING_FLAGS)
		ff |= RFTHREAD;

	error = fork1(td, ff, 0, &p2);
	if (error)
		return (error);

	/* Create the emuldata. */
	error = linux_proc_init(td, p2->p_pid, args->flags);
	/* Reference it - no need to check this. */
	em = em_find(p2, EMUL_UNLOCKED);
	KASSERT(em != NULL, ("clone: emuldata not found.\n"));
	/* And adjust it. */
	if (args->flags & CLONE_PARENT_SETTID) {
		if (args->parent_tidptr == NULL) {
			EMUL_UNLOCK(&emul_lock);
			return (EINVAL);
		}
		error = copyout(&p2->p_pid, args->parent_tidptr, sizeof(p2->p_pid));
		if (error) {
			EMUL_UNLOCK(&emul_lock);
			return (error);
		}
	}

	if (args->flags & (CLONE_PARENT|CLONE_THREAD)) {
		sx_xlock(&proctree_lock);
		PROC_LOCK(p2);
		proc_reparent(p2, td->td_proc->p_pptr);
		PROC_UNLOCK(p2);
		sx_xunlock(&proctree_lock);
	}

	if (args->flags & CLONE_THREAD) {
		/*
		 * XXX: Linux mangles pgrp and pptr somehow.  I think it
		 * might be this, but I am not sure.
		 */
#ifdef notyet
		PROC_LOCK(p2);
		p2->p_pgrp = td->td_proc->p_pgrp;
		PROC_UNLOCK(p2);
#endif
		exit_signal = 0;
	}

	if (args->flags & CLONE_CHILD_SETTID)
		em->child_set_tid = args->child_tidptr;
	else
		em->child_set_tid = NULL;

	if (args->flags & CLONE_CHILD_CLEARTID)
		em->child_clear_tid = args->child_tidptr;
	else
		em->child_clear_tid = NULL;

	EMUL_UNLOCK(&emul_lock);

	PROC_LOCK(p2);
	p2->p_sigparent = exit_signal;
	PROC_UNLOCK(p2);
	td2 = FIRST_THREAD_IN_PROC(p2);
	/*
	 * If stack == NULL we are supposed to COW the calling process's
	 * stack.  This is what a normal fork() does, so we just leave the
	 * tf_esp argument intact.
	 */
	if (args->stack)
		td2->td_frame->tf_esp = (unsigned int)args->stack;

	if (args->flags & CLONE_SETTLS) {
		struct l_user_desc info;
		int idx;
		int a[2];
		struct segment_descriptor sd;

		error = copyin((void *)td->td_frame->tf_esi, &info, sizeof(struct l_user_desc));
		if (error)
			return (error);

		idx = info.entry_number;

		/*
		 * Looks like we are getting back the idx we returned
		 * from the set_thread_area() syscall.
		 */
		if (idx != 6 && idx != 3)
			return (EINVAL);

		/* This does not happen in practice. */
		if (idx == 6) {
			/* Copy the entry_number back out as 3. */
			info.entry_number = 3;
			error = copyout(&info, (void *) td->td_frame->tf_esi, sizeof(struct l_user_desc));
			if (error)
				return (error);
		}

		a[0] = LDT_entry_a(&info);
		a[1] = LDT_entry_b(&info);

		memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
	if (ldebug(clone))
		printf("Segment created in clone with CLONE_SETTLS: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n", sd.sd_lobase,
			sd.sd_hibase,
			sd.sd_lolimit,
			sd.sd_hilimit,
			sd.sd_type,
			sd.sd_dpl,
			sd.sd_p,
			sd.sd_xx,
			sd.sd_def32,
			sd.sd_gran);
#endif

		/* Set %gs. */
		td2->td_pcb->pcb_gsd = sd;
		td2->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
	}

#ifdef DEBUG
	if (ldebug(clone))
		printf(LMSG("clone: successful rfork to %ld, stack %p sig = %d"),
		    (long)p2->p_pid, args->stack, exit_signal);
#endif

	/*
	 * Make this runnable after we are finished with it.
	 */
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td2);
	setrunqueue(td2, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	td->td_retval[0] = p2->p_pid;
	td->td_retval[1] = 0;

	if (args->flags & CLONE_VFORK) {
		/* Wait for the child to exit, i.e., emulate vfork(). */
		PROC_LOCK(p2);
		p2->p_flag |= P_PPWAIT;
		while (p2->p_flag & P_PPWAIT)
			msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
		PROC_UNLOCK(p2);
	}

	return (0);
}

/* XXX move */
struct l_mmap_argv {
	l_caddr_t	addr;
	l_int		len;
	l_int		prot;
	l_int		flags;
	l_int		fd;
	l_int		pos;
};

#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)

static int linux_mmap_common(struct thread *, struct l_mmap_argv *);

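/*
 * mmap2(2) passes the file offset in units of pages rather than bytes;
 * convert it and share the rest of the work with linux_mmap() via
 * linux_mmap_common().
 */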
int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{
	struct l_mmap_argv linux_args;

#ifdef DEBUG
	if (ldebug(mmap2))
		printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)args->addr, args->len, args->prot,
		    args->flags, args->fd, args->pgoff);
#endif

	linux_args.addr = (l_caddr_t)args->addr;
	linux_args.len = args->len;
	linux_args.prot = args->prot;
	linux_args.flags = args->flags;
	linux_args.fd = args->fd;
	linux_args.pos = args->pgoff * PAGE_SIZE;

	return (linux_mmap_common(td, &linux_args));
}

int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
	int error;
	struct l_mmap_argv linux_args;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(mmap))
		printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)linux_args.addr, linux_args.len, linux_args.prot,
		    linux_args.flags, linux_args.fd, linux_args.pos);
#endif

	return (linux_mmap_common(td, &linux_args));
}

static int
linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args)
{
	struct proc *p = td->td_proc;
	struct mmap_args /* {
		caddr_t addr;
		size_t len;
		int prot;
		int flags;
		int fd;
		long pad;
		off_t pos;
	} */ bsd_args;
	int error;
	struct file *fp;

	error = 0;
	bsd_args.flags = 0;
	fp = NULL;

	/*
	 * Linux mmap(2):
	 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
	 */
	if (! ((linux_args->flags & LINUX_MAP_SHARED) ^
	    (linux_args->flags & LINUX_MAP_PRIVATE)))
		return (EINVAL);

	if (linux_args->flags & LINUX_MAP_SHARED)
		bsd_args.flags |= MAP_SHARED;
	if (linux_args->flags & LINUX_MAP_PRIVATE)
		bsd_args.flags |= MAP_PRIVATE;
	if (linux_args->flags & LINUX_MAP_FIXED)
		bsd_args.flags |= MAP_FIXED;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.flags |= MAP_ANON;
	else
		bsd_args.flags |= MAP_NOSYNC;
	if (linux_args->flags & LINUX_MAP_GROWSDOWN) {
		bsd_args.flags |= MAP_STACK;

		/*
		 * The Linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the initial BOS, and as len, the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, linuxthreads has an implicit internal
		 * limit on stack size of STACK_SIZE; it is just not
		 * enforced explicitly in Linux.  Here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It then maps the top SGROWSIZ bytes,
		 * and autogrows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		/* This gives us TOS */
		bsd_args.addr = linux_args->addr + linux_args->len;

		if (bsd_args.addr > p->p_vmspace->vm_maxsaddr) {
			/*
			 * Some Linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			PROC_LOCK(p);
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    lim_cur(p, RLIMIT_STACK);
			PROC_UNLOCK(p);
		}

		/* This gives us our maximum stack size */
		if (linux_args->len > STACK_SIZE - GUARD_SIZE)
			bsd_args.len = linux_args->len;
		else
			bsd_args.len  = STACK_SIZE - GUARD_SIZE;

		/*
		 * This gives us a new BOS.  If we're using VM_STACK, then
		 * mmap will just map the top SGROWSIZ bytes, and let
		 * the stack grow down to the limit at BOS.  If we're
		 * not using VM_STACK we map the full stack, since we
		 * don't have a way to autogrow it.
		 */
		bsd_args.addr -= bsd_args.len;
	} else {
		bsd_args.addr = linux_args->addr;
		bsd_args.len  = linux_args->len;
	}

	bsd_args.prot = linux_args->prot;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.fd = -1;
	else {
		/*
		 * Linux follows the Solaris mmap(2) description:
		 * The file descriptor fildes is opened with
		 * read permission, regardless of the
		 * protection options specified.
		 * If PROT_WRITE is specified, the application
		 * must have opened the file descriptor
		 * fildes with write permission unless
		 * MAP_PRIVATE is specified in the flag
		 * argument as described below.
		 */

		if ((error = fget(td, linux_args->fd, &fp)) != 0)
			return (error);
		if (fp->f_type != DTYPE_VNODE) {
			fdrop(fp, td);
			return (EINVAL);
		}

		/* Linux mmap() just fails for O_WRONLY files */
		if (! (fp->f_flag & FREAD)) {
			fdrop(fp, td);
			return (EACCES);
		}

		bsd_args.fd = linux_args->fd;
		fdrop(fp, td);
	}
	bsd_args.pos = linux_args->pos;
	bsd_args.pad = 0;

#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n",
		    __func__,
		    (void *)bsd_args.addr, bsd_args.len, bsd_args.prot,
		    bsd_args.flags, bsd_args.fd, (int)bsd_args.pos);
#endif
	error = mmap(td, &bsd_args);
#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s() return: 0x%x (0x%08x)\n",
			__func__, error, (u_int)td->td_retval[0]);
#endif
	return (error);
}

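/*
 * pipe(2): Linux returns both descriptors through the user-supplied
 * array instead of in %eax/%edx, so preserve the caller's %edx
 * (td_retval[1]) around the native pipe() call.
 */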
int
linux_pipe(struct thread *td, struct linux_pipe_args *args)
{
	int error;
	int reg_edx;

#ifdef DEBUG
	if (ldebug(pipe))
		printf(ARGS(pipe, "*"));
#endif

	reg_edx = td->td_retval[1];
	error = pipe(td, 0);
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	error = copyout(td->td_retval, args->pipefds, 2*sizeof(int));
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	td->td_retval[1] = reg_edx;
	td->td_retval[0] = 0;
	return (0);
}

int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
	int error;
	struct i386_ioperm_args iia;

	iia.start = args->start;
	iia.length = args->length;
	iia.enable = args->enable;
	mtx_lock(&Giant);
	error = i386_set_ioperm(td, &iia);
	mtx_unlock(&Giant);
	return (error);
}

int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = suser(td)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}

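/*
 * modify_ldt(2): func 0x00 reads the current LDT into the user buffer;
 * funcs 0x01 and 0x11 install a single descriptor described by a
 * struct l_descriptor.  Other function codes are rejected.
 */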
int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
	int error;
	struct i386_ldt_args ldt;
	struct l_descriptor ld;
	union descriptor desc;

	if (uap->ptr == NULL)
		return (EINVAL);

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt.start = 0;
		ldt.descs = uap->ptr;
		ldt.num = uap->bytecount / sizeof(union descriptor);
		mtx_lock(&Giant);
		error = i386_get_ldt(td, &ldt);
		td->td_retval[0] *= sizeof(union descriptor);
		mtx_unlock(&Giant);
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt.start = ld.entry_number;
		ldt.descs = &desc;
		ldt.num = 1;
		desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
			(ld.contents << 2);
		desc.sd.sd_dpl = 3;
		desc.sd.sd_p = (ld.seg_not_present ^ 1);
		desc.sd.sd_xx = 0;
		desc.sd.sd_def32 = ld.seg_32bit;
		desc.sd.sd_gran = ld.limit_in_pages;
		mtx_lock(&Giant);
		error = i386_set_ldt(td, &ldt, &desc);
		mtx_unlock(&Giant);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == EOPNOTSUPP) {
		printf("linux: modify_ldt needs kernel option USER_LDT\n");
		error = ENOSYS;
	}

	return (error);
}

int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t act, oact;
	int error;

#ifdef DEBUG
	if (ldebug(sigaction))
		printf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

	if (args->nsa != NULL) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		act.lsa_handler = osa.lsa_handler;
		act.lsa_flags = osa.lsa_flags;
		act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(act.lsa_mask);
		act.lsa_mask.__bits[0] = osa.lsa_mask;
	}

	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);

	if (args->osa != NULL && !error) {
		osa.lsa_handler = oact.lsa_handler;
		osa.lsa_flags = oact.lsa_flags;
		osa.lsa_restorer = oact.lsa_restorer;
		osa.lsa_mask = oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
	sigset_t sigmask;
	l_sigset_t mask;

#ifdef DEBUG
	if (ldebug(sigsuspend))
		printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
#endif

	LINUX_SIGEMPTYSET(mask);
	mask.__bits[0] = args->mask;
	linux_to_bsd_sigset(&mask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
{
	l_sigset_t lmask;
	sigset_t sigmask;
	int error;

#ifdef DEBUG
	if (ldebug(rt_sigsuspend))
		printf(ARGS(rt_sigsuspend, "%p, %d"),
		    (void *)uap->newset, uap->sigsetsize);
#endif

	if (uap->sigsetsize != sizeof(l_sigset_t))
		return (EINVAL);

	error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
	if (error)
		return (error);

	linux_to_bsd_sigset(&lmask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
	struct proc *p = td->td_proc;
	sigset_t sigmask;

#ifdef DEBUG
	if (ldebug(pause))
		printf(ARGS(pause, ""));
#endif

	PROC_LOCK(p);
	sigmask = td->td_sigmask;
	PROC_UNLOCK(p);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t lss;
	int error;

#ifdef DEBUG
	if (ldebug(sigaltstack))
		printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

	if (uap->uss != NULL) {
		error = copyin(uap->uss, &lss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = lss.ss_sp;
		ss.ss_size = lss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
	}
	error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
	    (uap->uoss != NULL) ? &oss : NULL);
	if (!error && uap->uoss != NULL) {
		lss.ss_sp = oss.ss_sp;
		lss.ss_size = oss.ss_size;
		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}

int
linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
{
	struct ftruncate_args sa;

#ifdef DEBUG
	if (ldebug(ftruncate64))
		printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
		    (intmax_t)args->length);
#endif

	sa.fd = args->fd;
	sa.pad = 0;
	sa.length = args->length;
	return (ftruncate(td, &sa));
}

int
linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	int a[2];
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
		    info.entry_number,
		    info.base_addr,
		    info.limit,
		    info.seg_32bit,
		    info.contents,
		    info.read_exec_only,
		    info.limit_in_pages,
		    info.seg_not_present,
		    info.useable);
#endif

	idx = info.entry_number;
	/*
	 * Semantics of the Linux version: every thread in the system has an
	 * array of three TLS descriptors.  The first is the GLIBC TLS, the
	 * second is used by WINE, and the third is unknown.  This syscall
	 * loads the selected TLS descriptor with a value and also loads GDT
	 * descriptors 6, 7 and 8 with the contents of the per-thread
	 * descriptors.
	 *
	 * Semantics of the FreeBSD version: I think we can ignore the fact
	 * that Linux has three per-thread descriptors and use just the
	 * first one.  The tls_array[] is used only in the
	 * set/get_thread_area() syscalls and for loading the GDT
	 * descriptors.  In FreeBSD we use just one GDT descriptor for TLS,
	 * so we will load just one.
	 * XXX: this does not work when a user-space process tries to use
	 * more than one TLS segment; a comment in the Linux sources says
	 * WINE might do that.
	 */

	/*
	 * We support just the GLIBC TLS for now.  We should let 3 proceed
	 * as well, because we use that segment ourselves, so if code makes
	 * two subsequent calls the second should succeed too.
	 */
	if (idx != 6 && idx != -1 && idx != 3)
		return (EINVAL);

	/*
	 * We have to copy out the GDT entry we use.  FreeBSD uses GDT
	 * entry #3 for storing %gs, so load that one.
	 * XXX: what if the userspace program does not check this value
	 * and tries to use 6, 7 or 8?
	 */
	idx = info.entry_number = 3;
	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (error);

	if (LDT_empty(&info)) {
		a[0] = 0;
		a[1] = 0;
	} else {
		a[0] = LDT_entry_a(&info);
		a[1] = LDT_entry_b(&info);
	}

	memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf("Segment created in set_thread_area: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n", sd.sd_lobase,
			sd.sd_hibase,
			sd.sd_lolimit,
			sd.sd_hilimit,
			sd.sd_type,
			sd.sd_dpl,
			sd.sd_p,
			sd.sd_xx,
			sd.sd_def32,
			sd.sd_gran);
#endif

	/* This is taken from the i386 version of cpu_set_user_tls(). */
	critical_enter();
	/* Set %gs. */
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	load_gs(GSEL(GUGS_SEL, SEL_UPL));
	critical_exit();

	return (0);
}

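/*
 * get_thread_area(2): read back the GDT slot used for %gs and convert
 * it into the Linux user_desc layout expected by the caller.
 */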
int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	struct l_desc_struct desc;
	struct segment_descriptor sd;

#ifdef DEBUG
	if (ldebug(get_thread_area))
		printf(ARGS(get_thread_area, "%p"), args->desc);
#endif

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	idx = info.entry_number;
	/* XXX: I am not sure if we want 3 to be allowed too. */
	if (idx != 6 && idx != 3)
		return (EINVAL);

	idx = 3;

	memset(&info, 0, sizeof(info));

	sd = PCPU_GET(fsgs_gdt)[1];

	memcpy(&desc, &sd, sizeof(desc));

	info.entry_number = idx;
	info.base_addr = GET_BASE(&desc);
	info.limit = GET_LIMIT(&desc);
	info.seg_32bit = GET_32BIT(&desc);
	info.contents = GET_CONTENTS(&desc);
	info.read_exec_only = !GET_WRITABLE(&desc);
	info.limit_in_pages = GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !GET_PRESENT(&desc);
	info.useable = GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}

/* copied from kern/kern_time.c */
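/*
 * These wrappers pass their arguments through unchanged to the native
 * ktimer_*() implementations.
 */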
int
linux_timer_create(struct thread *td, struct linux_timer_create_args *args)
{
	return (ktimer_create(td, (struct ktimer_create_args *) args));
}

int
linux_timer_settime(struct thread *td, struct linux_timer_settime_args *args)
{
	return (ktimer_settime(td, (struct ktimer_settime_args *) args));
}

int
linux_timer_gettime(struct thread *td, struct linux_timer_gettime_args *args)
{
	return (ktimer_gettime(td, (struct ktimer_gettime_args *) args));
}

int
linux_timer_getoverrun(struct thread *td, struct linux_timer_getoverrun_args *args)
{
	return (ktimer_getoverrun(td, (struct ktimer_getoverrun_args *) args));
}

int
linux_timer_delete(struct thread *td, struct linux_timer_delete_args *args)
{
	return (ktimer_delete(td, (struct ktimer_delete_args *) args));
}

/* XXX: this won't work with a module - convert it. */
int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (kmq_open(td, (struct kmq_open_args *) args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (kmq_unlink(td, (struct kmq_unlink_args *) args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (kmq_timedsend(td, (struct kmq_timedsend_args *) args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (kmq_timedreceive(td, (struct kmq_timedreceive_args *) args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (kmq_notify(td, (struct kmq_notify_args *) args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (kmq_setattr(td, (struct kmq_setattr_args *) args));
#else
	return (ENOSYS);
#endif
}