1/*-
2 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer
11 *    in this position and unchanged.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 *    derived from this software without specific prior written permission
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: stable/11/sys/compat/linux/linux_misc.c 347521 2019-05-13 11:17:31Z dchagin $");
32
33#include "opt_compat.h"
34
35#include <sys/param.h>
36#include <sys/blist.h>
37#include <sys/fcntl.h>
38#if defined(__i386__)
39#include <sys/imgact_aout.h>
40#endif
41#include <sys/jail.h>
42#include <sys/kernel.h>
43#include <sys/limits.h>
44#include <sys/lock.h>
45#include <sys/malloc.h>
46#include <sys/mman.h>
47#include <sys/mount.h>
48#include <sys/mutex.h>
49#include <sys/namei.h>
50#include <sys/priv.h>
51#include <sys/proc.h>
52#include <sys/procctl.h>
53#include <sys/reboot.h>
54#include <sys/racct.h>
55#include <sys/random.h>
56#include <sys/resourcevar.h>
57#include <sys/sched.h>
58#include <sys/sdt.h>
59#include <sys/signalvar.h>
60#include <sys/stat.h>
61#include <sys/syscallsubr.h>
62#include <sys/sysctl.h>
63#include <sys/sysproto.h>
64#include <sys/systm.h>
65#include <sys/time.h>
66#include <sys/vmmeter.h>
67#include <sys/vnode.h>
68#include <sys/wait.h>
69#include <sys/cpuset.h>
70#include <sys/uio.h>
71
72#include <security/mac/mac_framework.h>
73
74#include <vm/vm.h>
75#include <vm/pmap.h>
76#include <vm/vm_kern.h>
77#include <vm/vm_map.h>
78#include <vm/vm_extern.h>
79#include <vm/vm_object.h>
80#include <vm/swap_pager.h>
81
82#ifdef COMPAT_LINUX32
83#include <machine/../linux32/linux.h>
84#include <machine/../linux32/linux32_proto.h>
85#else
86#include <machine/../linux/linux.h>
87#include <machine/../linux/linux_proto.h>
88#endif
89
90#include <compat/linux/linux_dtrace.h>
91#include <compat/linux/linux_file.h>
92#include <compat/linux/linux_mib.h>
93#include <compat/linux/linux_signal.h>
94#include <compat/linux/linux_timer.h>
95#include <compat/linux/linux_util.h>
96#include <compat/linux/linux_sysproto.h>
97#include <compat/linux/linux_emul.h>
98#include <compat/linux/linux_misc.h>
99
100/**
101 * Special DTrace provider for the linuxulator.
102 *
103 * In this file we define the provider for the entire linuxulator. All
104 * modules (= files of the linuxulator) use it.
105 *
106 * We define a different name depending on the emulated bitsize, see
107 * ../../<ARCH>/linux{,32}/linux.h, e.g.:
108 *      native bitsize          = linuxulator
109 *      amd64, 32bit emulation  = linuxulator32
110 */
111LIN_SDT_PROVIDER_DEFINE(LINUX_DTRACE);
112
113int stclohz;				/* Statistics clock frequency */
114
115static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
116	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
117	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
118	RLIMIT_MEMLOCK, RLIMIT_AS
119};
120
121struct l_sysinfo {
122	l_long		uptime;		/* Seconds since boot */
123	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
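	/* Linux reports loads[] in fixed point, scaled by 1 << 16. */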
124#define LINUX_SYSINFO_LOADS_SCALE 65536
125	l_ulong		totalram;	/* Total usable main memory size */
126	l_ulong		freeram;	/* Available memory size */
127	l_ulong		sharedram;	/* Amount of shared memory */
128	l_ulong		bufferram;	/* Memory used by buffers */
129	l_ulong		totalswap;	/* Total swap space size */
130	l_ulong		freeswap;	/* swap space still available */
131	l_ushort	procs;		/* Number of current processes */
132	l_ushort	pads;
133	l_ulong		totalbig;
134	l_ulong		freebig;
135	l_uint		mem_unit;
136	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
137};
138
139struct l_pselect6arg {
140	l_uintptr_t	ss;
141	l_size_t	ss_len;
142};
143
144static int	linux_utimensat_nsec_valid(l_long);
145
146
147int
148linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
149{
150	struct l_sysinfo sysinfo;
151	vm_object_t object;
152	int i, j;
153	struct timespec ts;
154
155	bzero(&sysinfo, sizeof(sysinfo));
156	getnanouptime(&ts);
157	if (ts.tv_nsec != 0)
158		ts.tv_sec++;
159	sysinfo.uptime = ts.tv_sec;
160
161	/* Use the information from the mib to get our load averages */
162	for (i = 0; i < 3; i++)
163		sysinfo.loads[i] = averunnable.ldavg[i] *
164		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;
165
166	sysinfo.totalram = physmem * PAGE_SIZE;
167	sysinfo.freeram = sysinfo.totalram - vm_cnt.v_wire_count * PAGE_SIZE;
168
169	sysinfo.sharedram = 0;
170	mtx_lock(&vm_object_list_mtx);
171	TAILQ_FOREACH(object, &vm_object_list, object_list)
172		if (object->shadow_count > 1)
173			sysinfo.sharedram += object->resident_page_count;
174	mtx_unlock(&vm_object_list_mtx);
175
176	sysinfo.sharedram *= PAGE_SIZE;
177	sysinfo.bufferram = 0;
178
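	/* swap_pager_status() reports total and used swap space in pages. */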
179	swap_pager_status(&i, &j);
180	sysinfo.totalswap = i * PAGE_SIZE;
181	sysinfo.freeswap = (i - j) * PAGE_SIZE;
182
183	sysinfo.procs = nprocs;
184
185	/* The following are only present in newer Linux kernels. */
186	sysinfo.totalbig = 0;
187	sysinfo.freebig = 0;
188	sysinfo.mem_unit = 1;
189
190	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
191}
192
193#ifdef LINUX_LEGACY_SYSCALLS
194int
195linux_alarm(struct thread *td, struct linux_alarm_args *args)
196{
197	struct itimerval it, old_it;
198	u_int secs;
199	int error;
200
201#ifdef DEBUG
202	if (ldebug(alarm))
203		printf(ARGS(alarm, "%u"), args->secs);
204#endif
205	secs = args->secs;
	/*
	 * Linux alarm() is always successful. Limit secs to INT32_MAX / 2
	 * to match kern_setitimer()'s limit and avoid an error from it.
	 *
	 * XXX. Linux limits secs to INT_MAX on 32-bit platforms and does
	 * not limit them at all on 64-bit platforms.
	 */
213	if (secs > INT32_MAX / 2)
214		secs = INT32_MAX / 2;
215
216	it.it_value.tv_sec = secs;
217	it.it_value.tv_usec = 0;
218	timevalclear(&it.it_interval);
219	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
220	KASSERT(error == 0, ("kern_setitimer returns %d", error));
221
222	if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) ||
223	    old_it.it_value.tv_usec >= 500000)
224		old_it.it_value.tv_sec++;
225	td->td_retval[0] = old_it.it_value.tv_sec;
226	return (0);
227}
228#endif
229
230int
231linux_brk(struct thread *td, struct linux_brk_args *args)
232{
233	struct vmspace *vm = td->td_proc->p_vmspace;
234	vm_offset_t new, old;
235	struct obreak_args /* {
236		char * nsize;
237	} */ tmp;
238
239#ifdef DEBUG
240	if (ldebug(brk))
241		printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend);
242#endif
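	/*
	 * Linux brk() never fails: it returns the new break on success and
	 * the unchanged current break otherwise, so always return 0 and
	 * report the resulting break via td_retval.
	 */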
243	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
244	new = (vm_offset_t)args->dsend;
245	tmp.nsize = (char *)new;
246	if (((caddr_t)new > vm->vm_daddr) && !sys_obreak(td, &tmp))
247		td->td_retval[0] = (long)new;
248	else
249		td->td_retval[0] = (long)old;
250
251	return (0);
252}
253
254#if defined(__i386__)
255/* XXX: what about amd64/linux32? */
256
257int
258linux_uselib(struct thread *td, struct linux_uselib_args *args)
259{
260	struct nameidata ni;
261	struct vnode *vp;
262	struct exec *a_out;
263	struct vattr attr;
264	vm_offset_t vmaddr;
265	unsigned long file_offset;
266	unsigned long bss_size;
267	char *library;
268	ssize_t aresid;
269	int error, locked, writecount;
270
271	LCONVPATHEXIST(td, args->library, &library);
272
273#ifdef DEBUG
274	if (ldebug(uselib))
275		printf(ARGS(uselib, "%s"), library);
276#endif
277
278	a_out = NULL;
279	locked = 0;
280	vp = NULL;
281
282	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
283	    UIO_SYSSPACE, library, td);
284	error = namei(&ni);
285	LFREEPATH(library);
286	if (error)
287		goto cleanup;
288
289	vp = ni.ni_vp;
290	NDFREE(&ni, NDF_ONLY_PNBUF);
291
292	/*
293	 * From here on down, we have a locked vnode that must be unlocked.
294	 * XXX: The code below largely duplicates exec_check_permissions().
295	 */
296	locked = 1;
297
298	/* Writable? */
299	error = VOP_GET_WRITECOUNT(vp, &writecount);
300	if (error != 0)
301		goto cleanup;
302	if (writecount != 0) {
303		error = ETXTBSY;
304		goto cleanup;
305	}
306
307	/* Executable? */
308	error = VOP_GETATTR(vp, &attr, td->td_ucred);
309	if (error)
310		goto cleanup;
311
312	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
313	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCES is what exec(2) returns. */
315		error = ENOEXEC;
316		goto cleanup;
317	}
318
319	/* Sensible size? */
320	if (attr.va_size == 0) {
321		error = ENOEXEC;
322		goto cleanup;
323	}
324
325	/* Can we access it? */
326	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
327	if (error)
328		goto cleanup;
329
330	/*
331	 * XXX: This should use vn_open() so that it is properly authorized,
332	 * and to reduce code redundancy all over the place here.
333	 * XXX: Not really, it duplicates far more of exec_check_permissions()
334	 * than vn_open().
335	 */
336#ifdef MAC
337	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
338	if (error)
339		goto cleanup;
340#endif
341	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
342	if (error)
343		goto cleanup;
344
345	/* Pull in executable header into exec_map */
346	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
347	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
348	if (error)
349		goto cleanup;
350
351	/* Is it a Linux binary ? */
352	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
353		error = ENOEXEC;
354		goto cleanup;
355	}
356
357	/*
358	 * While we are here, we should REALLY do some more checks
359	 */
360
361	/* Set file/virtual offset based on a.out variant. */
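	/*
	 * ZMAGIC images keep the text at a 1 KB file offset after the
	 * header, while QMAGIC images map the header as part of the text
	 * segment (file offset 0).
	 */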
362	switch ((int)(a_out->a_magic & 0xffff)) {
363	case 0413:			/* ZMAGIC */
364		file_offset = 1024;
365		break;
366	case 0314:			/* QMAGIC */
367		file_offset = 0;
368		break;
369	default:
370		error = ENOEXEC;
371		goto cleanup;
372	}
373
374	bss_size = round_page(a_out->a_bss);
375
376	/* Check various fields in header for validity/bounds. */
377	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
378		error = ENOEXEC;
379		goto cleanup;
380	}
381
382	/* text + data can't exceed file size */
383	if (a_out->a_data + a_out->a_text > attr.va_size) {
384		error = EFAULT;
385		goto cleanup;
386	}
387
388	/*
389	 * text/data/bss must not exceed limits
390	 * XXX - this is not complete. it should check current usage PLUS
391	 * the resources needed by this library.
392	 */
393	PROC_LOCK(td->td_proc);
394	if (a_out->a_text > maxtsiz ||
395	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
396	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
397	    bss_size) != 0) {
398		PROC_UNLOCK(td->td_proc);
399		error = ENOMEM;
400		goto cleanup;
401	}
402	PROC_UNLOCK(td->td_proc);
403
404	/*
405	 * Prevent more writers.
406	 * XXX: Note that if any of the VM operations fail below we don't
407	 * clear this flag.
408	 */
409	VOP_SET_TEXT(vp);
410
411	/*
412	 * Lock no longer needed
413	 */
414	locked = 0;
415	VOP_UNLOCK(vp, 0);
416
	/*
	 * Check if file_offset is page aligned. Currently we cannot handle
	 * misaligned file offsets, so we read in the entire image
	 * (what a waste).
	 */
422	if (file_offset & PAGE_MASK) {
423#ifdef DEBUG
424		printf("uselib: Non page aligned binary %lu\n", file_offset);
425#endif
426		/* Map text+data read/write/execute */
427
428		/* a_entry is the load address and is page aligned */
429		vmaddr = trunc_page(a_out->a_entry);
430
431		/* get anon user mapping, read+write+execute */
432		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
433		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
434		    VM_PROT_ALL, VM_PROT_ALL, 0);
435		if (error)
436			goto cleanup;
437
438		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
439		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
440		    td->td_ucred, NOCRED, &aresid, td);
441		if (error != 0)
442			goto cleanup;
443		if (aresid != 0) {
444			error = ENOEXEC;
445			goto cleanup;
446		}
447	} else {
448#ifdef DEBUG
449		printf("uselib: Page aligned binary %lu\n", file_offset);
450#endif
451		/*
452		 * for QMAGIC, a_entry is 20 bytes beyond the load address
453		 * to skip the executable header
454		 */
455		vmaddr = trunc_page(a_out->a_entry);
456
457		/*
458		 * Map it all into the process's space as a single
459		 * copy-on-write "data" segment.
460		 */
461		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
462		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
463		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
464		if (error)
465			goto cleanup;
466	}
467#ifdef DEBUG
468	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0],
469	    ((long *)vmaddr)[1]);
470#endif
471	if (bss_size != 0) {
472		/* Calculate BSS start address */
473		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
474		    a_out->a_data;
475
476		/* allocate some 'anon' space */
477		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
478		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
479		    VM_PROT_ALL, 0);
480		if (error)
481			goto cleanup;
482	}
483
484cleanup:
485	/* Unlock vnode if needed */
486	if (locked)
487		VOP_UNLOCK(vp, 0);
488
489	/* Release the temporary mapping. */
490	if (a_out)
491		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);
492
493	return (error);
494}
495
496#endif	/* __i386__ */
497
498#ifdef LINUX_LEGACY_SYSCALLS
499int
500linux_select(struct thread *td, struct linux_select_args *args)
501{
502	l_timeval ltv;
503	struct timeval tv0, tv1, utv, *tvp;
504	int error;
505
506#ifdef DEBUG
507	if (ldebug(select))
508		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
509		    (void *)args->readfds, (void *)args->writefds,
510		    (void *)args->exceptfds, (void *)args->timeout);
511#endif
512
513	/*
514	 * Store current time for computation of the amount of
515	 * time left.
516	 */
517	if (args->timeout) {
518		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
519			goto select_out;
520		utv.tv_sec = ltv.tv_sec;
521		utv.tv_usec = ltv.tv_usec;
522#ifdef DEBUG
523		if (ldebug(select))
524			printf(LMSG("incoming timeout (%jd/%ld)"),
525			    (intmax_t)utv.tv_sec, utv.tv_usec);
526#endif
527
528		if (itimerfix(&utv)) {
529			/*
530			 * The timeval was invalid.  Convert it to something
531			 * valid that will act as it does under Linux.
532			 */
533			utv.tv_sec += utv.tv_usec / 1000000;
534			utv.tv_usec %= 1000000;
535			if (utv.tv_usec < 0) {
536				utv.tv_sec -= 1;
537				utv.tv_usec += 1000000;
538			}
539			if (utv.tv_sec < 0)
540				timevalclear(&utv);
541		}
542		microtime(&tv0);
543		tvp = &utv;
544	} else
545		tvp = NULL;
546
547	error = kern_select(td, args->nfds, args->readfds, args->writefds,
548	    args->exceptfds, tvp, LINUX_NFDBITS);
549
550#ifdef DEBUG
551	if (ldebug(select))
552		printf(LMSG("real select returns %d"), error);
553#endif
554	if (error)
555		goto select_out;
556
557	if (args->timeout) {
558		if (td->td_retval[0]) {
			/*
			 * Compute how much of the timeout is left by taking
			 * the difference between the current time and the
			 * time before we started the call, and subtracting
			 * that elapsed time from the user-supplied value.
			 */
565			microtime(&tv1);
566			timevalsub(&tv1, &tv0);
567			timevalsub(&utv, &tv1);
568			if (utv.tv_sec < 0)
569				timevalclear(&utv);
570		} else
571			timevalclear(&utv);
572#ifdef DEBUG
573		if (ldebug(select))
574			printf(LMSG("outgoing timeout (%jd/%ld)"),
575			    (intmax_t)utv.tv_sec, utv.tv_usec);
576#endif
577		ltv.tv_sec = utv.tv_sec;
578		ltv.tv_usec = utv.tv_usec;
579		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
580			goto select_out;
581	}
582
583select_out:
584#ifdef DEBUG
585	if (ldebug(select))
586		printf(LMSG("select_out -> %d"), error);
587#endif
588	return (error);
589}
590#endif
591
592int
593linux_mremap(struct thread *td, struct linux_mremap_args *args)
594{
595	uintptr_t addr;
596	size_t len;
597	int error = 0;
598
599#ifdef DEBUG
600	if (ldebug(mremap))
601		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
602		    (void *)(uintptr_t)args->addr,
603		    (unsigned long)args->old_len,
604		    (unsigned long)args->new_len,
605		    (unsigned long)args->flags);
606#endif
607
608	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
609		td->td_retval[0] = 0;
610		return (EINVAL);
611	}
612
	/*
	 * Check for page alignment.
	 * Note that the Linux PAGE_MASK is the complement of the FreeBSD
	 * one, i.e. Linux PAGE_MASK == FreeBSD ~PAGE_MASK.
	 */
617	if (args->addr & PAGE_MASK) {
618		td->td_retval[0] = 0;
619		return (EINVAL);
620	}
621
622	args->new_len = round_page(args->new_len);
623	args->old_len = round_page(args->old_len);
624
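	/* Growing a mapping in place is not supported; only shrinking is. */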
625	if (args->new_len > args->old_len) {
626		td->td_retval[0] = 0;
627		return (ENOMEM);
628	}
629
630	if (args->new_len < args->old_len) {
631		addr = args->addr + args->new_len;
632		len = args->old_len - args->new_len;
633		error = kern_munmap(td, addr, len);
634	}
635
636	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
637	return (error);
638}
639
640#define LINUX_MS_ASYNC       0x0001
641#define LINUX_MS_INVALIDATE  0x0002
642#define LINUX_MS_SYNC        0x0004
643
644int
645linux_msync(struct thread *td, struct linux_msync_args *args)
646{
647
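	/*
	 * LINUX_MS_ASYNC and LINUX_MS_INVALIDATE match the FreeBSD flag
	 * values; LINUX_MS_SYNC is masked off because a FreeBSD msync()
	 * without MS_ASYNC is synchronous anyway.
	 */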
648	return (kern_msync(td, args->addr, args->len,
649	    args->fl & ~LINUX_MS_SYNC));
650}
651
652#ifdef LINUX_LEGACY_SYSCALLS
653int
654linux_time(struct thread *td, struct linux_time_args *args)
655{
656	struct timeval tv;
657	l_time_t tm;
658	int error;
659
660#ifdef DEBUG
661	if (ldebug(time))
662		printf(ARGS(time, "*"));
663#endif
664
665	microtime(&tv);
666	tm = tv.tv_sec;
667	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
668		return (error);
669	td->td_retval[0] = tm;
670	return (0);
671}
672#endif
673
674struct l_times_argv {
675	l_clock_t	tms_utime;
676	l_clock_t	tms_stime;
677	l_clock_t	tms_cutime;
678	l_clock_t	tms_cstime;
679};
680
681
/*
 * Glibc versions prior to 2.2.1 always use a hard-coded CLK_TCK value.
 * Since 2.2.1 glibc uses the value exported from the kernel via the
 * AT_CLKTCK auxiliary vector entry.
 */
687#define	CLK_TCK		100
688
689#define	CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
690#define	CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))
691
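/*
 * CONVTCK() uses the real statistics clock frequency (stclohz) when the
 * emulated kernel version is at least 2.4.0, and the hard-coded CLK_TCK
 * otherwise.
 */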
692#define	CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER_2004000 ?		\
693			    CONVNTCK(r) : CONVOTCK(r))
694
695int
696linux_times(struct thread *td, struct linux_times_args *args)
697{
698	struct timeval tv, utime, stime, cutime, cstime;
699	struct l_times_argv tms;
700	struct proc *p;
701	int error;
702
703#ifdef DEBUG
704	if (ldebug(times))
705		printf(ARGS(times, "*"));
706#endif
707
708	if (args->buf != NULL) {
709		p = td->td_proc;
710		PROC_LOCK(p);
711		PROC_STATLOCK(p);
712		calcru(p, &utime, &stime);
713		PROC_STATUNLOCK(p);
714		calccru(p, &cutime, &cstime);
715		PROC_UNLOCK(p);
716
717		tms.tms_utime = CONVTCK(utime);
718		tms.tms_stime = CONVTCK(stime);
719
720		tms.tms_cutime = CONVTCK(cutime);
721		tms.tms_cstime = CONVTCK(cstime);
722
723		if ((error = copyout(&tms, args->buf, sizeof(tms))))
724			return (error);
725	}
726
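	/* times() returns the time elapsed since boot, in clock ticks. */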
727	microuptime(&tv);
728	td->td_retval[0] = (int)CONVTCK(tv);
729	return (0);
730}
731
732int
733linux_newuname(struct thread *td, struct linux_newuname_args *args)
734{
735	struct l_new_utsname utsname;
736	char osname[LINUX_MAX_UTSNAME];
737	char osrelease[LINUX_MAX_UTSNAME];
738	char *p;
739
740#ifdef DEBUG
741	if (ldebug(newuname))
742		printf(ARGS(newuname, "*"));
743#endif
744
745	linux_get_osname(td, osname);
746	linux_get_osrelease(td, osrelease);
747
748	bzero(&utsname, sizeof(utsname));
749	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
750	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
751	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
752	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
753	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
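	/* The version string contains a newline; chop it off there. */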
754	for (p = utsname.version; *p != '\0'; ++p)
755		if (*p == '\n') {
756			*p = '\0';
757			break;
758		}
759	strlcpy(utsname.machine, linux_kplatform, LINUX_MAX_UTSNAME);
760
761	return (copyout(&utsname, args->buf, sizeof(utsname)));
762}
763
764struct l_utimbuf {
765	l_time_t l_actime;
766	l_time_t l_modtime;
767};
768
769#ifdef LINUX_LEGACY_SYSCALLS
770int
771linux_utime(struct thread *td, struct linux_utime_args *args)
772{
773	struct timeval tv[2], *tvp;
774	struct l_utimbuf lut;
775	char *fname;
776	int error;
777
778	LCONVPATHEXIST(td, args->fname, &fname);
779
780#ifdef DEBUG
781	if (ldebug(utime))
782		printf(ARGS(utime, "%s, *"), fname);
783#endif
784
785	if (args->times) {
786		if ((error = copyin(args->times, &lut, sizeof lut))) {
787			LFREEPATH(fname);
788			return (error);
789		}
790		tv[0].tv_sec = lut.l_actime;
791		tv[0].tv_usec = 0;
792		tv[1].tv_sec = lut.l_modtime;
793		tv[1].tv_usec = 0;
794		tvp = tv;
795	} else
796		tvp = NULL;
797
798	error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, tvp,
799	    UIO_SYSSPACE);
800	LFREEPATH(fname);
801	return (error);
802}
803#endif
804
805#ifdef LINUX_LEGACY_SYSCALLS
806int
807linux_utimes(struct thread *td, struct linux_utimes_args *args)
808{
809	l_timeval ltv[2];
810	struct timeval tv[2], *tvp = NULL;
811	char *fname;
812	int error;
813
814	LCONVPATHEXIST(td, args->fname, &fname);
815
816#ifdef DEBUG
817	if (ldebug(utimes))
818		printf(ARGS(utimes, "%s, *"), fname);
819#endif
820
821	if (args->tptr != NULL) {
822		if ((error = copyin(args->tptr, ltv, sizeof ltv))) {
823			LFREEPATH(fname);
824			return (error);
825		}
826		tv[0].tv_sec = ltv[0].tv_sec;
827		tv[0].tv_usec = ltv[0].tv_usec;
828		tv[1].tv_sec = ltv[1].tv_sec;
829		tv[1].tv_usec = ltv[1].tv_usec;
830		tvp = tv;
831	}
832
833	error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE,
834	    tvp, UIO_SYSSPACE);
835	LFREEPATH(fname);
836	return (error);
837}
838#endif
839
840static int
841linux_utimensat_nsec_valid(l_long nsec)
842{
843
844	if (nsec == LINUX_UTIME_OMIT || nsec == LINUX_UTIME_NOW)
845		return (0);
846	if (nsec >= 0 && nsec <= 999999999)
847		return (0);
848	return (1);
849}
850
851int
852linux_utimensat(struct thread *td, struct linux_utimensat_args *args)
853{
854	struct l_timespec l_times[2];
855	struct timespec times[2], *timesp = NULL;
856	char *path = NULL;
857	int error, dfd, flags = 0;
858
859	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
860
861#ifdef DEBUG
862	if (ldebug(utimensat))
863		printf(ARGS(utimensat, "%d, *"), dfd);
864#endif
865
866	if (args->flags & ~LINUX_AT_SYMLINK_NOFOLLOW)
867		return (EINVAL);
868
869	if (args->times != NULL) {
870		error = copyin(args->times, l_times, sizeof(l_times));
871		if (error != 0)
872			return (error);
873
874		if (linux_utimensat_nsec_valid(l_times[0].tv_nsec) != 0 ||
875		    linux_utimensat_nsec_valid(l_times[1].tv_nsec) != 0)
876			return (EINVAL);
877
878		times[0].tv_sec = l_times[0].tv_sec;
879		switch (l_times[0].tv_nsec)
880		{
881		case LINUX_UTIME_OMIT:
882			times[0].tv_nsec = UTIME_OMIT;
883			break;
884		case LINUX_UTIME_NOW:
885			times[0].tv_nsec = UTIME_NOW;
886			break;
887		default:
888			times[0].tv_nsec = l_times[0].tv_nsec;
889		}
890
891		times[1].tv_sec = l_times[1].tv_sec;
892		switch (l_times[1].tv_nsec)
893		{
894		case LINUX_UTIME_OMIT:
895			times[1].tv_nsec = UTIME_OMIT;
896			break;
897		case LINUX_UTIME_NOW:
898			times[1].tv_nsec = UTIME_NOW;
899			break;
900		default:
901			times[1].tv_nsec = l_times[1].tv_nsec;
902			break;
903		}
904		timesp = times;
905
		/*
		 * This breaks POSIX, but is what the Linux kernel does
		 * _on purpose_ (documented in the man page for utimensat(2)),
		 * so we must follow that behaviour.
		 */
909		if (times[0].tv_nsec == UTIME_OMIT &&
910		    times[1].tv_nsec == UTIME_OMIT)
911			return (0);
912	}
913
914	if (args->pathname != NULL)
915		LCONVPATHEXIST_AT(td, args->pathname, &path, dfd);
916	else if (args->flags != 0)
917		return (EINVAL);
918
919	if (args->flags & LINUX_AT_SYMLINK_NOFOLLOW)
920		flags |= AT_SYMLINK_NOFOLLOW;
921
922	if (path == NULL)
923		error = kern_futimens(td, dfd, timesp, UIO_SYSSPACE);
924	else {
925		error = kern_utimensat(td, dfd, path, UIO_SYSSPACE, timesp,
926			UIO_SYSSPACE, flags);
927		LFREEPATH(path);
928	}
929
930	return (error);
931}
932
933#ifdef LINUX_LEGACY_SYSCALLS
934int
935linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
936{
937	l_timeval ltv[2];
938	struct timeval tv[2], *tvp = NULL;
939	char *fname;
940	int error, dfd;
941
942	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
943	LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);
944
945#ifdef DEBUG
946	if (ldebug(futimesat))
947		printf(ARGS(futimesat, "%s, *"), fname);
948#endif
949
950	if (args->utimes != NULL) {
951		if ((error = copyin(args->utimes, ltv, sizeof ltv))) {
952			LFREEPATH(fname);
953			return (error);
954		}
955		tv[0].tv_sec = ltv[0].tv_sec;
956		tv[0].tv_usec = ltv[0].tv_usec;
957		tv[1].tv_sec = ltv[1].tv_sec;
958		tv[1].tv_usec = ltv[1].tv_usec;
959		tvp = tv;
960	}
961
962	error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
963	LFREEPATH(fname);
964	return (error);
965}
966#endif
967
968int
969linux_common_wait(struct thread *td, int pid, int *status,
970    int options, struct rusage *ru)
971{
972	int error, tmpstat;
973
974	error = kern_wait(td, pid, &tmpstat, options, ru);
975	if (error)
976		return (error);
977
978	if (status) {
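		/*
		 * BSD and Linux use the same wait status layout (termination
		 * signal in bits 0-6, core dump flag in bit 7, exit or stop
		 * code in bits 8-15), but the signal numbers differ, so
		 * translate them; a continued child is reported as 0xffff.
		 */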
979		tmpstat &= 0xffff;
980		if (WIFSIGNALED(tmpstat))
981			tmpstat = (tmpstat & 0xffffff80) |
982			    bsd_to_linux_signal(WTERMSIG(tmpstat));
983		else if (WIFSTOPPED(tmpstat))
984			tmpstat = (tmpstat & 0xffff00ff) |
985			    (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8);
986		else if (WIFCONTINUED(tmpstat))
987			tmpstat = 0xffff;
988		error = copyout(&tmpstat, status, sizeof(int));
989	}
990
991	return (error);
992}
993
994#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
995int
996linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
997{
998	struct linux_wait4_args wait4_args;
999
1000#ifdef DEBUG
1001	if (ldebug(waitpid))
1002		printf(ARGS(waitpid, "%d, %p, %d"),
1003		    args->pid, (void *)args->status, args->options);
1004#endif
1005
1006	wait4_args.pid = args->pid;
1007	wait4_args.status = args->status;
1008	wait4_args.options = args->options;
1009	wait4_args.rusage = NULL;
1010
1011	return (linux_wait4(td, &wait4_args));
1012}
1013#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
1014
1015int
1016linux_wait4(struct thread *td, struct linux_wait4_args *args)
1017{
1018	int error, options;
1019	struct rusage ru, *rup;
1020
1021#ifdef DEBUG
1022	if (ldebug(wait4))
1023		printf(ARGS(wait4, "%d, %p, %d, %p"),
1024		    args->pid, (void *)args->status, args->options,
1025		    (void *)args->rusage);
1026#endif
1027	if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG |
1028	    LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
1029		return (EINVAL);
1030
1031	options = WEXITED;
1032	linux_to_bsd_waitopts(args->options, &options);
1033
1034	if (args->rusage != NULL)
1035		rup = &ru;
1036	else
1037		rup = NULL;
1038	error = linux_common_wait(td, args->pid, args->status, options, rup);
1039	if (error != 0)
1040		return (error);
1041	if (args->rusage != NULL)
1042		error = linux_copyout_rusage(&ru, args->rusage);
1043	return (error);
1044}
1045
1046int
1047linux_waitid(struct thread *td, struct linux_waitid_args *args)
1048{
1049	int status, options, sig;
1050	struct __wrusage wru;
1051	siginfo_t siginfo;
1052	l_siginfo_t lsi;
1053	idtype_t idtype;
1054	struct proc *p;
1055	int error;
1056
1057	options = 0;
1058	linux_to_bsd_waitopts(args->options, &options);
1059
1060	if (options & ~(WNOHANG | WNOWAIT | WEXITED | WUNTRACED | WCONTINUED))
1061		return (EINVAL);
1062	if (!(options & (WEXITED | WUNTRACED | WCONTINUED)))
1063		return (EINVAL);
1064
1065	switch (args->idtype) {
1066	case LINUX_P_ALL:
1067		idtype = P_ALL;
1068		break;
1069	case LINUX_P_PID:
1070		if (args->id <= 0)
1071			return (EINVAL);
1072		idtype = P_PID;
1073		break;
1074	case LINUX_P_PGID:
1075		if (args->id <= 0)
1076			return (EINVAL);
1077		idtype = P_PGID;
1078		break;
1079	default:
1080		return (EINVAL);
1081	}
1082
1083	error = kern_wait6(td, idtype, args->id, &status, options,
1084	    &wru, &siginfo);
1085	if (error != 0)
1086		return (error);
1087	if (args->rusage != NULL) {
1088		error = linux_copyout_rusage(&wru.wru_children,
1089		    args->rusage);
1090		if (error != 0)
1091			return (error);
1092	}
1093	if (args->info != NULL) {
1094		p = td->td_proc;
1095		bzero(&lsi, sizeof(lsi));
1096		if (td->td_retval[0] != 0) {
1097			sig = bsd_to_linux_signal(siginfo.si_signo);
1098			siginfo_to_lsiginfo(&siginfo, &lsi, sig);
1099		}
1100		error = copyout(&lsi, args->info, sizeof(lsi));
1101	}
1102	td->td_retval[0] = 0;
1103
1104	return (error);
1105}
1106
1107#ifdef LINUX_LEGACY_SYSCALLS
1108int
1109linux_mknod(struct thread *td, struct linux_mknod_args *args)
1110{
1111	char *path;
1112	int error;
1113
1114	LCONVPATHCREAT(td, args->path, &path);
1115
1116#ifdef DEBUG
1117	if (ldebug(mknod))
1118		printf(ARGS(mknod, "%s, %d, %ju"), path, args->mode,
1119		    (uintmax_t)args->dev);
1120#endif
1121
1122	switch (args->mode & S_IFMT) {
1123	case S_IFIFO:
1124	case S_IFSOCK:
1125		error = kern_mkfifoat(td, AT_FDCWD, path, UIO_SYSSPACE,
1126		    args->mode);
1127		break;
1128
1129	case S_IFCHR:
1130	case S_IFBLK:
1131		error = kern_mknodat(td, AT_FDCWD, path, UIO_SYSSPACE,
1132		    args->mode, args->dev);
1133		break;
1134
1135	case S_IFDIR:
1136		error = EPERM;
1137		break;
1138
1139	case 0:
1140		args->mode |= S_IFREG;
1141		/* FALLTHROUGH */
1142	case S_IFREG:
1143		error = kern_openat(td, AT_FDCWD, path, UIO_SYSSPACE,
1144		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
1145		if (error == 0)
1146			kern_close(td, td->td_retval[0]);
1147		break;
1148
1149	default:
1150		error = EINVAL;
1151		break;
1152	}
1153	LFREEPATH(path);
1154	return (error);
1155}
1156#endif
1157
1158int
1159linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
1160{
1161	char *path;
1162	int error, dfd;
1163
1164	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
1165	LCONVPATHCREAT_AT(td, args->filename, &path, dfd);
1166
1167#ifdef DEBUG
1168	if (ldebug(mknodat))
1169		printf(ARGS(mknodat, "%s, %d, %d"), path, args->mode, args->dev);
1170#endif
1171
1172	switch (args->mode & S_IFMT) {
1173	case S_IFIFO:
1174	case S_IFSOCK:
1175		error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode);
1176		break;
1177
1178	case S_IFCHR:
1179	case S_IFBLK:
1180		error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode,
1181		    args->dev);
1182		break;
1183
1184	case S_IFDIR:
1185		error = EPERM;
1186		break;
1187
1188	case 0:
1189		args->mode |= S_IFREG;
1190		/* FALLTHROUGH */
1191	case S_IFREG:
1192		error = kern_openat(td, dfd, path, UIO_SYSSPACE,
1193		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
1194		if (error == 0)
1195			kern_close(td, td->td_retval[0]);
1196		break;
1197
1198	default:
1199		error = EINVAL;
1200		break;
1201	}
1202	LFREEPATH(path);
1203	return (error);
1204}
1205
1206/*
1207 * UGH! This is just about the dumbest idea I've ever heard!!
1208 */
1209int
1210linux_personality(struct thread *td, struct linux_personality_args *args)
1211{
1212	struct linux_pemuldata *pem;
1213	struct proc *p = td->td_proc;
1214	uint32_t old;
1215
1216#ifdef DEBUG
1217	if (ldebug(personality))
1218		printf(ARGS(personality, "%u"), args->per);
1219#endif
1220
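	/* 0xffffffff queries the current persona without changing it. */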
1221	PROC_LOCK(p);
1222	pem = pem_find(p);
1223	old = pem->persona;
1224	if (args->per != 0xffffffff)
1225		pem->persona = args->per;
1226	PROC_UNLOCK(p);
1227
1228	td->td_retval[0] = old;
1229	return (0);
1230}
1231
1232struct l_itimerval {
1233	l_timeval it_interval;
1234	l_timeval it_value;
1235};
1236
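/*
 * Copy an itimerval member-by-member; since the BSD and Linux structures
 * share member names this is used for conversions in both directions.
 */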
1237#define	B2L_ITIMERVAL(bip, lip)						\
1238	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
1239	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
1240	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
1241	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;
1242
1243int
1244linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
1245{
1246	int error;
1247	struct l_itimerval ls;
1248	struct itimerval aitv, oitv;
1249
1250#ifdef DEBUG
1251	if (ldebug(setitimer))
1252		printf(ARGS(setitimer, "%p, %p"),
1253		    (void *)uap->itv, (void *)uap->oitv);
1254#endif
1255
1256	if (uap->itv == NULL) {
1257		uap->itv = uap->oitv;
1258		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
1259	}
1260
1261	error = copyin(uap->itv, &ls, sizeof(ls));
1262	if (error != 0)
1263		return (error);
1264	B2L_ITIMERVAL(&aitv, &ls);
1265#ifdef DEBUG
1266	if (ldebug(setitimer)) {
1267		printf("setitimer: value: sec: %jd, usec: %ld\n",
1268		    (intmax_t)aitv.it_value.tv_sec, aitv.it_value.tv_usec);
1269		printf("setitimer: interval: sec: %jd, usec: %ld\n",
1270		    (intmax_t)aitv.it_interval.tv_sec, aitv.it_interval.tv_usec);
1271	}
1272#endif
1273	error = kern_setitimer(td, uap->which, &aitv, &oitv);
1274	if (error != 0 || uap->oitv == NULL)
1275		return (error);
1276	B2L_ITIMERVAL(&ls, &oitv);
1277
1278	return (copyout(&ls, uap->oitv, sizeof(ls)));
1279}
1280
1281int
1282linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
1283{
1284	int error;
1285	struct l_itimerval ls;
1286	struct itimerval aitv;
1287
1288#ifdef DEBUG
1289	if (ldebug(getitimer))
1290		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
1291#endif
1292	error = kern_getitimer(td, uap->which, &aitv);
1293	if (error != 0)
1294		return (error);
1295	B2L_ITIMERVAL(&ls, &aitv);
1296	return (copyout(&ls, uap->itv, sizeof(ls)));
1297}
1298
1299#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
1300int
1301linux_nice(struct thread *td, struct linux_nice_args *args)
1302{
1303	struct setpriority_args bsd_args;
1304
1305	bsd_args.which = PRIO_PROCESS;
1306	bsd_args.who = 0;		/* current process */
1307	bsd_args.prio = args->inc;
1308	return (sys_setpriority(td, &bsd_args));
1309}
1310#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
1311
1312int
1313linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
1314{
1315	struct ucred *newcred, *oldcred;
1316	l_gid_t *linux_gidset;
1317	gid_t *bsd_gidset;
1318	int ngrp, error;
1319	struct proc *p;
1320
1321	ngrp = args->gidsetsize;
1322	if (ngrp < 0 || ngrp >= ngroups_max + 1)
1323		return (EINVAL);
1324	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
1325	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
1326	if (error)
1327		goto out;
1328	newcred = crget();
1329	crextend(newcred, ngrp + 1);
1330	p = td->td_proc;
1331	PROC_LOCK(p);
1332	oldcred = p->p_ucred;
1333	crcopy(newcred, oldcred);
1334
1335	/*
1336	 * cr_groups[0] holds egid. Setting the whole set from
1337	 * the supplied set will cause egid to be changed too.
1338	 * Keep cr_groups[0] unchanged to prevent that.
1339	 */
1340
1341	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS, 0)) != 0) {
1342		PROC_UNLOCK(p);
1343		crfree(newcred);
1344		goto out;
1345	}
1346
1347	if (ngrp > 0) {
1348		newcred->cr_ngroups = ngrp + 1;
1349
1350		bsd_gidset = newcred->cr_groups;
1351		ngrp--;
1352		while (ngrp >= 0) {
1353			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
1354			ngrp--;
1355		}
1356	} else
1357		newcred->cr_ngroups = 1;
1358
1359	setsugid(p);
1360	proc_set_cred(p, newcred);
1361	PROC_UNLOCK(p);
1362	crfree(oldcred);
1363	error = 0;
1364out:
1365	free(linux_gidset, M_LINUX);
1366	return (error);
1367}
1368
1369int
1370linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
1371{
1372	struct ucred *cred;
1373	l_gid_t *linux_gidset;
1374	gid_t *bsd_gidset;
1375	int bsd_gidsetsz, ngrp, error;
1376
1377	cred = td->td_ucred;
1378	bsd_gidset = cred->cr_groups;
1379	bsd_gidsetsz = cred->cr_ngroups - 1;
1380
1381	/*
1382	 * cr_groups[0] holds egid. Returning the whole set
1383	 * here will cause a duplicate. Exclude cr_groups[0]
1384	 * to prevent that.
1385	 */
1386
1387	if ((ngrp = args->gidsetsize) == 0) {
1388		td->td_retval[0] = bsd_gidsetsz;
1389		return (0);
1390	}
1391
1392	if (ngrp < bsd_gidsetsz)
1393		return (EINVAL);
1394
1395	ngrp = 0;
1396	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
1397	    M_LINUX, M_WAITOK);
1398	while (ngrp < bsd_gidsetsz) {
1399		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
1400		ngrp++;
1401	}
1402
1403	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
1404	free(linux_gidset, M_LINUX);
1405	if (error)
1406		return (error);
1407
1408	td->td_retval[0] = ngrp;
1409	return (0);
1410}
1411
1412int
1413linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
1414{
1415	struct rlimit bsd_rlim;
1416	struct l_rlimit rlim;
1417	u_int which;
1418	int error;
1419
1420#ifdef DEBUG
1421	if (ldebug(setrlimit))
1422		printf(ARGS(setrlimit, "%d, %p"),
1423		    args->resource, (void *)args->rlim);
1424#endif
1425
1426	if (args->resource >= LINUX_RLIM_NLIMITS)
1427		return (EINVAL);
1428
1429	which = linux_to_bsd_resource[args->resource];
1430	if (which == -1)
1431		return (EINVAL);
1432
1433	error = copyin(args->rlim, &rlim, sizeof(rlim));
1434	if (error)
1435		return (error);
1436
1437	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
1438	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
1439	return (kern_setrlimit(td, which, &bsd_rlim));
1440}
1441
1442#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
1443int
1444linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
1445{
1446	struct l_rlimit rlim;
1447	struct rlimit bsd_rlim;
1448	u_int which;
1449
1450#ifdef DEBUG
1451	if (ldebug(old_getrlimit))
1452		printf(ARGS(old_getrlimit, "%d, %p"),
1453		    args->resource, (void *)args->rlim);
1454#endif
1455
1456	if (args->resource >= LINUX_RLIM_NLIMITS)
1457		return (EINVAL);
1458
1459	which = linux_to_bsd_resource[args->resource];
1460	if (which == -1)
1461		return (EINVAL);
1462
1463	lim_rlimit(td, which, &bsd_rlim);
1464
1465#ifdef COMPAT_LINUX32
1466	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
1467	if (rlim.rlim_cur == UINT_MAX)
1468		rlim.rlim_cur = INT_MAX;
1469	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
1470	if (rlim.rlim_max == UINT_MAX)
1471		rlim.rlim_max = INT_MAX;
1472#else
1473	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
1474	if (rlim.rlim_cur == ULONG_MAX)
1475		rlim.rlim_cur = LONG_MAX;
1476	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
1477	if (rlim.rlim_max == ULONG_MAX)
1478		rlim.rlim_max = LONG_MAX;
1479#endif
1480	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1481}
1482#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
1483
1484int
1485linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
1486{
1487	struct l_rlimit rlim;
1488	struct rlimit bsd_rlim;
1489	u_int which;
1490
1491#ifdef DEBUG
1492	if (ldebug(getrlimit))
1493		printf(ARGS(getrlimit, "%d, %p"),
1494		    args->resource, (void *)args->rlim);
1495#endif
1496
1497	if (args->resource >= LINUX_RLIM_NLIMITS)
1498		return (EINVAL);
1499
1500	which = linux_to_bsd_resource[args->resource];
1501	if (which == -1)
1502		return (EINVAL);
1503
1504	lim_rlimit(td, which, &bsd_rlim);
1505
1506	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
1507	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
1508	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1509}
1510
1511int
1512linux_sched_setscheduler(struct thread *td,
1513    struct linux_sched_setscheduler_args *args)
1514{
1515	struct sched_param sched_param;
1516	struct thread *tdt;
1517	int error, policy;
1518
1519#ifdef DEBUG
1520	if (ldebug(sched_setscheduler))
1521		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
1522		    args->pid, args->policy, (const void *)args->param);
1523#endif
1524
1525	switch (args->policy) {
1526	case LINUX_SCHED_OTHER:
1527		policy = SCHED_OTHER;
1528		break;
1529	case LINUX_SCHED_FIFO:
1530		policy = SCHED_FIFO;
1531		break;
1532	case LINUX_SCHED_RR:
1533		policy = SCHED_RR;
1534		break;
1535	default:
1536		return (EINVAL);
1537	}
1538
1539	error = copyin(args->param, &sched_param, sizeof(sched_param));
1540	if (error)
1541		return (error);
1542
1543	tdt = linux_tdfind(td, args->pid, -1);
1544	if (tdt == NULL)
1545		return (ESRCH);
1546
1547	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
1548	PROC_UNLOCK(tdt->td_proc);
1549	return (error);
1550}
1551
1552int
1553linux_sched_getscheduler(struct thread *td,
1554    struct linux_sched_getscheduler_args *args)
1555{
1556	struct thread *tdt;
1557	int error, policy;
1558
1559#ifdef DEBUG
1560	if (ldebug(sched_getscheduler))
1561		printf(ARGS(sched_getscheduler, "%d"), args->pid);
1562#endif
1563
1564	tdt = linux_tdfind(td, args->pid, -1);
1565	if (tdt == NULL)
1566		return (ESRCH);
1567
1568	error = kern_sched_getscheduler(td, tdt, &policy);
1569	PROC_UNLOCK(tdt->td_proc);
1570
1571	switch (policy) {
1572	case SCHED_OTHER:
1573		td->td_retval[0] = LINUX_SCHED_OTHER;
1574		break;
1575	case SCHED_FIFO:
1576		td->td_retval[0] = LINUX_SCHED_FIFO;
1577		break;
1578	case SCHED_RR:
1579		td->td_retval[0] = LINUX_SCHED_RR;
1580		break;
1581	}
1582	return (error);
1583}
1584
1585int
1586linux_sched_get_priority_max(struct thread *td,
1587    struct linux_sched_get_priority_max_args *args)
1588{
1589	struct sched_get_priority_max_args bsd;
1590
1591#ifdef DEBUG
1592	if (ldebug(sched_get_priority_max))
1593		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
1594#endif
1595
1596	switch (args->policy) {
1597	case LINUX_SCHED_OTHER:
1598		bsd.policy = SCHED_OTHER;
1599		break;
1600	case LINUX_SCHED_FIFO:
1601		bsd.policy = SCHED_FIFO;
1602		break;
1603	case LINUX_SCHED_RR:
1604		bsd.policy = SCHED_RR;
1605		break;
1606	default:
1607		return (EINVAL);
1608	}
1609	return (sys_sched_get_priority_max(td, &bsd));
1610}
1611
1612int
1613linux_sched_get_priority_min(struct thread *td,
1614    struct linux_sched_get_priority_min_args *args)
1615{
1616	struct sched_get_priority_min_args bsd;
1617
1618#ifdef DEBUG
1619	if (ldebug(sched_get_priority_min))
1620		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
1621#endif
1622
1623	switch (args->policy) {
1624	case LINUX_SCHED_OTHER:
1625		bsd.policy = SCHED_OTHER;
1626		break;
1627	case LINUX_SCHED_FIFO:
1628		bsd.policy = SCHED_FIFO;
1629		break;
1630	case LINUX_SCHED_RR:
1631		bsd.policy = SCHED_RR;
1632		break;
1633	default:
1634		return (EINVAL);
1635	}
1636	return (sys_sched_get_priority_min(td, &bsd));
1637}
1638
1639#define REBOOT_CAD_ON	0x89abcdef
1640#define REBOOT_CAD_OFF	0
1641#define REBOOT_HALT	0xcdef0123
1642#define REBOOT_RESTART	0x01234567
1643#define REBOOT_RESTART2	0xA1B2C3D4
1644#define REBOOT_POWEROFF	0x4321FEDC
1645#define REBOOT_MAGIC1	0xfee1dead
1646#define REBOOT_MAGIC2	0x28121969
1647#define REBOOT_MAGIC2A	0x05121996
1648#define REBOOT_MAGIC2B	0x16041998
1649
1650int
1651linux_reboot(struct thread *td, struct linux_reboot_args *args)
1652{
1653	struct reboot_args bsd_args;
1654
1655#ifdef DEBUG
1656	if (ldebug(reboot))
1657		printf(ARGS(reboot, "0x%x"), args->cmd);
1658#endif
1659
1660	if (args->magic1 != REBOOT_MAGIC1)
1661		return (EINVAL);
1662
1663	switch (args->magic2) {
1664	case REBOOT_MAGIC2:
1665	case REBOOT_MAGIC2A:
1666	case REBOOT_MAGIC2B:
1667		break;
1668	default:
1669		return (EINVAL);
1670	}
1671
1672	switch (args->cmd) {
1673	case REBOOT_CAD_ON:
1674	case REBOOT_CAD_OFF:
1675		return (priv_check(td, PRIV_REBOOT));
1676	case REBOOT_HALT:
1677		bsd_args.opt = RB_HALT;
1678		break;
1679	case REBOOT_RESTART:
1680	case REBOOT_RESTART2:
1681		bsd_args.opt = 0;
1682		break;
1683	case REBOOT_POWEROFF:
1684		bsd_args.opt = RB_POWEROFF;
1685		break;
1686	default:
1687		return (EINVAL);
1688	}
1689	return (sys_reboot(td, &bsd_args));
1690}
1691
1692
/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 is defined. This clobbers registers that
 * are assumed to be preserved. The following lightweight syscalls fix
 * this. See also linux_getgid16() and linux_getuid16() in linux_uid16.c.
 *
 * linux_getpid() - MP SAFE
 * linux_getgid() - MP SAFE
 * linux_getuid() - MP SAFE
 */
1703
1704int
1705linux_getpid(struct thread *td, struct linux_getpid_args *args)
1706{
1707
1708#ifdef DEBUG
1709	if (ldebug(getpid))
1710		printf(ARGS(getpid, ""));
1711#endif
1712	td->td_retval[0] = td->td_proc->p_pid;
1713
1714	return (0);
1715}
1716
1717int
1718linux_gettid(struct thread *td, struct linux_gettid_args *args)
1719{
1720	struct linux_emuldata *em;
1721
1722#ifdef DEBUG
1723	if (ldebug(gettid))
1724		printf(ARGS(gettid, ""));
1725#endif
1726
1727	em = em_find(td);
1728	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));
1729
1730	td->td_retval[0] = em->em_tid;
1731
1732	return (0);
1733}
1734
1735
1736int
1737linux_getppid(struct thread *td, struct linux_getppid_args *args)
1738{
1739
1740#ifdef DEBUG
1741	if (ldebug(getppid))
1742		printf(ARGS(getppid, ""));
1743#endif
1744
1745	td->td_retval[0] = kern_getppid(td);
1746	return (0);
1747}
1748
1749int
1750linux_getgid(struct thread *td, struct linux_getgid_args *args)
1751{
1752
1753#ifdef DEBUG
1754	if (ldebug(getgid))
1755		printf(ARGS(getgid, ""));
1756#endif
1757
1758	td->td_retval[0] = td->td_ucred->cr_rgid;
1759	return (0);
1760}
1761
1762int
1763linux_getuid(struct thread *td, struct linux_getuid_args *args)
1764{
1765
1766#ifdef DEBUG
1767	if (ldebug(getuid))
1768		printf(ARGS(getuid, ""));
1769#endif
1770
1771	td->td_retval[0] = td->td_ucred->cr_ruid;
1772	return (0);
1773}
1774
1775
1776int
1777linux_getsid(struct thread *td, struct linux_getsid_args *args)
1778{
1779	struct getsid_args bsd;
1780
1781#ifdef DEBUG
1782	if (ldebug(getsid))
1783		printf(ARGS(getsid, "%i"), args->pid);
1784#endif
1785
1786	bsd.pid = args->pid;
1787	return (sys_getsid(td, &bsd));
1788}
1789
1790int
1791linux_nosys(struct thread *td, struct nosys_args *ignore)
1792{
1793
1794	return (ENOSYS);
1795}
1796
1797int
1798linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
1799{
1800	struct getpriority_args bsd_args;
1801	int error;
1802
1803#ifdef DEBUG
1804	if (ldebug(getpriority))
1805		printf(ARGS(getpriority, "%i, %i"), args->which, args->who);
1806#endif
1807
1808	bsd_args.which = args->which;
1809	bsd_args.who = args->who;
1810	error = sys_getpriority(td, &bsd_args);
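	/*
	 * The Linux syscall returns the nice value biased as 20 - nice
	 * (a value in the range 1..40) so it cannot be mistaken for a
	 * negative errno; the C library undoes the bias.
	 */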
1811	td->td_retval[0] = 20 - td->td_retval[0];
1812	return (error);
1813}
1814
1815int
1816linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
1817{
1818	int name[2];
1819
1820#ifdef DEBUG
1821	if (ldebug(sethostname))
1822		printf(ARGS(sethostname, "*, %i"), args->len);
1823#endif
1824
1825	name[0] = CTL_KERN;
1826	name[1] = KERN_HOSTNAME;
1827	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
1828	    args->len, 0, 0));
1829}
1830
1831int
1832linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
1833{
1834	int name[2];
1835
1836#ifdef DEBUG
1837	if (ldebug(setdomainname))
1838		printf(ARGS(setdomainname, "*, %i"), args->len);
1839#endif
1840
1841	name[0] = CTL_KERN;
1842	name[1] = KERN_NISDOMAINNAME;
1843	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
1844	    args->len, 0, 0));
1845}
1846
1847int
1848linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
1849{
1850
1851#ifdef DEBUG
1852	if (ldebug(exit_group))
1853		printf(ARGS(exit_group, "%i"), args->error_code);
1854#endif
1855
1856	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
1857	    args->error_code);
1858
	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
1864	exit1(td, args->error_code, 0);
1865		/* NOTREACHED */
1866}
1867
1868#define _LINUX_CAPABILITY_VERSION_1  0x19980330
1869#define _LINUX_CAPABILITY_VERSION_2  0x20071026
1870#define _LINUX_CAPABILITY_VERSION_3  0x20080522
1871
1872struct l_user_cap_header {
1873	l_int	version;
1874	l_int	pid;
1875};
1876
1877struct l_user_cap_data {
1878	l_int	effective;
1879	l_int	permitted;
1880	l_int	inheritable;
1881};
1882
1883int
1884linux_capget(struct thread *td, struct linux_capget_args *uap)
1885{
1886	struct l_user_cap_header luch;
1887	struct l_user_cap_data lucd[2];
1888	int error, u32s;
1889
1890	if (uap->hdrp == NULL)
1891		return (EFAULT);
1892
1893	error = copyin(uap->hdrp, &luch, sizeof(luch));
1894	if (error != 0)
1895		return (error);
1896
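	/*
	 * Capability API version 1 uses a single 32-bit set (one
	 * l_user_cap_data); versions 2 and 3 use two to hold the 64-bit
	 * capability sets.
	 */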
1897	switch (luch.version) {
1898	case _LINUX_CAPABILITY_VERSION_1:
1899		u32s = 1;
1900		break;
1901	case _LINUX_CAPABILITY_VERSION_2:
1902	case _LINUX_CAPABILITY_VERSION_3:
1903		u32s = 2;
1904		break;
1905	default:
1906#ifdef DEBUG
1907		if (ldebug(capget))
1908			printf(LMSG("invalid capget capability version 0x%x"),
1909			    luch.version);
1910#endif
1911		luch.version = _LINUX_CAPABILITY_VERSION_1;
1912		error = copyout(&luch, uap->hdrp, sizeof(luch));
1913		if (error)
1914			return (error);
1915		return (EINVAL);
1916	}
1917
1918	if (luch.pid)
1919		return (EPERM);
1920
1921	if (uap->datap) {
1922		/*
1923		 * The current implementation doesn't support setting
1924		 * a capability (it's essentially a stub) so indicate
1925		 * that no capabilities are currently set or available
1926		 * to request.
1927		 */
1928		memset(&lucd, 0, u32s * sizeof(lucd[0]));
1929		error = copyout(&lucd, uap->datap, u32s * sizeof(lucd[0]));
1930	}
1931
1932	return (error);
1933}
1934
1935int
1936linux_capset(struct thread *td, struct linux_capset_args *uap)
1937{
1938	struct l_user_cap_header luch;
1939	struct l_user_cap_data lucd[2];
1940	int error, i, u32s;
1941
1942	if (uap->hdrp == NULL || uap->datap == NULL)
1943		return (EFAULT);
1944
1945	error = copyin(uap->hdrp, &luch, sizeof(luch));
1946	if (error != 0)
1947		return (error);
1948
1949	switch (luch.version) {
1950	case _LINUX_CAPABILITY_VERSION_1:
1951		u32s = 1;
1952		break;
1953	case _LINUX_CAPABILITY_VERSION_2:
1954	case _LINUX_CAPABILITY_VERSION_3:
1955		u32s = 2;
1956		break;
1957	default:
1958#ifdef DEBUG
1959		if (ldebug(capset))
1960			printf(LMSG("invalid capset capability version 0x%x"),
1961			    luch.version);
1962#endif
1963		luch.version = _LINUX_CAPABILITY_VERSION_1;
1964		error = copyout(&luch, uap->hdrp, sizeof(luch));
1965		if (error)
1966			return (error);
1967		return (EINVAL);
1968	}
1969
1970	if (luch.pid)
1971		return (EPERM);
1972
1973	error = copyin(uap->datap, &lucd, u32s * sizeof(lucd[0]));
1974	if (error != 0)
1975		return (error);
1976
1977	/* We currently don't support setting any capabilities. */
1978	for (i = 0; i < u32s; i++) {
1979		if (lucd[i].effective || lucd[i].permitted ||
1980		    lucd[i].inheritable) {
1981			linux_msg(td,
1982			    "capset[%d] effective=0x%x, permitted=0x%x, "
1983			    "inheritable=0x%x is not implemented", i,
1984			    (int)lucd[i].effective, (int)lucd[i].permitted,
1985			    (int)lucd[i].inheritable);
1986			return (EPERM);
1987		}
1988	}
1989
1990	return (0);
1991}
1992
1993int
1994linux_prctl(struct thread *td, struct linux_prctl_args *args)
1995{
1996	int error = 0, max_size;
1997	struct proc *p = td->td_proc;
1998	char comm[LINUX_MAX_COMM_LEN];
1999	int pdeath_signal;
2000
2001#ifdef DEBUG
2002	if (ldebug(prctl))
2003		printf(ARGS(prctl, "%d, %ju, %ju, %ju, %ju"), args->option,
2004		    (uintmax_t)args->arg2, (uintmax_t)args->arg3,
2005		    (uintmax_t)args->arg4, (uintmax_t)args->arg5);
2006#endif
2007
2008	switch (args->option) {
2009	case LINUX_PR_SET_PDEATHSIG:
2010		if (!LINUX_SIG_VALID(args->arg2))
2011			return (EINVAL);
2012		pdeath_signal = linux_to_bsd_signal(args->arg2);
2013		return (kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_CTL,
2014		    &pdeath_signal));
2015	case LINUX_PR_GET_PDEATHSIG:
2016		error = kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_STATUS,
2017		    &pdeath_signal);
2018		if (error != 0)
2019			return (error);
2020		pdeath_signal = bsd_to_linux_signal(pdeath_signal);
2021		return (copyout(&pdeath_signal,
2022		    (void *)(register_t)args->arg2,
2023		    sizeof(pdeath_signal)));
2024		break;
2025	case LINUX_PR_GET_KEEPCAPS:
2026		/*
2027		 * Indicate that we always clear the effective and
2028		 * permitted capability sets when the user id becomes
2029		 * non-zero (actually the capability sets are simply
2030		 * always zero in the current implementation).
2031		 */
2032		td->td_retval[0] = 0;
2033		break;
2034	case LINUX_PR_SET_KEEPCAPS:
2035		/*
2036		 * Ignore requests to keep the effective and permitted
2037		 * capability sets when the user id becomes non-zero.
2038		 */
2039		break;
2040	case LINUX_PR_SET_NAME:
2041		/*
2042		 * To be on the safe side we need to make sure to not
2043		 * overflow the size a Linux program expects. We already
2044		 * do this here in the copyin, so that we don't need to
2045		 * check on copyout.
2046		 */
2047		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
2048		error = copyinstr((void *)(register_t)args->arg2, comm,
2049		    max_size, NULL);
2050
2051		/* Linux silently truncates the name if it is too long. */
2052		if (error == ENAMETOOLONG) {
2053			/*
2054			 * XXX: copyinstr() isn't documented to populate the
2055			 * array completely, so do a copyin() to be on the
2056			 * safe side. This should be changed in case
2057			 * copyinstr() is changed to guarantee this.
2058			 */
2059			error = copyin((void *)(register_t)args->arg2, comm,
2060			    max_size - 1);
2061			comm[max_size - 1] = '\0';
2062		}
2063		if (error)
2064			return (error);
2065
2066		PROC_LOCK(p);
2067		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
2068		PROC_UNLOCK(p);
2069		break;
2070	case LINUX_PR_GET_NAME:
2071		PROC_LOCK(p);
2072		strlcpy(comm, p->p_comm, sizeof(comm));
2073		PROC_UNLOCK(p);
2074		error = copyout(comm, (void *)(register_t)args->arg2,
2075		    strlen(comm) + 1);
2076		break;
2077	default:
2078		error = EINVAL;
2079		break;
2080	}
2081
2082	return (error);
2083}
2084
2085int
2086linux_sched_setparam(struct thread *td,
2087    struct linux_sched_setparam_args *uap)
2088{
2089	struct sched_param sched_param;
2090	struct thread *tdt;
2091	int error;
2092
2093#ifdef DEBUG
2094	if (ldebug(sched_setparam))
2095		printf(ARGS(sched_setparam, "%d, *"), uap->pid);
2096#endif
2097
2098	error = copyin(uap->param, &sched_param, sizeof(sched_param));
2099	if (error)
2100		return (error);
2101
2102	tdt = linux_tdfind(td, uap->pid, -1);
2103	if (tdt == NULL)
2104		return (ESRCH);
2105
2106	error = kern_sched_setparam(td, tdt, &sched_param);
2107	PROC_UNLOCK(tdt->td_proc);
2108	return (error);
2109}
2110
2111int
2112linux_sched_getparam(struct thread *td,
2113    struct linux_sched_getparam_args *uap)
2114{
2115	struct sched_param sched_param;
2116	struct thread *tdt;
2117	int error;
2118
2119#ifdef DEBUG
2120	if (ldebug(sched_getparam))
2121		printf(ARGS(sched_getparam, "%d, *"), uap->pid);
2122#endif
2123
2124	tdt = linux_tdfind(td, uap->pid, -1);
2125	if (tdt == NULL)
2126		return (ESRCH);
2127
2128	error = kern_sched_getparam(td, tdt, &sched_param);
2129	PROC_UNLOCK(tdt->td_proc);
2130	if (error == 0)
2131		error = copyout(&sched_param, uap->param,
2132		    sizeof(sched_param));
2133	return (error);
2134}
2135
2136/*
2137 * Get affinity of a process.
2138 */
2139int
2140linux_sched_getaffinity(struct thread *td,
2141    struct linux_sched_getaffinity_args *args)
2142{
2143	int error;
2144	struct thread *tdt;
2145
2146#ifdef DEBUG
2147	if (ldebug(sched_getaffinity))
2148		printf(ARGS(sched_getaffinity, "%d, %d, *"), args->pid,
2149		    args->len);
2150#endif
2151	if (args->len < sizeof(cpuset_t))
2152		return (EINVAL);
2153
2154	tdt = linux_tdfind(td, args->pid, -1);
2155	if (tdt == NULL)
2156		return (ESRCH);
2157
2158	PROC_UNLOCK(tdt->td_proc);
2159
2160	error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
2161	    tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr);
2162	if (error == 0)
2163		td->td_retval[0] = sizeof(cpuset_t);
2164
2165	return (error);
2166}
2167
2168/*
2169 * Set affinity of a process.
2170 */
2171int
2172linux_sched_setaffinity(struct thread *td,
2173    struct linux_sched_setaffinity_args *args)
2174{
2175	struct thread *tdt;
2176
2177#ifdef DEBUG
2178	if (ldebug(sched_setaffinity))
2179		printf(ARGS(sched_setaffinity, "%d, %d, *"), args->pid,
2180		    args->len);
2181#endif
2182	if (args->len < sizeof(cpuset_t))
2183		return (EINVAL);
2184
2185	tdt = linux_tdfind(td, args->pid, -1);
2186	if (tdt == NULL)
2187		return (ESRCH);
2188
2189	PROC_UNLOCK(tdt->td_proc);
2190
2191	return (kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
2192	    tdt->td_tid, sizeof(cpuset_t), (cpuset_t *) args->user_mask_ptr));
2193}
2194
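/*
 * Linux rlimit64 as used by prlimit64(2); both limits are unsigned
 * 64-bit values regardless of the native rlimit representation.
 */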
2195struct linux_rlimit64 {
2196	uint64_t	rlim_cur;
2197	uint64_t	rlim_max;
2198};
2199
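/*
 * Linux prlimit64(2): get and/or set a resource limit of an arbitrary
 * process.  The old limit, if requested, is copied out before the new
 * limit is installed.
 */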
2200int
2201linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args)
2202{
2203	struct rlimit rlim, nrlim;
2204	struct linux_rlimit64 lrlim;
2205	struct proc *p;
2206	u_int which;
2207	int flags;
2208	int error;
2209
2210#ifdef DEBUG
2211	if (ldebug(prlimit64))
2212		printf(ARGS(prlimit64, "%d, %d, %p, %p"), args->pid,
2213		    args->resource, (void *)args->new, (void *)args->old);
2214#endif
2215
2216	if (args->resource >= LINUX_RLIM_NLIMITS)
2217		return (EINVAL);
2218
2219	which = linux_to_bsd_resource[args->resource];
2220	if (which == -1)
2221		return (EINVAL);
2222
2223	if (args->new != NULL) {
2224		/*
2225		 * Note: unlike FreeBSD, where rlim is signed 64-bit, the
2226		 * Linux rlim is unsigned 64-bit.  FreeBSD treats negative
2227		 * limits as INFINITY, so no conversion is needed here.
2228		 */
2229		error = copyin(args->new, &nrlim, sizeof(nrlim));
2230		if (error != 0)
2231			return (error);
2232	}
2233
2234	flags = PGET_HOLD | PGET_NOTWEXIT;
2235	if (args->new != NULL)
2236		flags |= PGET_CANDEBUG;
2237	else
2238		flags |= PGET_CANSEE;
2239	error = pget(args->pid, flags, &p);
2240	if (error != 0)
2241		return (error);
2242
2243	if (args->old != NULL) {
2244		PROC_LOCK(p);
2245		lim_rlimit_proc(p, which, &rlim);
2246		PROC_UNLOCK(p);
2247		if (rlim.rlim_cur == RLIM_INFINITY)
2248			lrlim.rlim_cur = LINUX_RLIM_INFINITY;
2249		else
2250			lrlim.rlim_cur = rlim.rlim_cur;
2251		if (rlim.rlim_max == RLIM_INFINITY)
2252			lrlim.rlim_max = LINUX_RLIM_INFINITY;
2253		else
2254			lrlim.rlim_max = rlim.rlim_max;
2255		error = copyout(&lrlim, args->old, sizeof(lrlim));
2256		if (error != 0)
2257			goto out;
2258	}
2259
2260	if (args->new != NULL)
2261		error = kern_proc_setrlimit(td, p, which, &nrlim);
2262
2263 out:
2264	PRELE(p);
2265	return (error);
2266}
2267
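/*
 * Linux pselect6(2): the sig argument points at a structure carrying
 * the signal set pointer and its size rather than at the set itself.
 * Unpack it, convert the set and the timeout, and hand off to
 * kern_pselect().  The time left is written back to the user-supplied
 * timespec on success.
 */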
2268int
2269linux_pselect6(struct thread *td, struct linux_pselect6_args *args)
2270{
2271	struct timeval utv, tv0, tv1, *tvp;
2272	struct l_pselect6arg lpse6;
2273	struct l_timespec lts;
2274	struct timespec uts;
2275	l_sigset_t l_ss;
2276	sigset_t *ssp;
2277	sigset_t ss;
2278	int error;
2279
2280	ssp = NULL;
2281	if (args->sig != NULL) {
2282		error = copyin(args->sig, &lpse6, sizeof(lpse6));
2283		if (error != 0)
2284			return (error);
2285		if (lpse6.ss_len != sizeof(l_ss))
2286			return (EINVAL);
2287		if (lpse6.ss != 0) {
2288			error = copyin(PTRIN(lpse6.ss), &l_ss,
2289			    sizeof(l_ss));
2290			if (error != 0)
2291				return (error);
2292			linux_to_bsd_sigset(&l_ss, &ss);
2293			ssp = &ss;
2294		}
2295	}
2296
2297	/*
2298	 * Currently glibc converts the nanosecond value to microseconds.
2299	 * This loses precision, but in practice the loss is rarely noticeable.
2300	 */
2301	if (args->tsp != NULL) {
2302		error = copyin(args->tsp, &lts, sizeof(lts));
2303		if (error != 0)
2304			return (error);
2305		error = linux_to_native_timespec(&uts, &lts);
2306		if (error != 0)
2307			return (error);
2308
2309		TIMESPEC_TO_TIMEVAL(&utv, &uts);
2310		if (itimerfix(&utv))
2311			return (EINVAL);
2312
2313		microtime(&tv0);
2314		tvp = &utv;
2315	} else
2316		tvp = NULL;
2317
2318	error = kern_pselect(td, args->nfds, args->readfds, args->writefds,
2319	    args->exceptfds, tvp, ssp, LINUX_NFDBITS);
2320
2321	if (error == 0 && args->tsp != NULL) {
2322		if (td->td_retval[0] != 0) {
2323			/*
2324			 * Compute how much time was left of the timeout,
2325			 * by subtracting the current time and the time
2326			 * before we started the call, and subtracting
2327			 * that result from the user-supplied value.
2328			 */
2329
2330			microtime(&tv1);
2331			timevalsub(&tv1, &tv0);
2332			timevalsub(&utv, &tv1);
2333			if (utv.tv_sec < 0)
2334				timevalclear(&utv);
2335		} else
2336			timevalclear(&utv);
2337
2338		TIMEVAL_TO_TIMESPEC(&utv, &uts);
2339
2340		error = native_to_linux_timespec(&lts, &uts);
2341		if (error == 0)
2342			error = copyout(&lts, args->tsp, sizeof(lts));
2343	}
2344
2345	return (error);
2346}
2347
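/*
 * Linux ppoll(2): poll(2) with an optional signal mask and a timespec
 * timeout.  The unslept time is copied back to userspace on success.
 */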
2348int
2349linux_ppoll(struct thread *td, struct linux_ppoll_args *args)
2350{
2351	struct timespec ts0, ts1;
2352	struct l_timespec lts;
2353	struct timespec uts, *tsp;
2354	l_sigset_t l_ss;
2355	sigset_t *ssp;
2356	sigset_t ss;
2357	int error;
2358
2359	if (args->sset != NULL) {
2360		if (args->ssize != sizeof(l_ss))
2361			return (EINVAL);
2362		error = copyin(args->sset, &l_ss, sizeof(l_ss));
2363		if (error)
2364			return (error);
2365		linux_to_bsd_sigset(&l_ss, &ss);
2366		ssp = &ss;
2367	} else
2368		ssp = NULL;
2369	if (args->tsp != NULL) {
2370		error = copyin(args->tsp, &lts, sizeof(lts));
2371		if (error)
2372			return (error);
2373		error = linux_to_native_timespec(&uts, &lts);
2374		if (error != 0)
2375			return (error);
2376
2377		nanotime(&ts0);
2378		tsp = &uts;
2379	} else
2380		tsp = NULL;
2381
2382	error = kern_poll(td, args->fds, args->nfds, tsp, ssp);
2383
2384	if (error == 0 && args->tsp != NULL) {
2385		if (td->td_retval[0]) {
2386			nanotime(&ts1);
2387			timespecsub(&ts1, &ts0);
2388			timespecsub(&uts, &ts1);
2389			if (uts.tv_sec < 0)
2390				timespecclear(&uts);
2391		} else
2392			timespecclear(&uts);
2393
2394		error = native_to_linux_timespec(&lts, &uts);
2395		if (error == 0)
2396			error = copyout(&lts, args->tsp, sizeof(lts));
2397	}
2398
2399	return (error);
2400}
2401
2402#if defined(DEBUG) || defined(KTR)
2403/* XXX: can be removed once all ldebug(...) and KTR usage is removed. */
2404
2405#ifdef COMPAT_LINUX32
2406#define	L_MAXSYSCALL	LINUX32_SYS_MAXSYSCALL
2407#else
2408#define	L_MAXSYSCALL	LINUX_SYS_MAXSYSCALL
2409#endif
2410
2411u_char linux_debug_map[howmany(L_MAXSYSCALL, sizeof(u_char))];
2412
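/*
 * Turn debug tracing of one syscall on or off; a nonzero toggle clears
 * the syscall's bit in linux_debug_map, zero sets it.  When global is
 * set the whole map is filled accordingly.
 */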
2413static int
2414linux_debug(int syscall, int toggle, int global)
2415{
2416
2417	if (global) {
2418		char c = toggle ? 0 : 0xff;
2419
2420		memset(linux_debug_map, c, sizeof(linux_debug_map));
2421		return (0);
2422	}
2423	if (syscall < 0 || syscall >= L_MAXSYSCALL)
2424		return (EINVAL);
2425	if (toggle)
2426		clrbit(linux_debug_map, syscall);
2427	else
2428		setbit(linux_debug_map, syscall);
2429	return (0);
2430}
2431#undef L_MAXSYSCALL
2432
2433/*
2434 * Usage: sysctl linux.debug=<syscall_nr>.<0/1>
2435 *
2436 *    E.g.: sysctl linux.debug=21.0
2437 *
2438 * As a special case, syscall "all" will apply to all syscalls globally.
2439 */
2440#define LINUX_MAX_DEBUGSTR	16
2441int
2442linux_sysctl_debug(SYSCTL_HANDLER_ARGS)
2443{
2444	char value[LINUX_MAX_DEBUGSTR], *p;
2445	int error, sysc, toggle;
2446	int global = 0;
2447
2448	value[0] = '\0';
2449	error = sysctl_handle_string(oidp, value, LINUX_MAX_DEBUGSTR, req);
2450	if (error || req->newptr == NULL)
2451		return (error);
2452	for (p = value; *p != '\0' && *p != '.'; p++);
2453	if (*p == '\0')
2454		return (EINVAL);
2455	*p++ = '\0';
2456	sysc = strtol(value, NULL, 0);
2457	toggle = strtol(p, NULL, 0);
2458	if (strcmp(value, "all") == 0)
2459		global = 1;
2460	error = linux_debug(sysc, toggle, global);
2461	return (error);
2462}
2463
2464#endif /* DEBUG || KTR */
2465
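/*
 * Linux sched_rr_get_interval(2): report the round-robin time quantum
 * of the target thread as a Linux timespec.
 */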
2466int
2467linux_sched_rr_get_interval(struct thread *td,
2468    struct linux_sched_rr_get_interval_args *uap)
2469{
2470	struct timespec ts;
2471	struct l_timespec lts;
2472	struct thread *tdt;
2473	int error;
2474
2475	/*
2476	 * According to the manual page, EINVAL should be returned
2477	 * when an invalid pid is specified.
2478	 */
2479	if (uap->pid < 0)
2480		return (EINVAL);
2481
2482	tdt = linux_tdfind(td, uap->pid, -1);
2483	if (tdt == NULL)
2484		return (ESRCH);
2485
2486	error = kern_sched_rr_get_interval_td(td, tdt, &ts);
2487	PROC_UNLOCK(tdt->td_proc);
2488	if (error != 0)
2489		return (error);
2490	error = native_to_linux_timespec(&lts, &ts);
2491	if (error != 0)
2492		return (error);
2493	return (copyout(&lts, uap->interval, sizeof(lts)));
2494}
2495
2496/*
2497 * When a Linux thread is the initial thread in its thread group,
2498 * its thread id is equal to the process id.
2499 * Glibc depends on this magic (assert in pthread_getattr_np.c).
2500 */
2501struct thread *
2502linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
2503{
2504	struct linux_emuldata *em;
2505	struct thread *tdt;
2506	struct proc *p;
2507
2508	tdt = NULL;
2509	if (tid == 0 || tid == td->td_tid) {
2510		tdt = td;
2511		PROC_LOCK(tdt->td_proc);
2512	} else if (tid > PID_MAX)
2513		tdt = tdfind(tid, pid);
2514	else {
2515		/*
2516		 * Initial thread, where the tid is equal to the pid.
2517		 */
2518		p = pfind(tid);
2519		if (p != NULL) {
2520			if (SV_PROC_ABI(p) != SV_ABI_LINUX) {
2521				/*
2522				 * p is not a Linuxulator process.
2523				 */
2524				PROC_UNLOCK(p);
2525				return (NULL);
2526			}
2527			FOREACH_THREAD_IN_PROC(p, tdt) {
2528				em = em_find(tdt);
2529				if (tid == em->em_tid)
2530					return (tdt);
2531			}
2532			PROC_UNLOCK(p);
2533		}
2534		return (NULL);
2535	}
2536
2537	return (tdt);
2538}
2539
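/*
 * Translate Linux wait(2) option flags into their native counterparts;
 * __WCLONE is mapped to WLINUXCLONE.
 */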
2540void
2541linux_to_bsd_waitopts(int options, int *bsdopts)
2542{
2543
2544	if (options & LINUX_WNOHANG)
2545		*bsdopts |= WNOHANG;
2546	if (options & LINUX_WUNTRACED)
2547		*bsdopts |= WUNTRACED;
2548	if (options & LINUX_WEXITED)
2549		*bsdopts |= WEXITED;
2550	if (options & LINUX_WCONTINUED)
2551		*bsdopts |= WCONTINUED;
2552	if (options & LINUX_WNOWAIT)
2553		*bsdopts |= WNOWAIT;
2554
2555	if (options & __WCLONE)
2556		*bsdopts |= WLINUXCLONE;
2557}
2558
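/*
 * Linux getrandom(2): fill the user buffer from the kernel random
 * device.  Only the GRND_NONBLOCK and GRND_RANDOM flags are accepted,
 * and the request is silently capped at INT_MAX bytes.
 */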
2559int
2560linux_getrandom(struct thread *td, struct linux_getrandom_args *args)
2561{
2562	struct uio uio;
2563	struct iovec iov;
2564	int error;
2565
2566	if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM))
2567		return (EINVAL);
2568	if (args->count > INT_MAX)
2569		args->count = INT_MAX;
2570
2571	iov.iov_base = args->buf;
2572	iov.iov_len = args->count;
2573
2574	uio.uio_iov = &iov;
2575	uio.uio_iovcnt = 1;
2576	uio.uio_resid = iov.iov_len;
2577	uio.uio_segflg = UIO_USERSPACE;
2578	uio.uio_rw = UIO_READ;
2579	uio.uio_td = td;
2580
2581	error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK);
2582	if (error == 0)
2583		td->td_retval[0] = args->count - uio.uio_resid;
2584	return (error);
2585}
2586
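/*
 * Linux mincore(2): the start address must be page aligned; everything
 * else is handled by the native kern_mincore().
 */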
2587int
2588linux_mincore(struct thread *td, struct linux_mincore_args *args)
2589{
2590
2591	/* Needs to be page-aligned */
2592	if (args->start & PAGE_MASK)
2593		return (EINVAL);
2594	return (kern_mincore(td, args->start, args->len, args->vec));
2595}
2596