linux_misc.c revision 293528
/*-
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: stable/10/sys/compat/linux/linux_misc.c 293528 2016-01-09 16:11:09Z dchagin $"); 32 33#include "opt_compat.h" 34#include "opt_kdtrace.h" 35 36#include <sys/param.h> 37#include <sys/blist.h> 38#include <sys/fcntl.h> 39#if defined(__i386__) 40#include <sys/imgact_aout.h> 41#endif 42#include <sys/jail.h> 43#include <sys/kernel.h> 44#include <sys/limits.h> 45#include <sys/lock.h> 46#include <sys/malloc.h> 47#include <sys/mman.h> 48#include <sys/mount.h> 49#include <sys/mutex.h> 50#include <sys/namei.h> 51#include <sys/priv.h> 52#include <sys/proc.h> 53#include <sys/reboot.h> 54#include <sys/racct.h> 55#include <sys/resourcevar.h> 56#include <sys/sched.h> 57#include <sys/sdt.h> 58#include <sys/signalvar.h> 59#include <sys/stat.h> 60#include <sys/syscallsubr.h> 61#include <sys/sysctl.h> 62#include <sys/sysproto.h> 63#include <sys/systm.h> 64#include <sys/time.h> 65#include <sys/vmmeter.h> 66#include <sys/vnode.h> 67#include <sys/wait.h> 68#include <sys/cpuset.h> 69 70#include <security/mac/mac_framework.h> 71 72#include <vm/vm.h> 73#include <vm/pmap.h> 74#include <vm/vm_kern.h> 75#include <vm/vm_map.h> 76#include <vm/vm_extern.h> 77#include <vm/vm_object.h> 78#include <vm/swap_pager.h> 79 80#ifdef COMPAT_LINUX32 81#include <machine/../linux32/linux.h> 82#include <machine/../linux32/linux32_proto.h> 83#else 84#include <machine/../linux/linux.h> 85#include <machine/../linux/linux_proto.h> 86#endif 87 88#include <compat/linux/linux_dtrace.h> 89#include <compat/linux/linux_file.h> 90#include <compat/linux/linux_mib.h> 91#include <compat/linux/linux_signal.h> 92#include <compat/linux/linux_util.h> 93#include <compat/linux/linux_sysproto.h> 94#include <compat/linux/linux_emul.h> 95#include <compat/linux/linux_misc.h> 96 97/** 98 * Special DTrace provider for the linuxulator. 99 * 100 * In this file we define the provider for the entire linuxulator. All 101 * modules (= files of the linuxulator) use it. 
102 * 103 * We define a different name depending on the emulated bitsize, see 104 * ../../<ARCH>/linux{,32}/linux.h, e.g.: 105 * native bitsize = linuxulator 106 * amd64, 32bit emulation = linuxulator32 107 */ 108LIN_SDT_PROVIDER_DEFINE(LINUX_DTRACE); 109 110int stclohz; /* Statistics clock frequency */ 111 112static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = { 113 RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK, 114 RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE, 115 RLIMIT_MEMLOCK, RLIMIT_AS 116}; 117 118struct l_sysinfo { 119 l_long uptime; /* Seconds since boot */ 120 l_ulong loads[3]; /* 1, 5, and 15 minute load averages */ 121#define LINUX_SYSINFO_LOADS_SCALE 65536 122 l_ulong totalram; /* Total usable main memory size */ 123 l_ulong freeram; /* Available memory size */ 124 l_ulong sharedram; /* Amount of shared memory */ 125 l_ulong bufferram; /* Memory used by buffers */ 126 l_ulong totalswap; /* Total swap space size */ 127 l_ulong freeswap; /* swap space still available */ 128 l_ushort procs; /* Number of current processes */ 129 l_ushort pads; 130 l_ulong totalbig; 131 l_ulong freebig; 132 l_uint mem_unit; 133 char _f[20-2*sizeof(l_long)-sizeof(l_int)]; /* padding */ 134}; 135 136struct l_pselect6arg { 137 l_uintptr_t ss; 138 l_size_t ss_len; 139}; 140 141int 142linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args) 143{ 144 struct l_sysinfo sysinfo; 145 vm_object_t object; 146 int i, j; 147 struct timespec ts; 148 149 getnanouptime(&ts); 150 if (ts.tv_nsec != 0) 151 ts.tv_sec++; 152 sysinfo.uptime = ts.tv_sec; 153 154 /* Use the information from the mib to get our load averages */ 155 for (i = 0; i < 3; i++) 156 sysinfo.loads[i] = averunnable.ldavg[i] * 157 LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale; 158 159 sysinfo.totalram = physmem * PAGE_SIZE; 160 sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE; 161 162 sysinfo.sharedram = 0; 163 mtx_lock(&vm_object_list_mtx); 164 TAILQ_FOREACH(object, 
&vm_object_list, object_list) 165 if (object->shadow_count > 1) 166 sysinfo.sharedram += object->resident_page_count; 167 mtx_unlock(&vm_object_list_mtx); 168 169 sysinfo.sharedram *= PAGE_SIZE; 170 sysinfo.bufferram = 0; 171 172 swap_pager_status(&i, &j); 173 sysinfo.totalswap = i * PAGE_SIZE; 174 sysinfo.freeswap = (i - j) * PAGE_SIZE; 175 176 sysinfo.procs = nprocs; 177 178 /* The following are only present in newer Linux kernels. */ 179 sysinfo.totalbig = 0; 180 sysinfo.freebig = 0; 181 sysinfo.mem_unit = 1; 182 183 return (copyout(&sysinfo, args->info, sizeof(sysinfo))); 184} 185 186int 187linux_alarm(struct thread *td, struct linux_alarm_args *args) 188{ 189 struct itimerval it, old_it; 190 u_int secs; 191 int error; 192 193#ifdef DEBUG 194 if (ldebug(alarm)) 195 printf(ARGS(alarm, "%u"), args->secs); 196#endif 197 198 secs = args->secs; 199 200 if (secs > INT_MAX) 201 secs = INT_MAX; 202 203 it.it_value.tv_sec = (long) secs; 204 it.it_value.tv_usec = 0; 205 it.it_interval.tv_sec = 0; 206 it.it_interval.tv_usec = 0; 207 error = kern_setitimer(td, ITIMER_REAL, &it, &old_it); 208 if (error) 209 return (error); 210 if (timevalisset(&old_it.it_value)) { 211 if (old_it.it_value.tv_usec != 0) 212 old_it.it_value.tv_sec++; 213 td->td_retval[0] = old_it.it_value.tv_sec; 214 } 215 return (0); 216} 217 218int 219linux_brk(struct thread *td, struct linux_brk_args *args) 220{ 221 struct vmspace *vm = td->td_proc->p_vmspace; 222 vm_offset_t new, old; 223 struct obreak_args /* { 224 char * nsize; 225 } */ tmp; 226 227#ifdef DEBUG 228 if (ldebug(brk)) 229 printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend); 230#endif 231 old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize); 232 new = (vm_offset_t)args->dsend; 233 tmp.nsize = (char *)new; 234 if (((caddr_t)new > vm->vm_daddr) && !sys_obreak(td, &tmp)) 235 td->td_retval[0] = (long)new; 236 else 237 td->td_retval[0] = (long)old; 238 239 return (0); 240} 241 242#if defined(__i386__) 243/* XXX: what about amd64/linux32? 
*/ 244 245int 246linux_uselib(struct thread *td, struct linux_uselib_args *args) 247{ 248 struct nameidata ni; 249 struct vnode *vp; 250 struct exec *a_out; 251 struct vattr attr; 252 vm_offset_t vmaddr; 253 unsigned long file_offset; 254 unsigned long bss_size; 255 char *library; 256 ssize_t aresid; 257 int error, locked, writecount; 258 259 LCONVPATHEXIST(td, args->library, &library); 260 261#ifdef DEBUG 262 if (ldebug(uselib)) 263 printf(ARGS(uselib, "%s"), library); 264#endif 265 266 a_out = NULL; 267 locked = 0; 268 vp = NULL; 269 270 NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1, 271 UIO_SYSSPACE, library, td); 272 error = namei(&ni); 273 LFREEPATH(library); 274 if (error) 275 goto cleanup; 276 277 vp = ni.ni_vp; 278 NDFREE(&ni, NDF_ONLY_PNBUF); 279 280 /* 281 * From here on down, we have a locked vnode that must be unlocked. 282 * XXX: The code below largely duplicates exec_check_permissions(). 283 */ 284 locked = 1; 285 286 /* Writable? */ 287 error = VOP_GET_WRITECOUNT(vp, &writecount); 288 if (error != 0) 289 goto cleanup; 290 if (writecount != 0) { 291 error = ETXTBSY; 292 goto cleanup; 293 } 294 295 /* Executable? */ 296 error = VOP_GETATTR(vp, &attr, td->td_ucred); 297 if (error) 298 goto cleanup; 299 300 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || 301 ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) { 302 /* EACCESS is what exec(2) returns. */ 303 error = ENOEXEC; 304 goto cleanup; 305 } 306 307 /* Sensible size? */ 308 if (attr.va_size == 0) { 309 error = ENOEXEC; 310 goto cleanup; 311 } 312 313 /* Can we access it? */ 314 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); 315 if (error) 316 goto cleanup; 317 318 /* 319 * XXX: This should use vn_open() so that it is properly authorized, 320 * and to reduce code redundancy all over the place here. 321 * XXX: Not really, it duplicates far more of exec_check_permissions() 322 * than vn_open(). 
323 */ 324#ifdef MAC 325 error = mac_vnode_check_open(td->td_ucred, vp, VREAD); 326 if (error) 327 goto cleanup; 328#endif 329 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL); 330 if (error) 331 goto cleanup; 332 333 /* Pull in executable header into exec_map */ 334 error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE, 335 VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0); 336 if (error) 337 goto cleanup; 338 339 /* Is it a Linux binary ? */ 340 if (((a_out->a_magic >> 16) & 0xff) != 0x64) { 341 error = ENOEXEC; 342 goto cleanup; 343 } 344 345 /* 346 * While we are here, we should REALLY do some more checks 347 */ 348 349 /* Set file/virtual offset based on a.out variant. */ 350 switch ((int)(a_out->a_magic & 0xffff)) { 351 case 0413: /* ZMAGIC */ 352 file_offset = 1024; 353 break; 354 case 0314: /* QMAGIC */ 355 file_offset = 0; 356 break; 357 default: 358 error = ENOEXEC; 359 goto cleanup; 360 } 361 362 bss_size = round_page(a_out->a_bss); 363 364 /* Check various fields in header for validity/bounds. */ 365 if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) { 366 error = ENOEXEC; 367 goto cleanup; 368 } 369 370 /* text + data can't exceed file size */ 371 if (a_out->a_data + a_out->a_text > attr.va_size) { 372 error = EFAULT; 373 goto cleanup; 374 } 375 376 /* 377 * text/data/bss must not exceed limits 378 * XXX - this is not complete. it should check current usage PLUS 379 * the resources needed by this library. 380 */ 381 PROC_LOCK(td->td_proc); 382 if (a_out->a_text > maxtsiz || 383 a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA) || 384 racct_set(td->td_proc, RACCT_DATA, a_out->a_data + 385 bss_size) != 0) { 386 PROC_UNLOCK(td->td_proc); 387 error = ENOMEM; 388 goto cleanup; 389 } 390 PROC_UNLOCK(td->td_proc); 391 392 /* 393 * Prevent more writers. 394 * XXX: Note that if any of the VM operations fail below we don't 395 * clear this flag. 
396 */ 397 VOP_SET_TEXT(vp); 398 399 /* 400 * Lock no longer needed 401 */ 402 locked = 0; 403 VOP_UNLOCK(vp, 0); 404 405 /* 406 * Check if file_offset page aligned. Currently we cannot handle 407 * misalinged file offsets, and so we read in the entire image 408 * (what a waste). 409 */ 410 if (file_offset & PAGE_MASK) { 411#ifdef DEBUG 412 printf("uselib: Non page aligned binary %lu\n", file_offset); 413#endif 414 /* Map text+data read/write/execute */ 415 416 /* a_entry is the load address and is page aligned */ 417 vmaddr = trunc_page(a_out->a_entry); 418 419 /* get anon user mapping, read+write+execute */ 420 error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, 421 &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE, 422 VM_PROT_ALL, VM_PROT_ALL, 0); 423 if (error) 424 goto cleanup; 425 426 error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset, 427 a_out->a_text + a_out->a_data, UIO_USERSPACE, 0, 428 td->td_ucred, NOCRED, &aresid, td); 429 if (error != 0) 430 goto cleanup; 431 if (aresid != 0) { 432 error = ENOEXEC; 433 goto cleanup; 434 } 435 } else { 436#ifdef DEBUG 437 printf("uselib: Page aligned binary %lu\n", file_offset); 438#endif 439 /* 440 * for QMAGIC, a_entry is 20 bytes beyond the load address 441 * to skip the executable header 442 */ 443 vmaddr = trunc_page(a_out->a_entry); 444 445 /* 446 * Map it all into the process's space as a single 447 * copy-on-write "data" segment. 
448 */ 449 error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr, 450 a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL, 451 MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset); 452 if (error) 453 goto cleanup; 454 } 455#ifdef DEBUG 456 printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0], 457 ((long *)vmaddr)[1]); 458#endif 459 if (bss_size != 0) { 460 /* Calculate BSS start address */ 461 vmaddr = trunc_page(a_out->a_entry) + a_out->a_text + 462 a_out->a_data; 463 464 /* allocate some 'anon' space */ 465 error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, 466 &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL, 467 VM_PROT_ALL, 0); 468 if (error) 469 goto cleanup; 470 } 471 472cleanup: 473 /* Unlock vnode if needed */ 474 if (locked) 475 VOP_UNLOCK(vp, 0); 476 477 /* Release the temporary mapping. */ 478 if (a_out) 479 kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE); 480 481 return (error); 482} 483 484#endif /* __i386__ */ 485 486int 487linux_select(struct thread *td, struct linux_select_args *args) 488{ 489 l_timeval ltv; 490 struct timeval tv0, tv1, utv, *tvp; 491 int error; 492 493#ifdef DEBUG 494 if (ldebug(select)) 495 printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds, 496 (void *)args->readfds, (void *)args->writefds, 497 (void *)args->exceptfds, (void *)args->timeout); 498#endif 499 500 /* 501 * Store current time for computation of the amount of 502 * time left. 503 */ 504 if (args->timeout) { 505 if ((error = copyin(args->timeout, <v, sizeof(ltv)))) 506 goto select_out; 507 utv.tv_sec = ltv.tv_sec; 508 utv.tv_usec = ltv.tv_usec; 509#ifdef DEBUG 510 if (ldebug(select)) 511 printf(LMSG("incoming timeout (%jd/%ld)"), 512 (intmax_t)utv.tv_sec, utv.tv_usec); 513#endif 514 515 if (itimerfix(&utv)) { 516 /* 517 * The timeval was invalid. Convert it to something 518 * valid that will act as it does under Linux. 
519 */ 520 utv.tv_sec += utv.tv_usec / 1000000; 521 utv.tv_usec %= 1000000; 522 if (utv.tv_usec < 0) { 523 utv.tv_sec -= 1; 524 utv.tv_usec += 1000000; 525 } 526 if (utv.tv_sec < 0) 527 timevalclear(&utv); 528 } 529 microtime(&tv0); 530 tvp = &utv; 531 } else 532 tvp = NULL; 533 534 error = kern_select(td, args->nfds, args->readfds, args->writefds, 535 args->exceptfds, tvp, sizeof(l_int) * 8); 536 537#ifdef DEBUG 538 if (ldebug(select)) 539 printf(LMSG("real select returns %d"), error); 540#endif 541 if (error) 542 goto select_out; 543 544 if (args->timeout) { 545 if (td->td_retval[0]) { 546 /* 547 * Compute how much time was left of the timeout, 548 * by subtracting the current time and the time 549 * before we started the call, and subtracting 550 * that result from the user-supplied value. 551 */ 552 microtime(&tv1); 553 timevalsub(&tv1, &tv0); 554 timevalsub(&utv, &tv1); 555 if (utv.tv_sec < 0) 556 timevalclear(&utv); 557 } else 558 timevalclear(&utv); 559#ifdef DEBUG 560 if (ldebug(select)) 561 printf(LMSG("outgoing timeout (%jd/%ld)"), 562 (intmax_t)utv.tv_sec, utv.tv_usec); 563#endif 564 ltv.tv_sec = utv.tv_sec; 565 ltv.tv_usec = utv.tv_usec; 566 if ((error = copyout(<v, args->timeout, sizeof(ltv)))) 567 goto select_out; 568 } 569 570select_out: 571#ifdef DEBUG 572 if (ldebug(select)) 573 printf(LMSG("select_out -> %d"), error); 574#endif 575 return (error); 576} 577 578int 579linux_mremap(struct thread *td, struct linux_mremap_args *args) 580{ 581 struct munmap_args /* { 582 void *addr; 583 size_t len; 584 } */ bsd_args; 585 int error = 0; 586 587#ifdef DEBUG 588 if (ldebug(mremap)) 589 printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"), 590 (void *)(uintptr_t)args->addr, 591 (unsigned long)args->old_len, 592 (unsigned long)args->new_len, 593 (unsigned long)args->flags); 594#endif 595 596 if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) { 597 td->td_retval[0] = 0; 598 return (EINVAL); 599 } 600 601 /* 602 * Check for the page alignment. 
603 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK. 604 */ 605 if (args->addr & PAGE_MASK) { 606 td->td_retval[0] = 0; 607 return (EINVAL); 608 } 609 610 args->new_len = round_page(args->new_len); 611 args->old_len = round_page(args->old_len); 612 613 if (args->new_len > args->old_len) { 614 td->td_retval[0] = 0; 615 return (ENOMEM); 616 } 617 618 if (args->new_len < args->old_len) { 619 bsd_args.addr = 620 (caddr_t)((uintptr_t)args->addr + args->new_len); 621 bsd_args.len = args->old_len - args->new_len; 622 error = sys_munmap(td, &bsd_args); 623 } 624 625 td->td_retval[0] = error ? 0 : (uintptr_t)args->addr; 626 return (error); 627} 628 629#define LINUX_MS_ASYNC 0x0001 630#define LINUX_MS_INVALIDATE 0x0002 631#define LINUX_MS_SYNC 0x0004 632 633int 634linux_msync(struct thread *td, struct linux_msync_args *args) 635{ 636 struct msync_args bsd_args; 637 638 bsd_args.addr = (caddr_t)(uintptr_t)args->addr; 639 bsd_args.len = (uintptr_t)args->len; 640 bsd_args.flags = args->fl & ~LINUX_MS_SYNC; 641 642 return (sys_msync(td, &bsd_args)); 643} 644 645int 646linux_time(struct thread *td, struct linux_time_args *args) 647{ 648 struct timeval tv; 649 l_time_t tm; 650 int error; 651 652#ifdef DEBUG 653 if (ldebug(time)) 654 printf(ARGS(time, "*")); 655#endif 656 657 microtime(&tv); 658 tm = tv.tv_sec; 659 if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm)))) 660 return (error); 661 td->td_retval[0] = tm; 662 return (0); 663} 664 665struct l_times_argv { 666 l_clock_t tms_utime; 667 l_clock_t tms_stime; 668 l_clock_t tms_cutime; 669 l_clock_t tms_cstime; 670}; 671 672 673/* 674 * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value. 675 * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK 676 * auxiliary vector entry. 
677 */ 678#define CLK_TCK 100 679 680#define CONVOTCK(r) (r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK)) 681#define CONVNTCK(r) (r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz)) 682 683#define CONVTCK(r) (linux_kernver(td) >= LINUX_KERNVER_2004000 ? \ 684 CONVNTCK(r) : CONVOTCK(r)) 685 686int 687linux_times(struct thread *td, struct linux_times_args *args) 688{ 689 struct timeval tv, utime, stime, cutime, cstime; 690 struct l_times_argv tms; 691 struct proc *p; 692 int error; 693 694#ifdef DEBUG 695 if (ldebug(times)) 696 printf(ARGS(times, "*")); 697#endif 698 699 if (args->buf != NULL) { 700 p = td->td_proc; 701 PROC_LOCK(p); 702 PROC_STATLOCK(p); 703 calcru(p, &utime, &stime); 704 PROC_STATUNLOCK(p); 705 calccru(p, &cutime, &cstime); 706 PROC_UNLOCK(p); 707 708 tms.tms_utime = CONVTCK(utime); 709 tms.tms_stime = CONVTCK(stime); 710 711 tms.tms_cutime = CONVTCK(cutime); 712 tms.tms_cstime = CONVTCK(cstime); 713 714 if ((error = copyout(&tms, args->buf, sizeof(tms)))) 715 return (error); 716 } 717 718 microuptime(&tv); 719 td->td_retval[0] = (int)CONVTCK(tv); 720 return (0); 721} 722 723int 724linux_newuname(struct thread *td, struct linux_newuname_args *args) 725{ 726 struct l_new_utsname utsname; 727 char osname[LINUX_MAX_UTSNAME]; 728 char osrelease[LINUX_MAX_UTSNAME]; 729 char *p; 730 731#ifdef DEBUG 732 if (ldebug(newuname)) 733 printf(ARGS(newuname, "*")); 734#endif 735 736 linux_get_osname(td, osname); 737 linux_get_osrelease(td, osrelease); 738 739 bzero(&utsname, sizeof(utsname)); 740 strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME); 741 getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME); 742 getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME); 743 strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME); 744 strlcpy(utsname.version, version, LINUX_MAX_UTSNAME); 745 for (p = utsname.version; *p != '\0'; ++p) 746 if (*p == '\n') { 747 *p = '\0'; 748 break; 749 } 750 strlcpy(utsname.machine, linux_kplatform, 
LINUX_MAX_UTSNAME); 751 752 return (copyout(&utsname, args->buf, sizeof(utsname))); 753} 754 755struct l_utimbuf { 756 l_time_t l_actime; 757 l_time_t l_modtime; 758}; 759 760int 761linux_utime(struct thread *td, struct linux_utime_args *args) 762{ 763 struct timeval tv[2], *tvp; 764 struct l_utimbuf lut; 765 char *fname; 766 int error; 767 768 LCONVPATHEXIST(td, args->fname, &fname); 769 770#ifdef DEBUG 771 if (ldebug(utime)) 772 printf(ARGS(utime, "%s, *"), fname); 773#endif 774 775 if (args->times) { 776 if ((error = copyin(args->times, &lut, sizeof lut))) { 777 LFREEPATH(fname); 778 return (error); 779 } 780 tv[0].tv_sec = lut.l_actime; 781 tv[0].tv_usec = 0; 782 tv[1].tv_sec = lut.l_modtime; 783 tv[1].tv_usec = 0; 784 tvp = tv; 785 } else 786 tvp = NULL; 787 788 error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE); 789 LFREEPATH(fname); 790 return (error); 791} 792 793int 794linux_utimes(struct thread *td, struct linux_utimes_args *args) 795{ 796 l_timeval ltv[2]; 797 struct timeval tv[2], *tvp = NULL; 798 char *fname; 799 int error; 800 801 LCONVPATHEXIST(td, args->fname, &fname); 802 803#ifdef DEBUG 804 if (ldebug(utimes)) 805 printf(ARGS(utimes, "%s, *"), fname); 806#endif 807 808 if (args->tptr != NULL) { 809 if ((error = copyin(args->tptr, ltv, sizeof ltv))) { 810 LFREEPATH(fname); 811 return (error); 812 } 813 tv[0].tv_sec = ltv[0].tv_sec; 814 tv[0].tv_usec = ltv[0].tv_usec; 815 tv[1].tv_sec = ltv[1].tv_sec; 816 tv[1].tv_usec = ltv[1].tv_usec; 817 tvp = tv; 818 } 819 820 error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE); 821 LFREEPATH(fname); 822 return (error); 823} 824 825int 826linux_futimesat(struct thread *td, struct linux_futimesat_args *args) 827{ 828 l_timeval ltv[2]; 829 struct timeval tv[2], *tvp = NULL; 830 char *fname; 831 int error, dfd; 832 833 dfd = (args->dfd == LINUX_AT_FDCWD) ? 
AT_FDCWD : args->dfd; 834 LCONVPATHEXIST_AT(td, args->filename, &fname, dfd); 835 836#ifdef DEBUG 837 if (ldebug(futimesat)) 838 printf(ARGS(futimesat, "%s, *"), fname); 839#endif 840 841 if (args->utimes != NULL) { 842 if ((error = copyin(args->utimes, ltv, sizeof ltv))) { 843 LFREEPATH(fname); 844 return (error); 845 } 846 tv[0].tv_sec = ltv[0].tv_sec; 847 tv[0].tv_usec = ltv[0].tv_usec; 848 tv[1].tv_sec = ltv[1].tv_sec; 849 tv[1].tv_usec = ltv[1].tv_usec; 850 tvp = tv; 851 } 852 853 error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE); 854 LFREEPATH(fname); 855 return (error); 856} 857 858int 859linux_common_wait(struct thread *td, int pid, int *status, 860 int options, struct rusage *ru) 861{ 862 int error, tmpstat; 863 864 error = kern_wait(td, pid, &tmpstat, options, ru); 865 if (error) 866 return (error); 867 868 if (status) { 869 tmpstat &= 0xffff; 870 if (WIFSIGNALED(tmpstat)) 871 tmpstat = (tmpstat & 0xffffff80) | 872 BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat)); 873 else if (WIFSTOPPED(tmpstat)) 874 tmpstat = (tmpstat & 0xffff00ff) | 875 (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8); 876 error = copyout(&tmpstat, status, sizeof(int)); 877 } 878 879 return (error); 880} 881 882#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 883int 884linux_waitpid(struct thread *td, struct linux_waitpid_args *args) 885{ 886 int options; 887 888#ifdef DEBUG 889 if (ldebug(waitpid)) 890 printf(ARGS(waitpid, "%d, %p, %d"), 891 args->pid, (void *)args->status, args->options); 892#endif 893 /* 894 * this is necessary because the test in kern_wait doesn't work 895 * because we mess with the options here 896 */ 897 if (args->options & ~(WUNTRACED | WNOHANG | WCONTINUED | __WCLONE)) 898 return (EINVAL); 899 900 options = (args->options & (WNOHANG | WUNTRACED)); 901 /* WLINUXCLONE should be equal to __WCLONE, but we make sure */ 902 if (args->options & __WCLONE) 903 options |= WLINUXCLONE; 904 905 return (linux_common_wait(td, args->pid, 
args->status, options, NULL)); 906} 907#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 908 909int 910linux_wait4(struct thread *td, struct linux_wait4_args *args) 911{ 912 int error, options; 913 struct rusage ru, *rup; 914 915#ifdef DEBUG 916 if (ldebug(wait4)) 917 printf(ARGS(wait4, "%d, %p, %d, %p"), 918 args->pid, (void *)args->status, args->options, 919 (void *)args->rusage); 920#endif 921 922 options = (args->options & (WNOHANG | WUNTRACED)); 923 /* WLINUXCLONE should be equal to __WCLONE, but we make sure */ 924 if (args->options & __WCLONE) 925 options |= WLINUXCLONE; 926 927 if (args->rusage != NULL) 928 rup = &ru; 929 else 930 rup = NULL; 931 error = linux_common_wait(td, args->pid, args->status, options, rup); 932 if (error != 0) 933 return (error); 934 if (args->rusage != NULL) 935 error = linux_copyout_rusage(&ru, args->rusage); 936 return (error); 937} 938 939int 940linux_waitid(struct thread *td, struct linux_waitid_args *args) 941{ 942 int status, options, sig; 943 struct __wrusage wru; 944 siginfo_t siginfo; 945 l_siginfo_t lsi; 946 idtype_t idtype; 947 struct proc *p; 948 int error; 949 950 options = 0; 951 linux_to_bsd_waitopts(args->options, &options); 952 953 if (options & ~(WNOHANG | WNOWAIT | WEXITED | WUNTRACED | WCONTINUED)) 954 return (EINVAL); 955 if (!(options & (WEXITED | WUNTRACED | WCONTINUED))) 956 return (EINVAL); 957 958 switch (args->idtype) { 959 case LINUX_P_ALL: 960 idtype = P_ALL; 961 break; 962 case LINUX_P_PID: 963 if (args->id <= 0) 964 return (EINVAL); 965 idtype = P_PID; 966 break; 967 case LINUX_P_PGID: 968 if (args->id <= 0) 969 return (EINVAL); 970 idtype = P_PGID; 971 break; 972 default: 973 return (EINVAL); 974 } 975 976 error = kern_wait6(td, idtype, args->id, &status, options, 977 &wru, &siginfo); 978 if (error != 0) 979 return (error); 980 if (args->rusage != NULL) { 981 error = linux_copyout_rusage(&wru.wru_children, 982 args->rusage); 983 if (error != 0) 984 return (error); 985 } 986 if (args->info != 
NULL) { 987 p = td->td_proc; 988 if (td->td_retval[0] == 0) 989 bzero(&lsi, sizeof(lsi)); 990 else { 991 sig = BSD_TO_LINUX_SIGNAL(siginfo.si_signo); 992 siginfo_to_lsiginfo(&siginfo, &lsi, sig); 993 } 994 error = copyout(&lsi, args->info, sizeof(lsi)); 995 } 996 td->td_retval[0] = 0; 997 998 return (error); 999} 1000 1001int 1002linux_mknod(struct thread *td, struct linux_mknod_args *args) 1003{ 1004 char *path; 1005 int error; 1006 1007 LCONVPATHCREAT(td, args->path, &path); 1008 1009#ifdef DEBUG 1010 if (ldebug(mknod)) 1011 printf(ARGS(mknod, "%s, %d, %ju"), path, args->mode, 1012 (uintmax_t)args->dev); 1013#endif 1014 1015 switch (args->mode & S_IFMT) { 1016 case S_IFIFO: 1017 case S_IFSOCK: 1018 error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode); 1019 break; 1020 1021 case S_IFCHR: 1022 case S_IFBLK: 1023 error = kern_mknod(td, path, UIO_SYSSPACE, args->mode, 1024 args->dev); 1025 break; 1026 1027 case S_IFDIR: 1028 error = EPERM; 1029 break; 1030 1031 case 0: 1032 args->mode |= S_IFREG; 1033 /* FALLTHROUGH */ 1034 case S_IFREG: 1035 error = kern_open(td, path, UIO_SYSSPACE, 1036 O_WRONLY | O_CREAT | O_TRUNC, args->mode); 1037 if (error == 0) 1038 kern_close(td, td->td_retval[0]); 1039 break; 1040 1041 default: 1042 error = EINVAL; 1043 break; 1044 } 1045 LFREEPATH(path); 1046 return (error); 1047} 1048 1049int 1050linux_mknodat(struct thread *td, struct linux_mknodat_args *args) 1051{ 1052 char *path; 1053 int error, dfd; 1054 1055 dfd = (args->dfd == LINUX_AT_FDCWD) ? 
AT_FDCWD : args->dfd; 1056 LCONVPATHCREAT_AT(td, args->filename, &path, dfd); 1057 1058#ifdef DEBUG 1059 if (ldebug(mknodat)) 1060 printf(ARGS(mknodat, "%s, %d, %d"), path, args->mode, args->dev); 1061#endif 1062 1063 switch (args->mode & S_IFMT) { 1064 case S_IFIFO: 1065 case S_IFSOCK: 1066 error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode); 1067 break; 1068 1069 case S_IFCHR: 1070 case S_IFBLK: 1071 error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode, 1072 args->dev); 1073 break; 1074 1075 case S_IFDIR: 1076 error = EPERM; 1077 break; 1078 1079 case 0: 1080 args->mode |= S_IFREG; 1081 /* FALLTHROUGH */ 1082 case S_IFREG: 1083 error = kern_openat(td, dfd, path, UIO_SYSSPACE, 1084 O_WRONLY | O_CREAT | O_TRUNC, args->mode); 1085 if (error == 0) 1086 kern_close(td, td->td_retval[0]); 1087 break; 1088 1089 default: 1090 error = EINVAL; 1091 break; 1092 } 1093 LFREEPATH(path); 1094 return (error); 1095} 1096 1097/* 1098 * UGH! This is just about the dumbest idea I've ever heard!! 1099 */ 1100int 1101linux_personality(struct thread *td, struct linux_personality_args *args) 1102{ 1103#ifdef DEBUG 1104 if (ldebug(personality)) 1105 printf(ARGS(personality, "%lu"), (unsigned long)args->per); 1106#endif 1107 if (args->per != 0) 1108 return (EINVAL); 1109 1110 /* Yes Jim, it's still a Linux... 
 */
	td->td_retval[0] = 0;
	return (0);
}

/*
 * Linux view of struct itimerval; l_timeval sizes the fields for the
 * guest ABI (32- or 64-bit depending on COMPAT_LINUX32).
 */
struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

/*
 * Field-by-field itimerval copy.  Despite the name it converts in either
 * direction, since only field names (identical on both sides) are used.
 */
#define	B2L_ITIMERVAL(bip, lip)						\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

/*
 * Linux setitimer(2).  A NULL new-value pointer degenerates into
 * getitimer(2) on the old-value pointer, matching Linux behaviour.
 */
int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

#ifdef DEBUG
	if (ldebug(setitimer))
		printf(ARGS(setitimer, "%p, %p"),
		    (void *)uap->itv, (void *)uap->oitv);
#endif

	if (uap->itv == NULL) {
		/* No new value: report the current one into uap->oitv. */
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
#ifdef DEBUG
	if (ldebug(setitimer)) {
		printf("setitimer: value: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_value.tv_sec, aitv.it_value.tv_usec);
		printf("setitimer: interval: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_interval.tv_sec, aitv.it_interval.tv_usec);
	}
#endif
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	/* Hand the previous timer value back to the caller. */
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

/*
 * Linux getitimer(2): fetch the native timer and convert it to the
 * Linux itimerval layout.
 */
int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

#ifdef DEBUG
	if (ldebug(getitimer))
		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
#endif
	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
/*
 * Linux nice(2): thin wrapper over setpriority(2) for the current
 * process.  Only present on ABIs where Linux still has the syscall.
 */
int
linux_nice(struct thread *td, struct linux_nice_args *args)
{
	struct setpriority_args bsd_args;

	bsd_args.which = PRIO_PROCESS;
	bsd_args.who = 0;		/* current process */
	bsd_args.prio = args->inc;
	return (sys_setpriority(td, &bsd_args));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

/*
 * Linux setgroups(2).  Installs the supplied supplementary group set on
 * a fresh credential, shifting it by one because FreeBSD keeps the
 * effective gid in cr_groups[0].
 */
int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_TEMP, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = crcopysafe(p, newcred);

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS, 0)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		/* Copy the Linux set in behind the preserved egid slot. */
		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	setsugid(p);
	p->p_ucred = newcred;
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_TEMP);
	return (error);
}

/*
 * Linux getgroups(2).  Returns the supplementary groups, excluding the
 * egid stored in cr_groups[0]; gidsetsize == 0 just reports the count.
 */
int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
	 */

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_TEMP, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_TEMP);
	if (error)
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}

/*
 * Linux setrlimit(2): map the Linux resource number to the native one
 * and hand off to kern_setrlimit().
 */
int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

#ifdef DEBUG
	if (ldebug(setrlimit))
		printf(ARGS(setrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	/* -1 in the table marks a Linux resource with no FreeBSD analog. */
	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
/*
 * Linux old_getrlimit(2) (the legacy, narrow-limit variant).  The
 * all-ones "infinity" pattern is clamped to the maximum positive signed
 * value the old Linux interface can represent.
 */
int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(old_getrlimit))
		printf(ARGS(old_getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

/*
 * Linux getrlimit(2) (modern variant, full-width limits).
 */
int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(getrlimit))
		printf(ARGS(getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

/*
 * Linux sched_setscheduler(2): translate the policy constant and apply
 * it to the target thread.  linux_tdfind() returns the thread with its
 * process locked; the lock is dropped here after the kern_ call.
 */
int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

#ifdef DEBUG
	if (ldebug(sched_setscheduler))
		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
		    args->pid, args->policy, (const void *)args->param);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	error = copyin(args->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

/*
 * Linux sched_getscheduler(2): fetch the target thread's policy and
 * translate it back to the Linux constant.
 */
int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct thread *tdt;
	int error, policy;

#ifdef DEBUG
	if (ldebug(sched_getscheduler))
		printf(ARGS(sched_getscheduler, "%d"), args->pid);
#endif

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getscheduler(td, tdt, &policy);
	PROC_UNLOCK(tdt->td_proc);

	switch (policy) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}
	return (error);
}

/*
 * Linux sched_get_priority_max(2): policy translation plus native call.
 */
int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_max))
		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

/*
 * Linux sched_get_priority_min(2): policy translation plus native call.
 */
int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_min))
		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}

/* Linux reboot(2) magic numbers and command codes. */
#define	REBOOT_CAD_ON	0x89abcdef
#define	REBOOT_CAD_OFF	0
#define	REBOOT_HALT	0xcdef0123
#define	REBOOT_RESTART	0x01234567
#define	REBOOT_RESTART2	0xA1B2C3D4
#define	REBOOT_POWEROFF	0x4321FEDC
#define	REBOOT_MAGIC1	0xfee1dead
#define	REBOOT_MAGIC2	0x28121969
#define	REBOOT_MAGIC2A	0x05121996
#define	REBOOT_MAGIC2B	0x16041998

/*
 * Linux reboot(2).  Validates the two magic cookies, then maps the
 * command to native reboot(2) flags.  The CAD (Ctrl-Alt-Del) toggles
 * only perform the privilege check and otherwise do nothing.
 */
int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}


/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 is defined.
 * This clobbers registers that
 * are assumed to be preserved.  The following lightweight syscalls fix
 * this.  See also linux_getgid16() and linux_getuid16() in linux_uid16.c
 *
 * linux_getpid() - MP SAFE
 * linux_getgid() - MP SAFE
 * linux_getuid() - MP SAFE
 */

/* Linux getpid(2): return p_pid without touching td_retval[1]. */
int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

#ifdef DEBUG
	if (ldebug(getpid))
		printf(ARGS(getpid, ""));
#endif
	td->td_retval[0] = td->td_proc->p_pid;

	return (0);
}

/*
 * Linux gettid(2): return the emulated Linux thread id stored in the
 * per-thread emuldata.
 */
int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(gettid))
		printf(ARGS(gettid, ""));
#endif

	em = em_find(td);
	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));

	td->td_retval[0] = em->em_tid;

	return (0);
}


/* Linux getppid(2): parent pid, read under the proc lock. */
int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{

#ifdef DEBUG
	if (ldebug(getppid))
		printf(ARGS(getppid, ""));
#endif

	PROC_LOCK(td->td_proc);
	td->td_retval[0] = td->td_proc->p_pptr->p_pid;
	PROC_UNLOCK(td->td_proc);
	return (0);
}

/* Linux getgid(2): real gid, single-retval variant (see block comment). */
int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

#ifdef DEBUG
	if (ldebug(getgid))
		printf(ARGS(getgid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

/* Linux getuid(2): real uid, single-retval variant (see block comment). */
int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

#ifdef DEBUG
	if (ldebug(getuid))
		printf(ARGS(getuid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}


/* Linux getsid(2): direct pass-through to the native syscall. */
int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{
	struct getsid_args bsd;

#ifdef DEBUG
	if (ldebug(getsid))
		printf(ARGS(getsid, "%i"), args->pid);
#endif

	bsd.pid = args->pid;
	return (sys_getsid(td, &bsd));
}

/* Placeholder for unimplemented Linux syscalls. */
int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}

/*
 * Linux getpriority(2).  Linux returns 20 - prio (a value in 1..40)
 * instead of the raw -20..20 nice value, so convert on the way out.
 */
int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	struct getpriority_args bsd_args;
	int error;

#ifdef DEBUG
	if (ldebug(getpriority))
		printf(ARGS(getpriority, "%i, %i"), args->which, args->who);
#endif

	bsd_args.which = args->which;
	bsd_args.who = args->who;
	error = sys_getpriority(td, &bsd_args);
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

/* Linux sethostname(2): implemented via the kern.hostname sysctl. */
int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(sethostname))
		printf(ARGS(sethostname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

/* Linux setdomainname(2): implemented via the kern.domainname sysctl. */
int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(setdomainname))
		printf(ARGS(setdomainname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}

/*
 * Linux exit_group(2): terminate the whole process.  Does not return.
 */
int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{

#ifdef DEBUG
	if (ldebug(exit_group))
		printf(ARGS(exit_group, "%i"), args->error_code);
#endif

	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
	    args->error_code);

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, W_EXITCODE(args->error_code, 0));
	/* NOTREACHED */
}

/* Only capability interface version this stub implementation accepts. */
#define	_LINUX_CAPABILITY_VERSION	0x19980330

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

/*
 * Linux capget(2) stub: reports the supported interface version and an
 * all-zero capability set.  Only the calling process (pid 0) is allowed.
 */
int
linux_capget(struct thread *td, struct linux_capget_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		/* Report the version we do support, per the Linux ABI. */
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (args->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		bzero(&lucd, sizeof(lucd));
		error = copyout(&lucd, args->datap, sizeof(lucd));
	}

	return (error);
}

/*
 * Linux capset(2) stub: validates the header, then refuses any request
 * that would actually set a capability.
 */
int
linux_capset(struct thread *td, struct linux_capset_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL || args->datap == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(args->datap, &lucd, sizeof(lucd));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	if (lucd.effective || lucd.permitted || lucd.inheritable) {
		linux_msg(td,
			  "capset effective=0x%x, permitted=0x%x, "
			  "inheritable=0x%x is not implemented",
			  (int)lucd.effective, (int)lucd.permitted,
			  (int)lucd.inheritable);
		return (EPERM);
	}

	return (0);
}

/*
 * Linux prctl(2).  Implements the pdeath-signal, keepcaps (stubbed) and
 * process-name options; everything else returns EINVAL.
 */
int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	struct linux_emuldata *em;
	int pdeath_signal;

#ifdef DEBUG
	if (ldebug(prctl))
		printf(ARGS(prctl, "%d, %ju, %ju, %ju, %ju"), args->option,
		    (uintmax_t)args->arg2, (uintmax_t)args->arg3,
		    (uintmax_t)args->arg4, (uintmax_t)args->arg5);
#endif

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		em = em_find(td);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		em->pdeath_signal = args->arg2;
		break;
	case LINUX_PR_GET_PDEATHSIG:
		em = em_find(td);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		pdeath_signal = em->pdeath_signal;
		error = copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal));
		break;
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure to not
		 * overflow the size a linux program expects. We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side. This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Linux sched_setparam(2).  linux_tdfind() returns the target thread
 * with its process locked; drop the lock after the kern_ call.
 */
int
linux_sched_setparam(struct thread *td,
    struct linux_sched_setparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error;

#ifdef DEBUG
	if (ldebug(sched_setparam))
		printf(ARGS(sched_setparam, "%d, *"), uap->pid);
#endif

	error = copyin(uap->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setparam(td, tdt, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

/*
 * Linux sched_getparam(2): fetch the target thread's scheduling
 * parameters and copy them out on success.
 */
int
linux_sched_getparam(struct thread *td,
    struct linux_sched_getparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error;

#ifdef DEBUG
	if (ldebug(sched_getparam))
		printf(ARGS(sched_getparam, "%d, *"), uap->pid);
#endif

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getparam(td, tdt, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	if (error == 0)
		error = copyout(&sched_param, uap->param,
		    sizeof(sched_param));
	return (error);
}

/*
 * Get affinity of a process.
 */
int
linux_sched_getaffinity(struct thread *td,
    struct linux_sched_getaffinity_args *args)
{
	int error;
	struct thread *tdt;
	struct cpuset_getaffinity_args cga;

#ifdef DEBUG
	if (ldebug(sched_getaffinity))
		printf(ARGS(sched_getaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	/* The caller's buffer must hold a full native cpuset_t. */
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	/* linux_tdfind() returned with the target's process locked. */
	PROC_UNLOCK(tdt->td_proc);
	cga.level = CPU_LEVEL_WHICH;
	cga.which = CPU_WHICH_TID;
	cga.id = tdt->td_tid;
	cga.cpusetsize = sizeof(cpuset_t);
	cga.mask = (cpuset_t *) args->user_mask_ptr;

	/* Linux returns the number of bytes written on success. */
	if ((error = sys_cpuset_getaffinity(td, &cga)) == 0)
		td->td_retval[0] = sizeof(cpuset_t);

	return (error);
}

/*
 *  Set affinity of a process.
 */
int
linux_sched_setaffinity(struct thread *td,
    struct linux_sched_setaffinity_args *args)
{
	struct cpuset_setaffinity_args csa;
	struct thread *tdt;

#ifdef DEBUG
	if (ldebug(sched_setaffinity))
		printf(ARGS(sched_setaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	/* linux_tdfind() returned with the target's process locked. */
	PROC_UNLOCK(tdt->td_proc);
	csa.level = CPU_LEVEL_WHICH;
	csa.which = CPU_WHICH_TID;
	csa.id = tdt->td_tid;
	csa.cpusetsize = sizeof(cpuset_t);
	csa.mask = (cpuset_t *) args->user_mask_ptr;

	return (sys_cpuset_setaffinity(td, &csa));
}

/* Linux prlimit64(2) limit layout: both fields are unsigned 64-bit. */
struct linux_rlimit64 {
	uint64_t	rlim_cur;
	uint64_t	rlim_max;
};

/*
 * Linux prlimit64(2): get and/or set a resource limit of an arbitrary
 * process, translating between the Linux and native infinity encodings.
 */
int
linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args)
{
	struct rlimit rlim, nrlim;
	struct linux_rlimit64 lrlim;
	struct proc *p;
	u_int which;
	int flags;
	int error;

#ifdef DEBUG
	if (ldebug(prlimit64))
		printf(ARGS(prlimit64, "%d, %d, %p, %p"), args->pid,
		    args->resource, (void *)args->new, (void *)args->old);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	if (args->new != NULL) {
		/*
		 * Note. Unlike FreeBSD where rlim is signed 64-bit Linux
		 * rlim is unsigned 64-bit. FreeBSD treats negative limits
		 * as INFINITY so we do not need a conversion even.
		 */
		error = copyin(args->new, &nrlim, sizeof(nrlim));
		if (error != 0)
			return (error);
	}

	/* Setting a limit needs debug rights; reading needs visibility. */
	flags = PGET_HOLD | PGET_NOTWEXIT;
	if (args->new != NULL)
		flags |= PGET_CANDEBUG;
	else
		flags |= PGET_CANSEE;
	error = pget(args->pid, flags, &p);
	if (error != 0)
		return (error);

	if (args->old != NULL) {
		PROC_LOCK(p);
		lim_rlimit(p, which, &rlim);
		PROC_UNLOCK(p);
		if (rlim.rlim_cur == RLIM_INFINITY)
			lrlim.rlim_cur = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_cur = rlim.rlim_cur;
		if (rlim.rlim_max == RLIM_INFINITY)
			lrlim.rlim_max = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_max = rlim.rlim_max;
		error = copyout(&lrlim, args->old, sizeof(lrlim));
		if (error != 0)
			goto out;
	}

	if (args->new != NULL)
		error = kern_proc_setrlimit(td, p, which, &nrlim);

 out:
	PRELE(p);
	return (error);
}

/*
 * Linux pselect6(2).  The sigset argument arrives indirectly as a
 * (pointer, length) pair; the timeout is copied in, converted to a
 * timeval, and the remaining time is written back afterwards the way
 * Linux does.
 */
int
linux_pselect6(struct thread *td, struct linux_pselect6_args *args)
{
	struct timeval utv, tv0, tv1, *tvp;
	struct l_pselect6arg lpse6;
	struct l_timespec lts;
	struct timespec uts;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	ssp = NULL;
	if (args->sig != NULL) {
		error = copyin(args->sig, &lpse6, sizeof(lpse6));
		if (error != 0)
			return (error);
		if (lpse6.ss_len != sizeof(l_ss))
			return (EINVAL);
		if (lpse6.ss != 0) {
			error = copyin(PTRIN(lpse6.ss), &l_ss,
			    sizeof(l_ss));
			if (error != 0)
				return (error);
			linux_to_bsd_sigset(&l_ss, &ss);
			ssp = &ss;
		}
	}

	/*
	 * Currently glibc changes nanosecond number to microsecond.
	 * This means losing precision but for now it is hardly seen.
	 */
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error != 0)
			return (error);
		uts.tv_sec = lts.tv_sec;
		uts.tv_nsec = lts.tv_nsec;

		TIMESPEC_TO_TIMEVAL(&utv, &uts);
		if (itimerfix(&utv))
			return (EINVAL);

		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_pselect(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, ssp, sizeof(l_int) * 8);

	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0] != 0) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */

			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);

		TIMEVAL_TO_TIMESPEC(&utv, &uts);
		lts.tv_sec = uts.tv_sec;
		lts.tv_nsec = uts.tv_nsec;
		error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}

#if defined(DEBUG) || defined(KTR)
/* XXX: can be removed when every ldebug(...) and KTR stuff are removed. */

/* One bit per syscall; a set bit suppresses debug output for it. */
u_char linux_debug_map[howmany(LINUX_SYS_MAXSYSCALL, sizeof(u_char))];

/*
 * Toggle debug output for one syscall, or for all of them when
 * 'global' is set.
 */
static int
linux_debug(int syscall, int toggle, int global)
{

	if (global) {
		char c = toggle ? 0 : 0xff;

		memset(linux_debug_map, c, sizeof(linux_debug_map));
		return (0);
	}
	if (syscall < 0 || syscall >= LINUX_SYS_MAXSYSCALL)
		return (EINVAL);
	if (toggle)
		clrbit(linux_debug_map, syscall);
	else
		setbit(linux_debug_map, syscall);
	return (0);
}

/*
 * Usage: sysctl linux.debug=<syscall_nr>.<0/1>
 *
 *    E.g.: sysctl linux.debug=21.0
 *
 * As a special case, syscall "all" will apply to all syscalls globally.
 */
#define LINUX_MAX_DEBUGSTR	16
int
linux_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	char value[LINUX_MAX_DEBUGSTR], *p;
	int error, sysc, toggle;
	int global = 0;

	value[0] = '\0';
	error = sysctl_handle_string(oidp, value, LINUX_MAX_DEBUGSTR, req);
	if (error || req->newptr == NULL)
		return (error);
	/* Split "<nr>.<0/1>" at the dot. */
	for (p = value; *p != '\0' && *p != '.'; p++);
	if (*p == '\0')
		return (EINVAL);
	*p++ = '\0';
	sysc = strtol(value, NULL, 0);
	toggle = strtol(p, NULL, 0);
	if (strcmp(value, "all") == 0)
		global = 1;
	error = linux_debug(sysc, toggle, global);
	return (error);
}

#endif /* DEBUG || KTR */

/*
 * Linux sched_rr_get_interval(2): fetch the round-robin quantum of the
 * target thread and convert it to a Linux timespec.
 */
int
linux_sched_rr_get_interval(struct thread *td,
    struct linux_sched_rr_get_interval_args *uap)
{
	struct timespec ts;
	struct l_timespec lts;
	struct thread *tdt;
	int error;

	/*
	 * According to man in case the invalid pid specified
	 * EINVAL should be returned.
	 */
	if (uap->pid < 0)
		return (EINVAL);

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_rr_get_interval_td(td, tdt, &ts);
	PROC_UNLOCK(tdt->td_proc);
	if (error != 0)
		return (error);
	lts.tv_sec = ts.tv_sec;
	lts.tv_nsec = ts.tv_nsec;
	return (copyout(&lts, uap->interval, sizeof(lts)));
}

/*
 * In case when the Linux thread is the initial thread in
 * the thread group thread id is equal to the process id.
 * Glibc depends on this magic (assert in pthread_getattr_np.c).
 *
 * Look up a thread by Linux tid.  On success the thread is returned
 * with its process locked (every return path below that yields a
 * non-NULL thread leaves PROC_LOCK held); callers must PROC_UNLOCK.
 */
struct thread *
linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
{
	struct linux_emuldata *em;
	struct thread *tdt;
	struct proc *p;

	tdt = NULL;
	if (tid == 0 || tid == td->td_tid) {
		/* Fast path: the caller itself. */
		tdt = td;
		PROC_LOCK(tdt->td_proc);
	} else if (tid > PID_MAX)
		/* Non-initial threads carry tids above the pid range. */
		tdt = tdfind(tid, pid);
	else {
		/*
		 * Initial thread where the tid equal to the pid.
		 */
		p = pfind(tid);
		if (p != NULL) {
			if (SV_PROC_ABI(p) != SV_ABI_LINUX) {
				/*
				 * p is not a Linuxulator process.
				 */
				PROC_UNLOCK(p);
				return (NULL);
			}
			FOREACH_THREAD_IN_PROC(p, tdt) {
				em = em_find(tdt);
				if (tid == em->em_tid)
					/* Found: return with p locked. */
					return (tdt);
			}
			PROC_UNLOCK(p);
		}
		return (NULL);
	}

	return (tdt);
}

/*
 * Translate Linux wait4/waitid option flags into the native WNOHANG /
 * WUNTRACED / ... set.  Flags are OR-ed into *bsdopts, which the caller
 * is expected to have initialized.
 */
void
linux_to_bsd_waitopts(int options, int *bsdopts)
{

	if (options & LINUX_WNOHANG)
		*bsdopts |= WNOHANG;
	if (options & LINUX_WUNTRACED)
		*bsdopts |= WUNTRACED;
	if (options & LINUX_WEXITED)
		*bsdopts |= WEXITED;
	if (options & LINUX_WCONTINUED)
		*bsdopts |= WCONTINUED;
	if (options & LINUX_WNOWAIT)
		*bsdopts |= WNOWAIT;

	if (options & __WCLONE)
		*bsdopts |= WLINUXCLONE;
}