/* linux_machdep.c revision 140992 */
1/*- 2 * Copyright (c) 2000 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer 10 * in this position and unchanged. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: head/sys/i386/linux/linux_machdep.c 140992 2005-01-29 23:12:00Z sobomax $"); 31 32#include <sys/param.h> 33#include <sys/systm.h> 34#include <sys/imgact.h> 35#include <sys/lock.h> 36#include <sys/malloc.h> 37#include <sys/mman.h> 38#include <sys/mutex.h> 39#include <sys/proc.h> 40#include <sys/resource.h> 41#include <sys/resourcevar.h> 42#include <sys/signalvar.h> 43#include <sys/syscallsubr.h> 44#include <sys/sysproto.h> 45#include <sys/unistd.h> 46 47#include <machine/frame.h> 48#include <machine/psl.h> 49#include <machine/segments.h> 50#include <machine/sysarch.h> 51 52#include <vm/vm.h> 53#include <vm/pmap.h> 54#include <vm/vm_map.h> 55 56#include <i386/linux/linux.h> 57#include <i386/linux/linux_proto.h> 58#include <compat/linux/linux_ipc.h> 59#include <compat/linux/linux_signal.h> 60#include <compat/linux/linux_util.h> 61 62struct l_descriptor { 63 l_uint entry_number; 64 l_ulong base_addr; 65 l_uint limit; 66 l_uint seg_32bit:1; 67 l_uint contents:2; 68 l_uint read_exec_only:1; 69 l_uint limit_in_pages:1; 70 l_uint seg_not_present:1; 71 l_uint useable:1; 72}; 73 74struct l_old_select_argv { 75 l_int nfds; 76 l_fd_set *readfds; 77 l_fd_set *writefds; 78 l_fd_set *exceptfds; 79 struct l_timeval *timeout; 80}; 81 82int 83linux_to_bsd_sigaltstack(int lsa) 84{ 85 int bsa = 0; 86 87 if (lsa & LINUX_SS_DISABLE) 88 bsa |= SS_DISABLE; 89 if (lsa & LINUX_SS_ONSTACK) 90 bsa |= SS_ONSTACK; 91 return (bsa); 92} 93 94int 95bsd_to_linux_sigaltstack(int bsa) 96{ 97 int lsa = 0; 98 99 if (bsa & SS_DISABLE) 100 lsa |= LINUX_SS_DISABLE; 101 if (bsa & SS_ONSTACK) 102 lsa |= LINUX_SS_ONSTACK; 103 return (lsa); 104} 105 106int 107linux_execve(struct thread *td, struct linux_execve_args *args) 108{ 109 int error; 110 char *newpath; 111 struct image_args eargs; 112 113 error = linux_emul_convpath(td, args->path, UIO_USERSPACE, 114 &newpath, 0); 115 if (newpath == NULL) 116 return (error); 117 118#ifdef DEBUG 119 if 
(ldebug(execve)) 120 printf(ARGS(execve, "%s"), newpath); 121#endif 122 123 error = exec_copyin_args(&eargs, newpath, UIO_SYSSPACE, 124 args->argp, args->envp); 125 free(newpath, M_TEMP); 126 if (error == 0) 127 kern_execve(td, &eargs, NULL); 128 exec_free_args(&eargs); 129 return (error); 130} 131 132struct l_ipc_kludge { 133 struct l_msgbuf *msgp; 134 l_long msgtyp; 135}; 136 137int 138linux_ipc(struct thread *td, struct linux_ipc_args *args) 139{ 140 141 switch (args->what & 0xFFFF) { 142 case LINUX_SEMOP: { 143 struct linux_semop_args a; 144 145 a.semid = args->arg1; 146 a.tsops = args->ptr; 147 a.nsops = args->arg2; 148 return (linux_semop(td, &a)); 149 } 150 case LINUX_SEMGET: { 151 struct linux_semget_args a; 152 153 a.key = args->arg1; 154 a.nsems = args->arg2; 155 a.semflg = args->arg3; 156 return (linux_semget(td, &a)); 157 } 158 case LINUX_SEMCTL: { 159 struct linux_semctl_args a; 160 int error; 161 162 a.semid = args->arg1; 163 a.semnum = args->arg2; 164 a.cmd = args->arg3; 165 error = copyin(args->ptr, &a.arg, sizeof(a.arg)); 166 if (error) 167 return (error); 168 return (linux_semctl(td, &a)); 169 } 170 case LINUX_MSGSND: { 171 struct linux_msgsnd_args a; 172 173 a.msqid = args->arg1; 174 a.msgp = args->ptr; 175 a.msgsz = args->arg2; 176 a.msgflg = args->arg3; 177 return (linux_msgsnd(td, &a)); 178 } 179 case LINUX_MSGRCV: { 180 struct linux_msgrcv_args a; 181 182 a.msqid = args->arg1; 183 a.msgsz = args->arg2; 184 a.msgflg = args->arg3; 185 if ((args->what >> 16) == 0) { 186 struct l_ipc_kludge tmp; 187 int error; 188 189 if (args->ptr == NULL) 190 return (EINVAL); 191 error = copyin(args->ptr, &tmp, sizeof(tmp)); 192 if (error) 193 return (error); 194 a.msgp = tmp.msgp; 195 a.msgtyp = tmp.msgtyp; 196 } else { 197 a.msgp = args->ptr; 198 a.msgtyp = args->arg5; 199 } 200 return (linux_msgrcv(td, &a)); 201 } 202 case LINUX_MSGGET: { 203 struct linux_msgget_args a; 204 205 a.key = args->arg1; 206 a.msgflg = args->arg2; 207 return (linux_msgget(td, &a)); 
208 } 209 case LINUX_MSGCTL: { 210 struct linux_msgctl_args a; 211 212 a.msqid = args->arg1; 213 a.cmd = args->arg2; 214 a.buf = args->ptr; 215 return (linux_msgctl(td, &a)); 216 } 217 case LINUX_SHMAT: { 218 struct linux_shmat_args a; 219 220 a.shmid = args->arg1; 221 a.shmaddr = args->ptr; 222 a.shmflg = args->arg2; 223 a.raddr = (l_ulong *)args->arg3; 224 return (linux_shmat(td, &a)); 225 } 226 case LINUX_SHMDT: { 227 struct linux_shmdt_args a; 228 229 a.shmaddr = args->ptr; 230 return (linux_shmdt(td, &a)); 231 } 232 case LINUX_SHMGET: { 233 struct linux_shmget_args a; 234 235 a.key = args->arg1; 236 a.size = args->arg2; 237 a.shmflg = args->arg3; 238 return (linux_shmget(td, &a)); 239 } 240 case LINUX_SHMCTL: { 241 struct linux_shmctl_args a; 242 243 a.shmid = args->arg1; 244 a.cmd = args->arg2; 245 a.buf = args->ptr; 246 return (linux_shmctl(td, &a)); 247 } 248 default: 249 break; 250 } 251 252 return (EINVAL); 253} 254 255int 256linux_old_select(struct thread *td, struct linux_old_select_args *args) 257{ 258 struct l_old_select_argv linux_args; 259 struct linux_select_args newsel; 260 int error; 261 262#ifdef DEBUG 263 if (ldebug(old_select)) 264 printf(ARGS(old_select, "%p"), args->ptr); 265#endif 266 267 error = copyin(args->ptr, &linux_args, sizeof(linux_args)); 268 if (error) 269 return (error); 270 271 newsel.nfds = linux_args.nfds; 272 newsel.readfds = linux_args.readfds; 273 newsel.writefds = linux_args.writefds; 274 newsel.exceptfds = linux_args.exceptfds; 275 newsel.timeout = linux_args.timeout; 276 return (linux_select(td, &newsel)); 277} 278 279int 280linux_fork(struct thread *td, struct linux_fork_args *args) 281{ 282 int error; 283 284#ifdef DEBUG 285 if (ldebug(fork)) 286 printf(ARGS(fork, "")); 287#endif 288 289 if ((error = fork(td, (struct fork_args *)args)) != 0) 290 return (error); 291 292 if (td->td_retval[1] == 1) 293 td->td_retval[0] = 0; 294 return (0); 295} 296 297int 298linux_vfork(struct thread *td, struct linux_vfork_args *args) 
299{ 300 int error; 301 302#ifdef DEBUG 303 if (ldebug(vfork)) 304 printf(ARGS(vfork, "")); 305#endif 306 307 if ((error = vfork(td, (struct vfork_args *)args)) != 0) 308 return (error); 309 /* Are we the child? */ 310 if (td->td_retval[1] == 1) 311 td->td_retval[0] = 0; 312 return (0); 313} 314 315#define CLONE_VM 0x100 316#define CLONE_FS 0x200 317#define CLONE_FILES 0x400 318#define CLONE_SIGHAND 0x800 319#define CLONE_PID 0x1000 320 321int 322linux_clone(struct thread *td, struct linux_clone_args *args) 323{ 324 int error, ff = RFPROC | RFSTOPPED; 325 struct proc *p2; 326 struct thread *td2; 327 int exit_signal; 328 329#ifdef DEBUG 330 if (ldebug(clone)) { 331 printf(ARGS(clone, "flags %x, stack %x"), 332 (unsigned int)args->flags, (unsigned int)args->stack); 333 if (args->flags & CLONE_PID) 334 printf(LMSG("CLONE_PID not yet supported")); 335 } 336#endif 337 338 if (!args->stack) 339 return (EINVAL); 340 341 exit_signal = args->flags & 0x000000ff; 342 if (exit_signal >= LINUX_NSIG) 343 return (EINVAL); 344 345 if (exit_signal <= LINUX_SIGTBLSZ) 346 exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)]; 347 348 if (args->flags & CLONE_VM) 349 ff |= RFMEM; 350 if (args->flags & CLONE_SIGHAND) 351 ff |= RFSIGSHARE; 352 if (!(args->flags & CLONE_FILES)) 353 ff |= RFFDG; 354 355 error = fork1(td, ff, 0, &p2); 356 if (error) 357 return (error); 358 359 360 PROC_LOCK(p2); 361 p2->p_sigparent = exit_signal; 362 PROC_UNLOCK(p2); 363 td2 = FIRST_THREAD_IN_PROC(p2); 364 td2->td_frame->tf_esp = (unsigned int)args->stack; 365 366#ifdef DEBUG 367 if (ldebug(clone)) 368 printf(LMSG("clone: successful rfork to %ld, stack %p sig = %d"), 369 (long)p2->p_pid, args->stack, exit_signal); 370#endif 371 372 /* 373 * Make this runnable after we are finished with it. 
374 */ 375 mtx_lock_spin(&sched_lock); 376 TD_SET_CAN_RUN(td2); 377 setrunqueue(td2, SRQ_BORING); 378 mtx_unlock_spin(&sched_lock); 379 380 td->td_retval[0] = p2->p_pid; 381 td->td_retval[1] = 0; 382 return (0); 383} 384 385/* XXX move */ 386struct l_mmap_argv { 387 l_caddr_t addr; 388 l_int len; 389 l_int prot; 390 l_int flags; 391 l_int fd; 392 l_int pos; 393}; 394 395#define STACK_SIZE (2 * 1024 * 1024) 396#define GUARD_SIZE (4 * PAGE_SIZE) 397 398static int linux_mmap_common(struct thread *, struct l_mmap_argv *); 399 400int 401linux_mmap2(struct thread *td, struct linux_mmap2_args *args) 402{ 403 struct l_mmap_argv linux_args; 404 405#ifdef DEBUG 406 if (ldebug(mmap2)) 407 printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"), 408 (void *)args->addr, args->len, args->prot, 409 args->flags, args->fd, args->pgoff); 410#endif 411 412 linux_args.addr = (l_caddr_t)args->addr; 413 linux_args.len = args->len; 414 linux_args.prot = args->prot; 415 linux_args.flags = args->flags; 416 linux_args.fd = args->fd; 417 linux_args.pos = args->pgoff * PAGE_SIZE; 418 419 return (linux_mmap_common(td, &linux_args)); 420} 421 422int 423linux_mmap(struct thread *td, struct linux_mmap_args *args) 424{ 425 int error; 426 struct l_mmap_argv linux_args; 427 428 error = copyin(args->ptr, &linux_args, sizeof(linux_args)); 429 if (error) 430 return (error); 431 432#ifdef DEBUG 433 if (ldebug(mmap)) 434 printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"), 435 (void *)linux_args.addr, linux_args.len, linux_args.prot, 436 linux_args.flags, linux_args.fd, linux_args.pos); 437#endif 438 439 return (linux_mmap_common(td, &linux_args)); 440} 441 442static int 443linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args) 444{ 445 struct proc *p = td->td_proc; 446 struct mmap_args /* { 447 caddr_t addr; 448 size_t len; 449 int prot; 450 int flags; 451 int fd; 452 long pad; 453 off_t pos; 454 } */ bsd_args; 455 int error; 456 457 error = 0; 458 bsd_args.flags = 0; 459 if (linux_args->flags & 
LINUX_MAP_SHARED) 460 bsd_args.flags |= MAP_SHARED; 461 if (linux_args->flags & LINUX_MAP_PRIVATE) 462 bsd_args.flags |= MAP_PRIVATE; 463 if (linux_args->flags & LINUX_MAP_FIXED) 464 bsd_args.flags |= MAP_FIXED; 465 if (linux_args->flags & LINUX_MAP_ANON) 466 bsd_args.flags |= MAP_ANON; 467 else 468 bsd_args.flags |= MAP_NOSYNC; 469 if (linux_args->flags & LINUX_MAP_GROWSDOWN) { 470 bsd_args.flags |= MAP_STACK; 471 472 /* The linux MAP_GROWSDOWN option does not limit auto 473 * growth of the region. Linux mmap with this option 474 * takes as addr the inital BOS, and as len, the initial 475 * region size. It can then grow down from addr without 476 * limit. However, linux threads has an implicit internal 477 * limit to stack size of STACK_SIZE. Its just not 478 * enforced explicitly in linux. But, here we impose 479 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack 480 * region, since we can do this with our mmap. 481 * 482 * Our mmap with MAP_STACK takes addr as the maximum 483 * downsize limit on BOS, and as len the max size of 484 * the region. It them maps the top SGROWSIZ bytes, 485 * and autgrows the region down, up to the limit 486 * in addr. 487 * 488 * If we don't use the MAP_STACK option, the effect 489 * of this code is to allocate a stack region of a 490 * fixed size of (STACK_SIZE - GUARD_SIZE). 491 */ 492 493 /* This gives us TOS */ 494 bsd_args.addr = linux_args->addr + linux_args->len; 495 496 if (bsd_args.addr > p->p_vmspace->vm_maxsaddr) { 497 /* Some linux apps will attempt to mmap 498 * thread stacks near the top of their 499 * address space. If their TOS is greater 500 * than vm_maxsaddr, vm_map_growstack() 501 * will confuse the thread stack with the 502 * process stack and deliver a SEGV if they 503 * attempt to grow the thread stack past their 504 * current stacksize rlimit. To avoid this, 505 * adjust vm_maxsaddr upwards to reflect 506 * the current stacksize rlimit rather 507 * than the maximum possible stacksize. 
508 * It would be better to adjust the 509 * mmap'ed region, but some apps do not check 510 * mmap's return value. 511 */ 512 PROC_LOCK(p); 513 p->p_vmspace->vm_maxsaddr = (char *)USRSTACK - 514 lim_cur(p, RLIMIT_STACK); 515 PROC_UNLOCK(p); 516 } 517 518 /* This gives us our maximum stack size */ 519 if (linux_args->len > STACK_SIZE - GUARD_SIZE) 520 bsd_args.len = linux_args->len; 521 else 522 bsd_args.len = STACK_SIZE - GUARD_SIZE; 523 524 /* This gives us a new BOS. If we're using VM_STACK, then 525 * mmap will just map the top SGROWSIZ bytes, and let 526 * the stack grow down to the limit at BOS. If we're 527 * not using VM_STACK we map the full stack, since we 528 * don't have a way to autogrow it. 529 */ 530 bsd_args.addr -= bsd_args.len; 531 } else { 532 bsd_args.addr = linux_args->addr; 533 bsd_args.len = linux_args->len; 534 } 535 536 bsd_args.prot = linux_args->prot | PROT_READ; /* always required */ 537 if (linux_args->flags & LINUX_MAP_ANON) 538 bsd_args.fd = -1; 539 else 540 bsd_args.fd = linux_args->fd; 541 bsd_args.pos = linux_args->pos; 542 bsd_args.pad = 0; 543 544#ifdef DEBUG 545 if (ldebug(mmap)) 546 printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n", 547 __func__, 548 (void *)bsd_args.addr, bsd_args.len, bsd_args.prot, 549 bsd_args.flags, bsd_args.fd, (int)bsd_args.pos); 550#endif 551 error = mmap(td, &bsd_args); 552#ifdef DEBUG 553 if (ldebug(mmap)) 554 printf("-> %s() return: 0x%x (0x%08x)\n", 555 __func__, error, (u_int)td->td_retval[0]); 556#endif 557 return (error); 558} 559 560int 561linux_pipe(struct thread *td, struct linux_pipe_args *args) 562{ 563 int error; 564 int reg_edx; 565 566#ifdef DEBUG 567 if (ldebug(pipe)) 568 printf(ARGS(pipe, "*")); 569#endif 570 571 reg_edx = td->td_retval[1]; 572 error = pipe(td, 0); 573 if (error) { 574 td->td_retval[1] = reg_edx; 575 return (error); 576 } 577 578 error = copyout(td->td_retval, args->pipefds, 2*sizeof(int)); 579 if (error) { 580 td->td_retval[1] = reg_edx; 581 return (error); 582 } 583 584 
td->td_retval[1] = reg_edx; 585 td->td_retval[0] = 0; 586 return (0); 587} 588 589int 590linux_ioperm(struct thread *td, struct linux_ioperm_args *args) 591{ 592 int error; 593 struct i386_ioperm_args iia; 594 595 iia.start = args->start; 596 iia.length = args->length; 597 iia.enable = args->enable; 598 mtx_lock(&Giant); 599 error = i386_set_ioperm(td, &iia); 600 mtx_unlock(&Giant); 601 return (error); 602} 603 604int 605linux_iopl(struct thread *td, struct linux_iopl_args *args) 606{ 607 int error; 608 609 if (args->level < 0 || args->level > 3) 610 return (EINVAL); 611 if ((error = suser(td)) != 0) 612 return (error); 613 if ((error = securelevel_gt(td->td_ucred, 0)) != 0) 614 return (error); 615 td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) | 616 (args->level * (PSL_IOPL / 3)); 617 return (0); 618} 619 620int 621linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap) 622{ 623 int error; 624 struct i386_ldt_args ldt; 625 struct l_descriptor ld; 626 union descriptor desc; 627 628 if (uap->ptr == NULL) 629 return (EINVAL); 630 631 switch (uap->func) { 632 case 0x00: /* read_ldt */ 633 ldt.start = 0; 634 ldt.descs = uap->ptr; 635 ldt.num = uap->bytecount / sizeof(union descriptor); 636 mtx_lock(&Giant); 637 error = i386_get_ldt(td, &ldt); 638 td->td_retval[0] *= sizeof(union descriptor); 639 mtx_unlock(&Giant); 640 break; 641 case 0x01: /* write_ldt */ 642 case 0x11: /* write_ldt */ 643 if (uap->bytecount != sizeof(ld)) 644 return (EINVAL); 645 646 error = copyin(uap->ptr, &ld, sizeof(ld)); 647 if (error) 648 return (error); 649 650 ldt.start = ld.entry_number; 651 ldt.descs = &desc; 652 ldt.num = 1; 653 desc.sd.sd_lolimit = (ld.limit & 0x0000ffff); 654 desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16; 655 desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff); 656 desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24; 657 desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) | 658 (ld.contents << 2); 659 desc.sd.sd_dpl = 3; 660 
desc.sd.sd_p = (ld.seg_not_present ^ 1); 661 desc.sd.sd_xx = 0; 662 desc.sd.sd_def32 = ld.seg_32bit; 663 desc.sd.sd_gran = ld.limit_in_pages; 664 mtx_lock(&Giant); 665 error = i386_set_ldt(td, &ldt, &desc); 666 mtx_unlock(&Giant); 667 break; 668 default: 669 error = EINVAL; 670 break; 671 } 672 673 if (error == EOPNOTSUPP) { 674 printf("linux: modify_ldt needs kernel option USER_LDT\n"); 675 error = ENOSYS; 676 } 677 678 return (error); 679} 680 681int 682linux_sigaction(struct thread *td, struct linux_sigaction_args *args) 683{ 684 l_osigaction_t osa; 685 l_sigaction_t act, oact; 686 int error; 687 688#ifdef DEBUG 689 if (ldebug(sigaction)) 690 printf(ARGS(sigaction, "%d, %p, %p"), 691 args->sig, (void *)args->nsa, (void *)args->osa); 692#endif 693 694 if (args->nsa != NULL) { 695 error = copyin(args->nsa, &osa, sizeof(l_osigaction_t)); 696 if (error) 697 return (error); 698 act.lsa_handler = osa.lsa_handler; 699 act.lsa_flags = osa.lsa_flags; 700 act.lsa_restorer = osa.lsa_restorer; 701 LINUX_SIGEMPTYSET(act.lsa_mask); 702 act.lsa_mask.__bits[0] = osa.lsa_mask; 703 } 704 705 error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL, 706 args->osa ? &oact : NULL); 707 708 if (args->osa != NULL && !error) { 709 osa.lsa_handler = oact.lsa_handler; 710 osa.lsa_flags = oact.lsa_flags; 711 osa.lsa_restorer = oact.lsa_restorer; 712 osa.lsa_mask = oact.lsa_mask.__bits[0]; 713 error = copyout(&osa, args->osa, sizeof(l_osigaction_t)); 714 } 715 716 return (error); 717} 718 719/* 720 * Linux has two extra args, restart and oldmask. We dont use these, 721 * but it seems that "restart" is actually a context pointer that 722 * enables the signal to happen with a different register set. 
723 */ 724int 725linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args) 726{ 727 sigset_t sigmask; 728 l_sigset_t mask; 729 730#ifdef DEBUG 731 if (ldebug(sigsuspend)) 732 printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask); 733#endif 734 735 LINUX_SIGEMPTYSET(mask); 736 mask.__bits[0] = args->mask; 737 linux_to_bsd_sigset(&mask, &sigmask); 738 return (kern_sigsuspend(td, sigmask)); 739} 740 741int 742linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap) 743{ 744 l_sigset_t lmask; 745 sigset_t sigmask; 746 int error; 747 748#ifdef DEBUG 749 if (ldebug(rt_sigsuspend)) 750 printf(ARGS(rt_sigsuspend, "%p, %d"), 751 (void *)uap->newset, uap->sigsetsize); 752#endif 753 754 if (uap->sigsetsize != sizeof(l_sigset_t)) 755 return (EINVAL); 756 757 error = copyin(uap->newset, &lmask, sizeof(l_sigset_t)); 758 if (error) 759 return (error); 760 761 linux_to_bsd_sigset(&lmask, &sigmask); 762 return (kern_sigsuspend(td, sigmask)); 763} 764 765int 766linux_pause(struct thread *td, struct linux_pause_args *args) 767{ 768 struct proc *p = td->td_proc; 769 sigset_t sigmask; 770 771#ifdef DEBUG 772 if (ldebug(pause)) 773 printf(ARGS(pause, "")); 774#endif 775 776 PROC_LOCK(p); 777 sigmask = td->td_sigmask; 778 PROC_UNLOCK(p); 779 return (kern_sigsuspend(td, sigmask)); 780} 781 782int 783linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap) 784{ 785 stack_t ss, oss; 786 l_stack_t lss; 787 int error; 788 789#ifdef DEBUG 790 if (ldebug(sigaltstack)) 791 printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss); 792#endif 793 794 if (uap->uss != NULL) { 795 error = copyin(uap->uss, &lss, sizeof(l_stack_t)); 796 if (error) 797 return (error); 798 799 ss.ss_sp = lss.ss_sp; 800 ss.ss_size = lss.ss_size; 801 ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags); 802 } 803 error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL, 804 (uap->uoss != NULL) ? 
&oss : NULL); 805 if (!error && uap->uoss != NULL) { 806 lss.ss_sp = oss.ss_sp; 807 lss.ss_size = oss.ss_size; 808 lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags); 809 error = copyout(&lss, uap->uoss, sizeof(l_stack_t)); 810 } 811 812 return (error); 813} 814 815int 816linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args) 817{ 818 struct ftruncate_args sa; 819 820#ifdef DEBUG 821 if (ldebug(ftruncate64)) 822 printf(ARGS(ftruncate64, "%u, %jd"), args->fd, 823 (intmax_t)args->length); 824#endif 825 826 sa.fd = args->fd; 827 sa.pad = 0; 828 sa.length = args->length; 829 return ftruncate(td, &sa); 830} 831 832int 833linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args) 834{ 835 /* 836 * Return an error code instead of raising a SIGSYS so that 837 * the caller will fall back to simpler LDT methods. 838 */ 839 return (ENOSYS); 840} 841 842int 843linux_gettid(struct thread *td, struct linux_gettid_args *args) 844{ 845 846 td->td_retval[0] = td->td_proc->p_pid; 847 return (0); 848} 849 850int 851linux_tkill(struct thread *td, struct linux_tkill_args *args) 852{ 853 854 return (linux_kill(td, (struct linux_kill_args *) args)); 855} 856 857