/* linux_machdep.c revision 140862 */
/*-
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: head/sys/i386/linux/linux_machdep.c 140862 2005-01-26 13:59:46Z sobomax $"); 31 32#include <sys/param.h> 33#include <sys/systm.h> 34#include <sys/lock.h> 35#include <sys/mman.h> 36#include <sys/mutex.h> 37#include <sys/proc.h> 38#include <sys/resource.h> 39#include <sys/resourcevar.h> 40#include <sys/signalvar.h> 41#include <sys/syscallsubr.h> 42#include <sys/sysproto.h> 43#include <sys/unistd.h> 44 45#include <machine/frame.h> 46#include <machine/psl.h> 47#include <machine/segments.h> 48#include <machine/sysarch.h> 49 50#include <vm/vm.h> 51#include <vm/pmap.h> 52#include <vm/vm_map.h> 53 54#include <i386/linux/linux.h> 55#include <i386/linux/linux_proto.h> 56#include <compat/linux/linux_ipc.h> 57#include <compat/linux/linux_signal.h> 58#include <compat/linux/linux_util.h> 59 60struct l_descriptor { 61 l_uint entry_number; 62 l_ulong base_addr; 63 l_uint limit; 64 l_uint seg_32bit:1; 65 l_uint contents:2; 66 l_uint read_exec_only:1; 67 l_uint limit_in_pages:1; 68 l_uint seg_not_present:1; 69 l_uint useable:1; 70}; 71 72struct l_old_select_argv { 73 l_int nfds; 74 l_fd_set *readfds; 75 l_fd_set *writefds; 76 l_fd_set *exceptfds; 77 struct l_timeval *timeout; 78}; 79 80int 81linux_to_bsd_sigaltstack(int lsa) 82{ 83 int bsa = 0; 84 85 if (lsa & LINUX_SS_DISABLE) 86 bsa |= SS_DISABLE; 87 if (lsa & LINUX_SS_ONSTACK) 88 bsa |= SS_ONSTACK; 89 return (bsa); 90} 91 92int 93bsd_to_linux_sigaltstack(int bsa) 94{ 95 int lsa = 0; 96 97 if (bsa & SS_DISABLE) 98 lsa |= LINUX_SS_DISABLE; 99 if (bsa & SS_ONSTACK) 100 lsa |= LINUX_SS_ONSTACK; 101 return (lsa); 102} 103 104int 105linux_execve(struct thread *td, struct linux_execve_args *args) 106{ 107 struct execve_args bsd; 108 caddr_t sg; 109 110 sg = stackgap_init(); 111 CHECKALTEXIST(td, &sg, args->path); 112 113#ifdef DEBUG 114 if (ldebug(execve)) 115 printf(ARGS(execve, "%s"), args->path); 116#endif 117 118 bsd.fname = args->path; 119 bsd.argv = args->argp; 120 bsd.envv = 
args->envp; 121 return (execve(td, &bsd)); 122} 123 124struct l_ipc_kludge { 125 struct l_msgbuf *msgp; 126 l_long msgtyp; 127}; 128 129int 130linux_ipc(struct thread *td, struct linux_ipc_args *args) 131{ 132 133 switch (args->what & 0xFFFF) { 134 case LINUX_SEMOP: { 135 struct linux_semop_args a; 136 137 a.semid = args->arg1; 138 a.tsops = args->ptr; 139 a.nsops = args->arg2; 140 return (linux_semop(td, &a)); 141 } 142 case LINUX_SEMGET: { 143 struct linux_semget_args a; 144 145 a.key = args->arg1; 146 a.nsems = args->arg2; 147 a.semflg = args->arg3; 148 return (linux_semget(td, &a)); 149 } 150 case LINUX_SEMCTL: { 151 struct linux_semctl_args a; 152 int error; 153 154 a.semid = args->arg1; 155 a.semnum = args->arg2; 156 a.cmd = args->arg3; 157 error = copyin(args->ptr, &a.arg, sizeof(a.arg)); 158 if (error) 159 return (error); 160 return (linux_semctl(td, &a)); 161 } 162 case LINUX_MSGSND: { 163 struct linux_msgsnd_args a; 164 165 a.msqid = args->arg1; 166 a.msgp = args->ptr; 167 a.msgsz = args->arg2; 168 a.msgflg = args->arg3; 169 return (linux_msgsnd(td, &a)); 170 } 171 case LINUX_MSGRCV: { 172 struct linux_msgrcv_args a; 173 174 a.msqid = args->arg1; 175 a.msgsz = args->arg2; 176 a.msgflg = args->arg3; 177 if ((args->what >> 16) == 0) { 178 struct l_ipc_kludge tmp; 179 int error; 180 181 if (args->ptr == NULL) 182 return (EINVAL); 183 error = copyin(args->ptr, &tmp, sizeof(tmp)); 184 if (error) 185 return (error); 186 a.msgp = tmp.msgp; 187 a.msgtyp = tmp.msgtyp; 188 } else { 189 a.msgp = args->ptr; 190 a.msgtyp = args->arg5; 191 } 192 return (linux_msgrcv(td, &a)); 193 } 194 case LINUX_MSGGET: { 195 struct linux_msgget_args a; 196 197 a.key = args->arg1; 198 a.msgflg = args->arg2; 199 return (linux_msgget(td, &a)); 200 } 201 case LINUX_MSGCTL: { 202 struct linux_msgctl_args a; 203 204 a.msqid = args->arg1; 205 a.cmd = args->arg2; 206 a.buf = args->ptr; 207 return (linux_msgctl(td, &a)); 208 } 209 case LINUX_SHMAT: { 210 struct linux_shmat_args a; 211 212 
a.shmid = args->arg1; 213 a.shmaddr = args->ptr; 214 a.shmflg = args->arg2; 215 a.raddr = (l_ulong *)args->arg3; 216 return (linux_shmat(td, &a)); 217 } 218 case LINUX_SHMDT: { 219 struct linux_shmdt_args a; 220 221 a.shmaddr = args->ptr; 222 return (linux_shmdt(td, &a)); 223 } 224 case LINUX_SHMGET: { 225 struct linux_shmget_args a; 226 227 a.key = args->arg1; 228 a.size = args->arg2; 229 a.shmflg = args->arg3; 230 return (linux_shmget(td, &a)); 231 } 232 case LINUX_SHMCTL: { 233 struct linux_shmctl_args a; 234 235 a.shmid = args->arg1; 236 a.cmd = args->arg2; 237 a.buf = args->ptr; 238 return (linux_shmctl(td, &a)); 239 } 240 default: 241 break; 242 } 243 244 return (EINVAL); 245} 246 247int 248linux_old_select(struct thread *td, struct linux_old_select_args *args) 249{ 250 struct l_old_select_argv linux_args; 251 struct linux_select_args newsel; 252 int error; 253 254#ifdef DEBUG 255 if (ldebug(old_select)) 256 printf(ARGS(old_select, "%p"), args->ptr); 257#endif 258 259 error = copyin(args->ptr, &linux_args, sizeof(linux_args)); 260 if (error) 261 return (error); 262 263 newsel.nfds = linux_args.nfds; 264 newsel.readfds = linux_args.readfds; 265 newsel.writefds = linux_args.writefds; 266 newsel.exceptfds = linux_args.exceptfds; 267 newsel.timeout = linux_args.timeout; 268 return (linux_select(td, &newsel)); 269} 270 271int 272linux_fork(struct thread *td, struct linux_fork_args *args) 273{ 274 int error; 275 276#ifdef DEBUG 277 if (ldebug(fork)) 278 printf(ARGS(fork, "")); 279#endif 280 281 if ((error = fork(td, (struct fork_args *)args)) != 0) 282 return (error); 283 284 if (td->td_retval[1] == 1) 285 td->td_retval[0] = 0; 286 return (0); 287} 288 289int 290linux_vfork(struct thread *td, struct linux_vfork_args *args) 291{ 292 int error; 293 294#ifdef DEBUG 295 if (ldebug(vfork)) 296 printf(ARGS(vfork, "")); 297#endif 298 299 if ((error = vfork(td, (struct vfork_args *)args)) != 0) 300 return (error); 301 /* Are we the child? 
*/ 302 if (td->td_retval[1] == 1) 303 td->td_retval[0] = 0; 304 return (0); 305} 306 307#define CLONE_VM 0x100 308#define CLONE_FS 0x200 309#define CLONE_FILES 0x400 310#define CLONE_SIGHAND 0x800 311#define CLONE_PID 0x1000 312 313int 314linux_clone(struct thread *td, struct linux_clone_args *args) 315{ 316 int error, ff = RFPROC | RFSTOPPED; 317 struct proc *p2; 318 struct thread *td2; 319 int exit_signal; 320 321#ifdef DEBUG 322 if (ldebug(clone)) { 323 printf(ARGS(clone, "flags %x, stack %x"), 324 (unsigned int)args->flags, (unsigned int)args->stack); 325 if (args->flags & CLONE_PID) 326 printf(LMSG("CLONE_PID not yet supported")); 327 } 328#endif 329 330 if (!args->stack) 331 return (EINVAL); 332 333 exit_signal = args->flags & 0x000000ff; 334 if (exit_signal >= LINUX_NSIG) 335 return (EINVAL); 336 337 if (exit_signal <= LINUX_SIGTBLSZ) 338 exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)]; 339 340 if (args->flags & CLONE_VM) 341 ff |= RFMEM; 342 if (args->flags & CLONE_SIGHAND) 343 ff |= RFSIGSHARE; 344 if (!(args->flags & CLONE_FILES)) 345 ff |= RFFDG; 346 347 error = fork1(td, ff, 0, &p2); 348 if (error) 349 return (error); 350 351 352 PROC_LOCK(p2); 353 p2->p_sigparent = exit_signal; 354 PROC_UNLOCK(p2); 355 td2 = FIRST_THREAD_IN_PROC(p2); 356 td2->td_frame->tf_esp = (unsigned int)args->stack; 357 358#ifdef DEBUG 359 if (ldebug(clone)) 360 printf(LMSG("clone: successful rfork to %ld, stack %p sig = %d"), 361 (long)p2->p_pid, args->stack, exit_signal); 362#endif 363 364 /* 365 * Make this runnable after we are finished with it. 
366 */ 367 mtx_lock_spin(&sched_lock); 368 TD_SET_CAN_RUN(td2); 369 setrunqueue(td2, SRQ_BORING); 370 mtx_unlock_spin(&sched_lock); 371 372 td->td_retval[0] = p2->p_pid; 373 td->td_retval[1] = 0; 374 return (0); 375} 376 377/* XXX move */ 378struct l_mmap_argv { 379 l_caddr_t addr; 380 l_int len; 381 l_int prot; 382 l_int flags; 383 l_int fd; 384 l_int pos; 385}; 386 387#define STACK_SIZE (2 * 1024 * 1024) 388#define GUARD_SIZE (4 * PAGE_SIZE) 389 390static int linux_mmap_common(struct thread *, struct l_mmap_argv *); 391 392int 393linux_mmap2(struct thread *td, struct linux_mmap2_args *args) 394{ 395 struct l_mmap_argv linux_args; 396 397#ifdef DEBUG 398 if (ldebug(mmap2)) 399 printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"), 400 (void *)args->addr, args->len, args->prot, 401 args->flags, args->fd, args->pgoff); 402#endif 403 404 linux_args.addr = (l_caddr_t)args->addr; 405 linux_args.len = args->len; 406 linux_args.prot = args->prot; 407 linux_args.flags = args->flags; 408 linux_args.fd = args->fd; 409 linux_args.pos = args->pgoff * PAGE_SIZE; 410 411 return (linux_mmap_common(td, &linux_args)); 412} 413 414int 415linux_mmap(struct thread *td, struct linux_mmap_args *args) 416{ 417 int error; 418 struct l_mmap_argv linux_args; 419 420 error = copyin(args->ptr, &linux_args, sizeof(linux_args)); 421 if (error) 422 return (error); 423 424#ifdef DEBUG 425 if (ldebug(mmap)) 426 printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"), 427 (void *)linux_args.addr, linux_args.len, linux_args.prot, 428 linux_args.flags, linux_args.fd, linux_args.pos); 429#endif 430 431 return (linux_mmap_common(td, &linux_args)); 432} 433 434static int 435linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args) 436{ 437 struct proc *p = td->td_proc; 438 struct mmap_args /* { 439 caddr_t addr; 440 size_t len; 441 int prot; 442 int flags; 443 int fd; 444 long pad; 445 off_t pos; 446 } */ bsd_args; 447 int error; 448 449 error = 0; 450 bsd_args.flags = 0; 451 if (linux_args->flags & 
LINUX_MAP_SHARED) 452 bsd_args.flags |= MAP_SHARED; 453 if (linux_args->flags & LINUX_MAP_PRIVATE) 454 bsd_args.flags |= MAP_PRIVATE; 455 if (linux_args->flags & LINUX_MAP_FIXED) 456 bsd_args.flags |= MAP_FIXED; 457 if (linux_args->flags & LINUX_MAP_ANON) 458 bsd_args.flags |= MAP_ANON; 459 else 460 bsd_args.flags |= MAP_NOSYNC; 461 if (linux_args->flags & LINUX_MAP_GROWSDOWN) { 462 bsd_args.flags |= MAP_STACK; 463 464 /* The linux MAP_GROWSDOWN option does not limit auto 465 * growth of the region. Linux mmap with this option 466 * takes as addr the inital BOS, and as len, the initial 467 * region size. It can then grow down from addr without 468 * limit. However, linux threads has an implicit internal 469 * limit to stack size of STACK_SIZE. Its just not 470 * enforced explicitly in linux. But, here we impose 471 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack 472 * region, since we can do this with our mmap. 473 * 474 * Our mmap with MAP_STACK takes addr as the maximum 475 * downsize limit on BOS, and as len the max size of 476 * the region. It them maps the top SGROWSIZ bytes, 477 * and autgrows the region down, up to the limit 478 * in addr. 479 * 480 * If we don't use the MAP_STACK option, the effect 481 * of this code is to allocate a stack region of a 482 * fixed size of (STACK_SIZE - GUARD_SIZE). 483 */ 484 485 /* This gives us TOS */ 486 bsd_args.addr = linux_args->addr + linux_args->len; 487 488 if (bsd_args.addr > p->p_vmspace->vm_maxsaddr) { 489 /* Some linux apps will attempt to mmap 490 * thread stacks near the top of their 491 * address space. If their TOS is greater 492 * than vm_maxsaddr, vm_map_growstack() 493 * will confuse the thread stack with the 494 * process stack and deliver a SEGV if they 495 * attempt to grow the thread stack past their 496 * current stacksize rlimit. To avoid this, 497 * adjust vm_maxsaddr upwards to reflect 498 * the current stacksize rlimit rather 499 * than the maximum possible stacksize. 
500 * It would be better to adjust the 501 * mmap'ed region, but some apps do not check 502 * mmap's return value. 503 */ 504 PROC_LOCK(p); 505 p->p_vmspace->vm_maxsaddr = (char *)USRSTACK - 506 lim_cur(p, RLIMIT_STACK); 507 PROC_UNLOCK(p); 508 } 509 510 /* This gives us our maximum stack size */ 511 if (linux_args->len > STACK_SIZE - GUARD_SIZE) 512 bsd_args.len = linux_args->len; 513 else 514 bsd_args.len = STACK_SIZE - GUARD_SIZE; 515 516 /* This gives us a new BOS. If we're using VM_STACK, then 517 * mmap will just map the top SGROWSIZ bytes, and let 518 * the stack grow down to the limit at BOS. If we're 519 * not using VM_STACK we map the full stack, since we 520 * don't have a way to autogrow it. 521 */ 522 bsd_args.addr -= bsd_args.len; 523 } else { 524 bsd_args.addr = linux_args->addr; 525 bsd_args.len = linux_args->len; 526 } 527 528 bsd_args.prot = linux_args->prot | PROT_READ; /* always required */ 529 if (linux_args->flags & LINUX_MAP_ANON) 530 bsd_args.fd = -1; 531 else 532 bsd_args.fd = linux_args->fd; 533 bsd_args.pos = linux_args->pos; 534 bsd_args.pad = 0; 535 536#ifdef DEBUG 537 if (ldebug(mmap)) 538 printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n", 539 __func__, 540 (void *)bsd_args.addr, bsd_args.len, bsd_args.prot, 541 bsd_args.flags, bsd_args.fd, (int)bsd_args.pos); 542#endif 543 error = mmap(td, &bsd_args); 544#ifdef DEBUG 545 if (ldebug(mmap)) 546 printf("-> %s() return: 0x%x (0x%08x)\n", 547 __func__, error, (u_int)td->td_retval[0]); 548#endif 549 return (error); 550} 551 552int 553linux_pipe(struct thread *td, struct linux_pipe_args *args) 554{ 555 int error; 556 int reg_edx; 557 558#ifdef DEBUG 559 if (ldebug(pipe)) 560 printf(ARGS(pipe, "*")); 561#endif 562 563 reg_edx = td->td_retval[1]; 564 error = pipe(td, 0); 565 if (error) { 566 td->td_retval[1] = reg_edx; 567 return (error); 568 } 569 570 error = copyout(td->td_retval, args->pipefds, 2*sizeof(int)); 571 if (error) { 572 td->td_retval[1] = reg_edx; 573 return (error); 574 } 575 576 
td->td_retval[1] = reg_edx; 577 td->td_retval[0] = 0; 578 return (0); 579} 580 581int 582linux_ioperm(struct thread *td, struct linux_ioperm_args *args) 583{ 584 int error; 585 struct i386_ioperm_args iia; 586 587 iia.start = args->start; 588 iia.length = args->length; 589 iia.enable = args->enable; 590 mtx_lock(&Giant); 591 error = i386_set_ioperm(td, &iia); 592 mtx_unlock(&Giant); 593 return (error); 594} 595 596int 597linux_iopl(struct thread *td, struct linux_iopl_args *args) 598{ 599 int error; 600 601 if (args->level < 0 || args->level > 3) 602 return (EINVAL); 603 if ((error = suser(td)) != 0) 604 return (error); 605 if ((error = securelevel_gt(td->td_ucred, 0)) != 0) 606 return (error); 607 td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) | 608 (args->level * (PSL_IOPL / 3)); 609 return (0); 610} 611 612int 613linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap) 614{ 615 int error; 616 struct i386_ldt_args ldt; 617 struct l_descriptor ld; 618 union descriptor desc; 619 620 if (uap->ptr == NULL) 621 return (EINVAL); 622 623 switch (uap->func) { 624 case 0x00: /* read_ldt */ 625 ldt.start = 0; 626 ldt.descs = uap->ptr; 627 ldt.num = uap->bytecount / sizeof(union descriptor); 628 mtx_lock(&Giant); 629 error = i386_get_ldt(td, &ldt); 630 td->td_retval[0] *= sizeof(union descriptor); 631 mtx_unlock(&Giant); 632 break; 633 case 0x01: /* write_ldt */ 634 case 0x11: /* write_ldt */ 635 if (uap->bytecount != sizeof(ld)) 636 return (EINVAL); 637 638 error = copyin(uap->ptr, &ld, sizeof(ld)); 639 if (error) 640 return (error); 641 642 ldt.start = ld.entry_number; 643 ldt.descs = &desc; 644 ldt.num = 1; 645 desc.sd.sd_lolimit = (ld.limit & 0x0000ffff); 646 desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16; 647 desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff); 648 desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24; 649 desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) | 650 (ld.contents << 2); 651 desc.sd.sd_dpl = 3; 652 
desc.sd.sd_p = (ld.seg_not_present ^ 1); 653 desc.sd.sd_xx = 0; 654 desc.sd.sd_def32 = ld.seg_32bit; 655 desc.sd.sd_gran = ld.limit_in_pages; 656 mtx_lock(&Giant); 657 error = i386_set_ldt(td, &ldt, &desc); 658 mtx_unlock(&Giant); 659 break; 660 default: 661 error = EINVAL; 662 break; 663 } 664 665 if (error == EOPNOTSUPP) { 666 printf("linux: modify_ldt needs kernel option USER_LDT\n"); 667 error = ENOSYS; 668 } 669 670 return (error); 671} 672 673int 674linux_sigaction(struct thread *td, struct linux_sigaction_args *args) 675{ 676 l_osigaction_t osa; 677 l_sigaction_t act, oact; 678 int error; 679 680#ifdef DEBUG 681 if (ldebug(sigaction)) 682 printf(ARGS(sigaction, "%d, %p, %p"), 683 args->sig, (void *)args->nsa, (void *)args->osa); 684#endif 685 686 if (args->nsa != NULL) { 687 error = copyin(args->nsa, &osa, sizeof(l_osigaction_t)); 688 if (error) 689 return (error); 690 act.lsa_handler = osa.lsa_handler; 691 act.lsa_flags = osa.lsa_flags; 692 act.lsa_restorer = osa.lsa_restorer; 693 LINUX_SIGEMPTYSET(act.lsa_mask); 694 act.lsa_mask.__bits[0] = osa.lsa_mask; 695 } 696 697 error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL, 698 args->osa ? &oact : NULL); 699 700 if (args->osa != NULL && !error) { 701 osa.lsa_handler = oact.lsa_handler; 702 osa.lsa_flags = oact.lsa_flags; 703 osa.lsa_restorer = oact.lsa_restorer; 704 osa.lsa_mask = oact.lsa_mask.__bits[0]; 705 error = copyout(&osa, args->osa, sizeof(l_osigaction_t)); 706 } 707 708 return (error); 709} 710 711/* 712 * Linux has two extra args, restart and oldmask. We dont use these, 713 * but it seems that "restart" is actually a context pointer that 714 * enables the signal to happen with a different register set. 
715 */ 716int 717linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args) 718{ 719 sigset_t sigmask; 720 l_sigset_t mask; 721 722#ifdef DEBUG 723 if (ldebug(sigsuspend)) 724 printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask); 725#endif 726 727 LINUX_SIGEMPTYSET(mask); 728 mask.__bits[0] = args->mask; 729 linux_to_bsd_sigset(&mask, &sigmask); 730 return (kern_sigsuspend(td, sigmask)); 731} 732 733int 734linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap) 735{ 736 l_sigset_t lmask; 737 sigset_t sigmask; 738 int error; 739 740#ifdef DEBUG 741 if (ldebug(rt_sigsuspend)) 742 printf(ARGS(rt_sigsuspend, "%p, %d"), 743 (void *)uap->newset, uap->sigsetsize); 744#endif 745 746 if (uap->sigsetsize != sizeof(l_sigset_t)) 747 return (EINVAL); 748 749 error = copyin(uap->newset, &lmask, sizeof(l_sigset_t)); 750 if (error) 751 return (error); 752 753 linux_to_bsd_sigset(&lmask, &sigmask); 754 return (kern_sigsuspend(td, sigmask)); 755} 756 757int 758linux_pause(struct thread *td, struct linux_pause_args *args) 759{ 760 struct proc *p = td->td_proc; 761 sigset_t sigmask; 762 763#ifdef DEBUG 764 if (ldebug(pause)) 765 printf(ARGS(pause, "")); 766#endif 767 768 PROC_LOCK(p); 769 sigmask = td->td_sigmask; 770 PROC_UNLOCK(p); 771 return (kern_sigsuspend(td, sigmask)); 772} 773 774int 775linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap) 776{ 777 stack_t ss, oss; 778 l_stack_t lss; 779 int error; 780 781#ifdef DEBUG 782 if (ldebug(sigaltstack)) 783 printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss); 784#endif 785 786 if (uap->uss != NULL) { 787 error = copyin(uap->uss, &lss, sizeof(l_stack_t)); 788 if (error) 789 return (error); 790 791 ss.ss_sp = lss.ss_sp; 792 ss.ss_size = lss.ss_size; 793 ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags); 794 } 795 error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL, 796 (uap->uoss != NULL) ? 
&oss : NULL); 797 if (!error && uap->uoss != NULL) { 798 lss.ss_sp = oss.ss_sp; 799 lss.ss_size = oss.ss_size; 800 lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags); 801 error = copyout(&lss, uap->uoss, sizeof(l_stack_t)); 802 } 803 804 return (error); 805} 806 807int 808linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args) 809{ 810 struct ftruncate_args sa; 811 812#ifdef DEBUG 813 if (ldebug(ftruncate64)) 814 printf(ARGS(ftruncate64, "%u, %jd"), args->fd, 815 (intmax_t)args->length); 816#endif 817 818 sa.fd = args->fd; 819 sa.pad = 0; 820 sa.length = args->length; 821 return ftruncate(td, &sa); 822} 823 824int 825linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args) 826{ 827 /* 828 * Return an error code instead of raising a SIGSYS so that 829 * the caller will fall back to simpler LDT methods. 830 */ 831 return (ENOSYS); 832} 833 834int 835linux_gettid(struct thread *td, struct linux_gettid_args *args) 836{ 837 838 td->td_retval[0] = td->td_proc->p_pid; 839 return (0); 840} 841 842int 843linux_tkill(struct thread *td, struct linux_tkill_args *args) 844{ 845 846 return (linux_kill(td, (struct linux_kill_args *) args)); 847} 848 849