linux_machdep.c revision 218612
1/*- 2 * Copyright (c) 2000 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer 10 * in this position and unchanged. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: head/sys/i386/linux/linux_machdep.c 218612 2011-02-12 15:50:21Z dchagin $"); 31 32#include <sys/param.h> 33#include <sys/systm.h> 34#include <sys/file.h> 35#include <sys/fcntl.h> 36#include <sys/imgact.h> 37#include <sys/lock.h> 38#include <sys/malloc.h> 39#include <sys/mman.h> 40#include <sys/mutex.h> 41#include <sys/sx.h> 42#include <sys/priv.h> 43#include <sys/proc.h> 44#include <sys/queue.h> 45#include <sys/resource.h> 46#include <sys/resourcevar.h> 47#include <sys/signalvar.h> 48#include <sys/syscallsubr.h> 49#include <sys/sysproto.h> 50#include <sys/unistd.h> 51#include <sys/wait.h> 52#include <sys/sched.h> 53 54#include <machine/frame.h> 55#include <machine/psl.h> 56#include <machine/segments.h> 57#include <machine/sysarch.h> 58 59#include <vm/vm.h> 60#include <vm/pmap.h> 61#include <vm/vm_map.h> 62 63#include <i386/linux/linux.h> 64#include <i386/linux/linux_proto.h> 65#include <compat/linux/linux_ipc.h> 66#include <compat/linux/linux_misc.h> 67#include <compat/linux/linux_signal.h> 68#include <compat/linux/linux_util.h> 69#include <compat/linux/linux_emul.h> 70 71#include <i386/include/pcb.h> /* needed for pcb definition in linux_set_thread_area */ 72 73#include "opt_posix.h" 74 75extern struct sysentvec elf32_freebsd_sysvec; /* defined in i386/i386/elf_machdep.c */ 76 77struct l_descriptor { 78 l_uint entry_number; 79 l_ulong base_addr; 80 l_uint limit; 81 l_uint seg_32bit:1; 82 l_uint contents:2; 83 l_uint read_exec_only:1; 84 l_uint limit_in_pages:1; 85 l_uint seg_not_present:1; 86 l_uint useable:1; 87}; 88 89struct l_old_select_argv { 90 l_int nfds; 91 l_fd_set *readfds; 92 l_fd_set *writefds; 93 l_fd_set *exceptfds; 94 struct l_timeval *timeout; 95}; 96 97static int linux_mmap_common(struct thread *td, l_uintptr_t addr, 98 l_size_t len, l_int prot, l_int flags, l_int fd, 99 l_loff_t pos); 100 101int 102linux_to_bsd_sigaltstack(int lsa) 103{ 104 int bsa = 0; 105 106 if (lsa & LINUX_SS_DISABLE) 107 
bsa |= SS_DISABLE; 108 if (lsa & LINUX_SS_ONSTACK) 109 bsa |= SS_ONSTACK; 110 return (bsa); 111} 112 113int 114bsd_to_linux_sigaltstack(int bsa) 115{ 116 int lsa = 0; 117 118 if (bsa & SS_DISABLE) 119 lsa |= LINUX_SS_DISABLE; 120 if (bsa & SS_ONSTACK) 121 lsa |= LINUX_SS_ONSTACK; 122 return (lsa); 123} 124 125int 126linux_execve(struct thread *td, struct linux_execve_args *args) 127{ 128 int error; 129 char *newpath; 130 struct image_args eargs; 131 132 LCONVPATHEXIST(td, args->path, &newpath); 133 134#ifdef DEBUG 135 if (ldebug(execve)) 136 printf(ARGS(execve, "%s"), newpath); 137#endif 138 139 error = exec_copyin_args(&eargs, newpath, UIO_SYSSPACE, 140 args->argp, args->envp); 141 free(newpath, M_TEMP); 142 if (error == 0) 143 error = kern_execve(td, &eargs, NULL); 144 if (error == 0) 145 /* linux process can exec fbsd one, dont attempt 146 * to create emuldata for such process using 147 * linux_proc_init, this leads to a panic on KASSERT 148 * because such process has p->p_emuldata == NULL 149 */ 150 if (SV_PROC_ABI(td->td_proc) == SV_ABI_LINUX) 151 error = linux_proc_init(td, 0, 0); 152 return (error); 153} 154 155struct l_ipc_kludge { 156 struct l_msgbuf *msgp; 157 l_long msgtyp; 158}; 159 160int 161linux_ipc(struct thread *td, struct linux_ipc_args *args) 162{ 163 164 switch (args->what & 0xFFFF) { 165 case LINUX_SEMOP: { 166 struct linux_semop_args a; 167 168 a.semid = args->arg1; 169 a.tsops = args->ptr; 170 a.nsops = args->arg2; 171 return (linux_semop(td, &a)); 172 } 173 case LINUX_SEMGET: { 174 struct linux_semget_args a; 175 176 a.key = args->arg1; 177 a.nsems = args->arg2; 178 a.semflg = args->arg3; 179 return (linux_semget(td, &a)); 180 } 181 case LINUX_SEMCTL: { 182 struct linux_semctl_args a; 183 int error; 184 185 a.semid = args->arg1; 186 a.semnum = args->arg2; 187 a.cmd = args->arg3; 188 error = copyin(args->ptr, &a.arg, sizeof(a.arg)); 189 if (error) 190 return (error); 191 return (linux_semctl(td, &a)); 192 } 193 case LINUX_MSGSND: { 194 struct 
linux_msgsnd_args a; 195 196 a.msqid = args->arg1; 197 a.msgp = args->ptr; 198 a.msgsz = args->arg2; 199 a.msgflg = args->arg3; 200 return (linux_msgsnd(td, &a)); 201 } 202 case LINUX_MSGRCV: { 203 struct linux_msgrcv_args a; 204 205 a.msqid = args->arg1; 206 a.msgsz = args->arg2; 207 a.msgflg = args->arg3; 208 if ((args->what >> 16) == 0) { 209 struct l_ipc_kludge tmp; 210 int error; 211 212 if (args->ptr == NULL) 213 return (EINVAL); 214 error = copyin(args->ptr, &tmp, sizeof(tmp)); 215 if (error) 216 return (error); 217 a.msgp = tmp.msgp; 218 a.msgtyp = tmp.msgtyp; 219 } else { 220 a.msgp = args->ptr; 221 a.msgtyp = args->arg5; 222 } 223 return (linux_msgrcv(td, &a)); 224 } 225 case LINUX_MSGGET: { 226 struct linux_msgget_args a; 227 228 a.key = args->arg1; 229 a.msgflg = args->arg2; 230 return (linux_msgget(td, &a)); 231 } 232 case LINUX_MSGCTL: { 233 struct linux_msgctl_args a; 234 235 a.msqid = args->arg1; 236 a.cmd = args->arg2; 237 a.buf = args->ptr; 238 return (linux_msgctl(td, &a)); 239 } 240 case LINUX_SHMAT: { 241 struct linux_shmat_args a; 242 243 a.shmid = args->arg1; 244 a.shmaddr = args->ptr; 245 a.shmflg = args->arg2; 246 a.raddr = (l_ulong *)args->arg3; 247 return (linux_shmat(td, &a)); 248 } 249 case LINUX_SHMDT: { 250 struct linux_shmdt_args a; 251 252 a.shmaddr = args->ptr; 253 return (linux_shmdt(td, &a)); 254 } 255 case LINUX_SHMGET: { 256 struct linux_shmget_args a; 257 258 a.key = args->arg1; 259 a.size = args->arg2; 260 a.shmflg = args->arg3; 261 return (linux_shmget(td, &a)); 262 } 263 case LINUX_SHMCTL: { 264 struct linux_shmctl_args a; 265 266 a.shmid = args->arg1; 267 a.cmd = args->arg2; 268 a.buf = args->ptr; 269 return (linux_shmctl(td, &a)); 270 } 271 default: 272 break; 273 } 274 275 return (EINVAL); 276} 277 278int 279linux_old_select(struct thread *td, struct linux_old_select_args *args) 280{ 281 struct l_old_select_argv linux_args; 282 struct linux_select_args newsel; 283 int error; 284 285#ifdef DEBUG 286 if 
(ldebug(old_select)) 287 printf(ARGS(old_select, "%p"), args->ptr); 288#endif 289 290 error = copyin(args->ptr, &linux_args, sizeof(linux_args)); 291 if (error) 292 return (error); 293 294 newsel.nfds = linux_args.nfds; 295 newsel.readfds = linux_args.readfds; 296 newsel.writefds = linux_args.writefds; 297 newsel.exceptfds = linux_args.exceptfds; 298 newsel.timeout = linux_args.timeout; 299 return (linux_select(td, &newsel)); 300} 301 302int 303linux_fork(struct thread *td, struct linux_fork_args *args) 304{ 305 int error; 306 struct proc *p2; 307 struct thread *td2; 308 309#ifdef DEBUG 310 if (ldebug(fork)) 311 printf(ARGS(fork, "")); 312#endif 313 314 if ((error = fork1(td, RFFDG | RFPROC | RFSTOPPED, 0, &p2)) != 0) 315 return (error); 316 317 if (error == 0) { 318 td->td_retval[0] = p2->p_pid; 319 td->td_retval[1] = 0; 320 } 321 322 if (td->td_retval[1] == 1) 323 td->td_retval[0] = 0; 324 error = linux_proc_init(td, td->td_retval[0], 0); 325 if (error) 326 return (error); 327 328 td2 = FIRST_THREAD_IN_PROC(p2); 329 330 /* 331 * Make this runnable after we are finished with it. 332 */ 333 thread_lock(td2); 334 TD_SET_CAN_RUN(td2); 335 sched_add(td2, SRQ_BORING); 336 thread_unlock(td2); 337 338 return (0); 339} 340 341int 342linux_vfork(struct thread *td, struct linux_vfork_args *args) 343{ 344 int error; 345 struct proc *p2; 346 struct thread *td2; 347 348#ifdef DEBUG 349 if (ldebug(vfork)) 350 printf(ARGS(vfork, "")); 351#endif 352 353 /* exclude RFPPWAIT */ 354 if ((error = fork1(td, RFFDG | RFPROC | RFMEM | RFSTOPPED, 0, &p2)) != 0) 355 return (error); 356 if (error == 0) { 357 td->td_retval[0] = p2->p_pid; 358 td->td_retval[1] = 0; 359 } 360 /* Are we the child? 
*/ 361 if (td->td_retval[1] == 1) 362 td->td_retval[0] = 0; 363 error = linux_proc_init(td, td->td_retval[0], 0); 364 if (error) 365 return (error); 366 367 PROC_LOCK(p2); 368 p2->p_flag |= P_PPWAIT; 369 PROC_UNLOCK(p2); 370 371 td2 = FIRST_THREAD_IN_PROC(p2); 372 373 /* 374 * Make this runnable after we are finished with it. 375 */ 376 thread_lock(td2); 377 TD_SET_CAN_RUN(td2); 378 sched_add(td2, SRQ_BORING); 379 thread_unlock(td2); 380 381 /* wait for the children to exit, ie. emulate vfork */ 382 PROC_LOCK(p2); 383 while (p2->p_flag & P_PPWAIT) 384 cv_wait(&p2->p_pwait, &p2->p_mtx); 385 PROC_UNLOCK(p2); 386 387 return (0); 388} 389 390static int 391linux_set_cloned_tls(struct thread *td, void *desc) 392{ 393 struct segment_descriptor sd; 394 struct l_user_desc info; 395 int idx, error; 396 int a[2]; 397 398 error = copyin(desc, &info, sizeof(struct l_user_desc)); 399 if (error) { 400 printf(LMSG("copyin failed!")); 401 } else { 402 idx = info.entry_number; 403 404 /* 405 * looks like we're getting the idx we returned 406 * in the set_thread_area() syscall 407 */ 408 if (idx != 6 && idx != 3) { 409 printf(LMSG("resetting idx!")); 410 idx = 3; 411 } 412 413 /* this doesnt happen in practice */ 414 if (idx == 6) { 415 /* we might copy out the entry_number as 3 */ 416 info.entry_number = 3; 417 error = copyout(&info, desc, sizeof(struct l_user_desc)); 418 if (error) 419 printf(LMSG("copyout failed!")); 420 } 421 422 a[0] = LINUX_LDT_entry_a(&info); 423 a[1] = LINUX_LDT_entry_b(&info); 424 425 memcpy(&sd, &a, sizeof(a)); 426#ifdef DEBUG 427 if (ldebug(clone)) 428 printf("Segment created in clone with " 429 "CLONE_SETTLS: lobase: %x, hibase: %x, " 430 "lolimit: %x, hilimit: %x, type: %i, " 431 "dpl: %i, p: %i, xx: %i, def32: %i, " 432 "gran: %i\n", sd.sd_lobase, sd.sd_hibase, 433 sd.sd_lolimit, sd.sd_hilimit, sd.sd_type, 434 sd.sd_dpl, sd.sd_p, sd.sd_xx, 435 sd.sd_def32, sd.sd_gran); 436#endif 437 438 /* set %gs */ 439 td->td_pcb->pcb_gsd = sd; 440 td->td_pcb->pcb_gs 
= GSEL(GUGS_SEL, SEL_UPL); 441 } 442 443 return (error); 444} 445 446int 447linux_clone(struct thread *td, struct linux_clone_args *args) 448{ 449 int error, ff = RFPROC | RFSTOPPED; 450 struct proc *p2; 451 struct thread *td2; 452 int exit_signal; 453 struct linux_emuldata *em; 454 455#ifdef DEBUG 456 if (ldebug(clone)) { 457 printf(ARGS(clone, "flags %x, stack %x, parent tid: %x, child tid: %x"), 458 (unsigned int)args->flags, (unsigned int)args->stack, 459 (unsigned int)args->parent_tidptr, (unsigned int)args->child_tidptr); 460 } 461#endif 462 463 exit_signal = args->flags & 0x000000ff; 464 if (LINUX_SIG_VALID(exit_signal)) { 465 if (exit_signal <= LINUX_SIGTBLSZ) 466 exit_signal = 467 linux_to_bsd_signal[_SIG_IDX(exit_signal)]; 468 } else if (exit_signal != 0) 469 return (EINVAL); 470 471 if (args->flags & LINUX_CLONE_VM) 472 ff |= RFMEM; 473 if (args->flags & LINUX_CLONE_SIGHAND) 474 ff |= RFSIGSHARE; 475 /* 476 * XXX: in linux sharing of fs info (chroot/cwd/umask) 477 * and open files is independant. in fbsd its in one 478 * structure but in reality it doesn't cause any problems 479 * because both of these flags are usually set together. 480 */ 481 if (!(args->flags & (LINUX_CLONE_FILES | LINUX_CLONE_FS))) 482 ff |= RFFDG; 483 484 /* 485 * Attempt to detect when linux_clone(2) is used for creating 486 * kernel threads. Unfortunately despite the existence of the 487 * CLONE_THREAD flag, version of linuxthreads package used in 488 * most popular distros as of beginning of 2005 doesn't make 489 * any use of it. Therefore, this detection relies on 490 * empirical observation that linuxthreads sets certain 491 * combination of flags, so that we can make more or less 492 * precise detection and notify the FreeBSD kernel that several 493 * processes are in fact part of the same threading group, so 494 * that special treatment is necessary for signal delivery 495 * between those processes and fd locking. 
496 */ 497 if ((args->flags & 0xffffff00) == LINUX_THREADING_FLAGS) 498 ff |= RFTHREAD; 499 500 if (args->flags & LINUX_CLONE_PARENT_SETTID) 501 if (args->parent_tidptr == NULL) 502 return (EINVAL); 503 504 error = fork1(td, ff, 0, &p2); 505 if (error) 506 return (error); 507 508 if (args->flags & (LINUX_CLONE_PARENT | LINUX_CLONE_THREAD)) { 509 sx_xlock(&proctree_lock); 510 PROC_LOCK(p2); 511 proc_reparent(p2, td->td_proc->p_pptr); 512 PROC_UNLOCK(p2); 513 sx_xunlock(&proctree_lock); 514 } 515 516 /* create the emuldata */ 517 error = linux_proc_init(td, p2->p_pid, args->flags); 518 /* reference it - no need to check this */ 519 em = em_find(p2, EMUL_DOLOCK); 520 KASSERT(em != NULL, ("clone: emuldata not found.\n")); 521 /* and adjust it */ 522 523 if (args->flags & LINUX_CLONE_THREAD) { 524 /* XXX: linux mangles pgrp and pptr somehow 525 * I think it might be this but I am not sure. 526 */ 527#ifdef notyet 528 PROC_LOCK(p2); 529 p2->p_pgrp = td->td_proc->p_pgrp; 530 PROC_UNLOCK(p2); 531#endif 532 exit_signal = 0; 533 } 534 535 if (args->flags & LINUX_CLONE_CHILD_SETTID) 536 em->child_set_tid = args->child_tidptr; 537 else 538 em->child_set_tid = NULL; 539 540 if (args->flags & LINUX_CLONE_CHILD_CLEARTID) 541 em->child_clear_tid = args->child_tidptr; 542 else 543 em->child_clear_tid = NULL; 544 545 EMUL_UNLOCK(&emul_lock); 546 547 if (args->flags & LINUX_CLONE_PARENT_SETTID) { 548 error = copyout(&p2->p_pid, args->parent_tidptr, sizeof(p2->p_pid)); 549 if (error) 550 printf(LMSG("copyout failed!")); 551 } 552 553 PROC_LOCK(p2); 554 p2->p_sigparent = exit_signal; 555 PROC_UNLOCK(p2); 556 td2 = FIRST_THREAD_IN_PROC(p2); 557 /* 558 * in a case of stack = NULL we are supposed to COW calling process stack 559 * this is what normal fork() does so we just keep the tf_esp arg intact 560 */ 561 if (args->stack) 562 td2->td_frame->tf_esp = (unsigned int)args->stack; 563 564 if (args->flags & LINUX_CLONE_SETTLS) 565 linux_set_cloned_tls(td2, args->tls); 566 567#ifdef DEBUG 
568 if (ldebug(clone)) 569 printf(LMSG("clone: successful rfork to %ld, stack %p sig = %d"), 570 (long)p2->p_pid, args->stack, exit_signal); 571#endif 572 if (args->flags & LINUX_CLONE_VFORK) { 573 PROC_LOCK(p2); 574 p2->p_flag |= P_PPWAIT; 575 PROC_UNLOCK(p2); 576 } 577 578 /* 579 * Make this runnable after we are finished with it. 580 */ 581 thread_lock(td2); 582 TD_SET_CAN_RUN(td2); 583 sched_add(td2, SRQ_BORING); 584 thread_unlock(td2); 585 586 td->td_retval[0] = p2->p_pid; 587 td->td_retval[1] = 0; 588 589 if (args->flags & LINUX_CLONE_VFORK) { 590 /* wait for the children to exit, ie. emulate vfork */ 591 PROC_LOCK(p2); 592 while (p2->p_flag & P_PPWAIT) 593 cv_wait(&p2->p_pwait, &p2->p_mtx); 594 PROC_UNLOCK(p2); 595 } 596 597 return (0); 598} 599 600#define STACK_SIZE (2 * 1024 * 1024) 601#define GUARD_SIZE (4 * PAGE_SIZE) 602 603int 604linux_mmap2(struct thread *td, struct linux_mmap2_args *args) 605{ 606 607#ifdef DEBUG 608 if (ldebug(mmap2)) 609 printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"), 610 (void *)args->addr, args->len, args->prot, 611 args->flags, args->fd, args->pgoff); 612#endif 613 614 return (linux_mmap_common(td, args->addr, args->len, args->prot, 615 args->flags, args->fd, (uint64_t)(uint32_t)args->pgoff * 616 PAGE_SIZE)); 617} 618 619int 620linux_mmap(struct thread *td, struct linux_mmap_args *args) 621{ 622 int error; 623 struct l_mmap_argv linux_args; 624 625 error = copyin(args->ptr, &linux_args, sizeof(linux_args)); 626 if (error) 627 return (error); 628 629#ifdef DEBUG 630 if (ldebug(mmap)) 631 printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"), 632 (void *)linux_args.addr, linux_args.len, linux_args.prot, 633 linux_args.flags, linux_args.fd, linux_args.pgoff); 634#endif 635 636 return (linux_mmap_common(td, linux_args.addr, linux_args.len, 637 linux_args.prot, linux_args.flags, linux_args.fd, 638 (uint32_t)linux_args.pgoff)); 639} 640 641static int 642linux_mmap_common(struct thread *td, l_uintptr_t addr, l_size_t len, l_int prot, 643 
l_int flags, l_int fd, l_loff_t pos) 644{ 645 struct proc *p = td->td_proc; 646 struct mmap_args /* { 647 caddr_t addr; 648 size_t len; 649 int prot; 650 int flags; 651 int fd; 652 long pad; 653 off_t pos; 654 } */ bsd_args; 655 int error; 656 struct file *fp; 657 658 error = 0; 659 bsd_args.flags = 0; 660 fp = NULL; 661 662 /* 663 * Linux mmap(2): 664 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE 665 */ 666 if (!((flags & LINUX_MAP_SHARED) ^ (flags & LINUX_MAP_PRIVATE))) 667 return (EINVAL); 668 669 if (flags & LINUX_MAP_SHARED) 670 bsd_args.flags |= MAP_SHARED; 671 if (flags & LINUX_MAP_PRIVATE) 672 bsd_args.flags |= MAP_PRIVATE; 673 if (flags & LINUX_MAP_FIXED) 674 bsd_args.flags |= MAP_FIXED; 675 if (flags & LINUX_MAP_ANON) { 676 /* Enforce pos to be on page boundary, then ignore. */ 677 if ((pos & PAGE_MASK) != 0) 678 return (EINVAL); 679 pos = 0; 680 bsd_args.flags |= MAP_ANON; 681 } else 682 bsd_args.flags |= MAP_NOSYNC; 683 if (flags & LINUX_MAP_GROWSDOWN) 684 bsd_args.flags |= MAP_STACK; 685 686 /* 687 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC 688 * on Linux/i386. We do this to ensure maximum compatibility. 689 * Linux/ia64 does the same in i386 emulation mode. 690 */ 691 bsd_args.prot = prot; 692 if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC)) 693 bsd_args.prot |= PROT_READ | PROT_EXEC; 694 695 /* Linux does not check file descriptor when MAP_ANONYMOUS is set. */ 696 bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : fd; 697 if (bsd_args.fd != -1) { 698 /* 699 * Linux follows Solaris mmap(2) description: 700 * The file descriptor fildes is opened with 701 * read permission, regardless of the 702 * protection options specified. 
703 */ 704 705 if ((error = fget(td, bsd_args.fd, &fp)) != 0) 706 return (error); 707 if (fp->f_type != DTYPE_VNODE) { 708 fdrop(fp, td); 709 return (EINVAL); 710 } 711 712 /* Linux mmap() just fails for O_WRONLY files */ 713 if (!(fp->f_flag & FREAD)) { 714 fdrop(fp, td); 715 return (EACCES); 716 } 717 718 fdrop(fp, td); 719 } 720 721 if (flags & LINUX_MAP_GROWSDOWN) { 722 /* 723 * The Linux MAP_GROWSDOWN option does not limit auto 724 * growth of the region. Linux mmap with this option 725 * takes as addr the inital BOS, and as len, the initial 726 * region size. It can then grow down from addr without 727 * limit. However, linux threads has an implicit internal 728 * limit to stack size of STACK_SIZE. Its just not 729 * enforced explicitly in linux. But, here we impose 730 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack 731 * region, since we can do this with our mmap. 732 * 733 * Our mmap with MAP_STACK takes addr as the maximum 734 * downsize limit on BOS, and as len the max size of 735 * the region. It them maps the top SGROWSIZ bytes, 736 * and auto grows the region down, up to the limit 737 * in addr. 738 * 739 * If we don't use the MAP_STACK option, the effect 740 * of this code is to allocate a stack region of a 741 * fixed size of (STACK_SIZE - GUARD_SIZE). 742 */ 743 744 if ((caddr_t)PTRIN(addr) + len > p->p_vmspace->vm_maxsaddr) { 745 /* 746 * Some linux apps will attempt to mmap 747 * thread stacks near the top of their 748 * address space. If their TOS is greater 749 * than vm_maxsaddr, vm_map_growstack() 750 * will confuse the thread stack with the 751 * process stack and deliver a SEGV if they 752 * attempt to grow the thread stack past their 753 * current stacksize rlimit. To avoid this, 754 * adjust vm_maxsaddr upwards to reflect 755 * the current stacksize rlimit rather 756 * than the maximum possible stacksize. 757 * It would be better to adjust the 758 * mmap'ed region, but some apps do not check 759 * mmap's return value. 
760 */ 761 PROC_LOCK(p); 762 p->p_vmspace->vm_maxsaddr = (char *)USRSTACK - 763 lim_cur(p, RLIMIT_STACK); 764 PROC_UNLOCK(p); 765 } 766 767 /* 768 * This gives us our maximum stack size and a new BOS. 769 * If we're using VM_STACK, then mmap will just map 770 * the top SGROWSIZ bytes, and let the stack grow down 771 * to the limit at BOS. If we're not using VM_STACK 772 * we map the full stack, since we don't have a way 773 * to autogrow it. 774 */ 775 if (len > STACK_SIZE - GUARD_SIZE) { 776 bsd_args.addr = (caddr_t)PTRIN(addr); 777 bsd_args.len = len; 778 } else { 779 bsd_args.addr = (caddr_t)PTRIN(addr) - 780 (STACK_SIZE - GUARD_SIZE - len); 781 bsd_args.len = STACK_SIZE - GUARD_SIZE; 782 } 783 } else { 784 bsd_args.addr = (caddr_t)PTRIN(addr); 785 bsd_args.len = len; 786 } 787 bsd_args.pos = pos; 788 789#ifdef DEBUG 790 if (ldebug(mmap)) 791 printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n", 792 __func__, 793 (void *)bsd_args.addr, bsd_args.len, bsd_args.prot, 794 bsd_args.flags, bsd_args.fd, (int)bsd_args.pos); 795#endif 796 error = mmap(td, &bsd_args); 797#ifdef DEBUG 798 if (ldebug(mmap)) 799 printf("-> %s() return: 0x%x (0x%08x)\n", 800 __func__, error, (u_int)td->td_retval[0]); 801#endif 802 return (error); 803} 804 805int 806linux_mprotect(struct thread *td, struct linux_mprotect_args *uap) 807{ 808 struct mprotect_args bsd_args; 809 810 bsd_args.addr = uap->addr; 811 bsd_args.len = uap->len; 812 bsd_args.prot = uap->prot; 813 if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC)) 814 bsd_args.prot |= PROT_READ | PROT_EXEC; 815 return (mprotect(td, &bsd_args)); 816} 817 818int 819linux_pipe(struct thread *td, struct linux_pipe_args *args) 820{ 821 int error; 822 int fildes[2]; 823 824#ifdef DEBUG 825 if (ldebug(pipe)) 826 printf(ARGS(pipe, "*")); 827#endif 828 829 error = kern_pipe(td, fildes); 830 if (error) 831 return (error); 832 833 /* XXX: Close descriptors on error. 
*/ 834 return (copyout(fildes, args->pipefds, sizeof fildes)); 835} 836 837int 838linux_ioperm(struct thread *td, struct linux_ioperm_args *args) 839{ 840 int error; 841 struct i386_ioperm_args iia; 842 843 iia.start = args->start; 844 iia.length = args->length; 845 iia.enable = args->enable; 846 error = i386_set_ioperm(td, &iia); 847 return (error); 848} 849 850int 851linux_iopl(struct thread *td, struct linux_iopl_args *args) 852{ 853 int error; 854 855 if (args->level < 0 || args->level > 3) 856 return (EINVAL); 857 if ((error = priv_check(td, PRIV_IO)) != 0) 858 return (error); 859 if ((error = securelevel_gt(td->td_ucred, 0)) != 0) 860 return (error); 861 td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) | 862 (args->level * (PSL_IOPL / 3)); 863 return (0); 864} 865 866int 867linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap) 868{ 869 int error; 870 struct i386_ldt_args ldt; 871 struct l_descriptor ld; 872 union descriptor desc; 873 int size, written; 874 875 switch (uap->func) { 876 case 0x00: /* read_ldt */ 877 ldt.start = 0; 878 ldt.descs = uap->ptr; 879 ldt.num = uap->bytecount / sizeof(union descriptor); 880 error = i386_get_ldt(td, &ldt); 881 td->td_retval[0] *= sizeof(union descriptor); 882 break; 883 case 0x02: /* read_default_ldt = 0 */ 884 size = 5*sizeof(struct l_desc_struct); 885 if (size > uap->bytecount) 886 size = uap->bytecount; 887 for (written = error = 0; written < size && error == 0; written++) 888 error = subyte((char *)uap->ptr + written, 0); 889 td->td_retval[0] = written; 890 break; 891 case 0x01: /* write_ldt */ 892 case 0x11: /* write_ldt */ 893 if (uap->bytecount != sizeof(ld)) 894 return (EINVAL); 895 896 error = copyin(uap->ptr, &ld, sizeof(ld)); 897 if (error) 898 return (error); 899 900 ldt.start = ld.entry_number; 901 ldt.descs = &desc; 902 ldt.num = 1; 903 desc.sd.sd_lolimit = (ld.limit & 0x0000ffff); 904 desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16; 905 desc.sd.sd_lobase = (ld.base_addr 
& 0x00ffffff); 906 desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24; 907 desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) | 908 (ld.contents << 2); 909 desc.sd.sd_dpl = 3; 910 desc.sd.sd_p = (ld.seg_not_present ^ 1); 911 desc.sd.sd_xx = 0; 912 desc.sd.sd_def32 = ld.seg_32bit; 913 desc.sd.sd_gran = ld.limit_in_pages; 914 error = i386_set_ldt(td, &ldt, &desc); 915 break; 916 default: 917 error = ENOSYS; 918 break; 919 } 920 921 if (error == EOPNOTSUPP) { 922 printf("linux: modify_ldt needs kernel option USER_LDT\n"); 923 error = ENOSYS; 924 } 925 926 return (error); 927} 928 929int 930linux_sigaction(struct thread *td, struct linux_sigaction_args *args) 931{ 932 l_osigaction_t osa; 933 l_sigaction_t act, oact; 934 int error; 935 936#ifdef DEBUG 937 if (ldebug(sigaction)) 938 printf(ARGS(sigaction, "%d, %p, %p"), 939 args->sig, (void *)args->nsa, (void *)args->osa); 940#endif 941 942 if (args->nsa != NULL) { 943 error = copyin(args->nsa, &osa, sizeof(l_osigaction_t)); 944 if (error) 945 return (error); 946 act.lsa_handler = osa.lsa_handler; 947 act.lsa_flags = osa.lsa_flags; 948 act.lsa_restorer = osa.lsa_restorer; 949 LINUX_SIGEMPTYSET(act.lsa_mask); 950 act.lsa_mask.__bits[0] = osa.lsa_mask; 951 } 952 953 error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL, 954 args->osa ? &oact : NULL); 955 956 if (args->osa != NULL && !error) { 957 osa.lsa_handler = oact.lsa_handler; 958 osa.lsa_flags = oact.lsa_flags; 959 osa.lsa_restorer = oact.lsa_restorer; 960 osa.lsa_mask = oact.lsa_mask.__bits[0]; 961 error = copyout(&osa, args->osa, sizeof(l_osigaction_t)); 962 } 963 964 return (error); 965} 966 967/* 968 * Linux has two extra args, restart and oldmask. We dont use these, 969 * but it seems that "restart" is actually a context pointer that 970 * enables the signal to happen with a different register set. 
971 */ 972int 973linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args) 974{ 975 sigset_t sigmask; 976 l_sigset_t mask; 977 978#ifdef DEBUG 979 if (ldebug(sigsuspend)) 980 printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask); 981#endif 982 983 LINUX_SIGEMPTYSET(mask); 984 mask.__bits[0] = args->mask; 985 linux_to_bsd_sigset(&mask, &sigmask); 986 return (kern_sigsuspend(td, sigmask)); 987} 988 989int 990linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap) 991{ 992 l_sigset_t lmask; 993 sigset_t sigmask; 994 int error; 995 996#ifdef DEBUG 997 if (ldebug(rt_sigsuspend)) 998 printf(ARGS(rt_sigsuspend, "%p, %d"), 999 (void *)uap->newset, uap->sigsetsize); 1000#endif 1001 1002 if (uap->sigsetsize != sizeof(l_sigset_t)) 1003 return (EINVAL); 1004 1005 error = copyin(uap->newset, &lmask, sizeof(l_sigset_t)); 1006 if (error) 1007 return (error); 1008 1009 linux_to_bsd_sigset(&lmask, &sigmask); 1010 return (kern_sigsuspend(td, sigmask)); 1011} 1012 1013int 1014linux_pause(struct thread *td, struct linux_pause_args *args) 1015{ 1016 struct proc *p = td->td_proc; 1017 sigset_t sigmask; 1018 1019#ifdef DEBUG 1020 if (ldebug(pause)) 1021 printf(ARGS(pause, "")); 1022#endif 1023 1024 PROC_LOCK(p); 1025 sigmask = td->td_sigmask; 1026 PROC_UNLOCK(p); 1027 return (kern_sigsuspend(td, sigmask)); 1028} 1029 1030int 1031linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap) 1032{ 1033 stack_t ss, oss; 1034 l_stack_t lss; 1035 int error; 1036 1037#ifdef DEBUG 1038 if (ldebug(sigaltstack)) 1039 printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss); 1040#endif 1041 1042 if (uap->uss != NULL) { 1043 error = copyin(uap->uss, &lss, sizeof(l_stack_t)); 1044 if (error) 1045 return (error); 1046 1047 ss.ss_sp = lss.ss_sp; 1048 ss.ss_size = lss.ss_size; 1049 ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags); 1050 } 1051 error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL, 1052 (uap->uoss != NULL) ? 
	    &oss : NULL);
	if (!error && uap->uoss != NULL) {
		/* Translate the old (previous) signal stack back to Linux form. */
		lss.ss_sp = oss.ss_sp;
		lss.ss_size = oss.ss_size;
		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}

/*
 * Linux ftruncate64(2): truncate an open file to a 64-bit length.
 * Thin wrapper that repacks the Linux arguments into a native
 * ftruncate_args structure and calls the native syscall handler.
 */
int
linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
{
	struct ftruncate_args sa;

#ifdef DEBUG
	if (ldebug(ftruncate64))
		printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
		    (intmax_t)args->length);
#endif

	sa.fd = args->fd;
	sa.length = args->length;
	return ftruncate(td, &sa);
}

/*
 * Linux set_thread_area(2): install a TLS segment descriptor for the
 * calling thread.  Copies in a struct l_user_desc, validates the
 * requested GDT slot, builds a hardware segment descriptor from it and
 * installs that descriptor as the thread's %gs segment.  Returns 0 on
 * success, EINVAL for an unsupported slot, or a copyin/copyout error.
 */
int
linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	int a[2];	/* raw low/high words of the descriptor being built */
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
		    info.entry_number,
		    info.base_addr,
		    info.limit,
		    info.seg_32bit,
		    info.contents,
		    info.read_exec_only,
		    info.limit_in_pages,
		    info.seg_not_present,
		    info.useable);
#endif

	idx = info.entry_number;
	/*
	 * Semantics of linux version: every thread in the system has array of
	 * 3 tls descriptors. 1st is GLIBC TLS, 2nd is WINE, 3rd unknown. This
	 * syscall loads one of the selected tls decriptors with a value and
	 * also loads GDT descriptors 6, 7 and 8 with the content of the
	 * per-thread descriptors.
	 *
	 * Semantics of fbsd version: I think we can ignore that linux has 3
	 * per-thread descriptors and use just the 1st one. The tls_array[]
	 * is used only in set/get-thread_area() syscalls and for loading the
	 * GDT descriptors. In fbsd we use just one GDT descriptor for TLS so
	 * we will load just one.
	 *
	 * XXX: this doesn't work when a user space process tries to use more
	 * than 1 TLS segment. Comment in the linux sources says wine might do
	 * this.
	 */

	/*
	 * we support just GLIBC TLS now
	 * we should let 3 proceed as well because we use this segment so
	 * if code does two subsequent calls it should succeed
	 * (-1 means "pick a slot for me", per the Linux API)
	 */
	if (idx != 6 && idx != -1 && idx != 3)
		return (EINVAL);

	/*
	 * we have to copy out the GDT entry we use
	 * FreeBSD uses GDT entry #3 for storing %gs so load that
	 *
	 * XXX: what if a user space program doesn't check this value and tries
	 * to use 6, 7 or 8?
	 */
	idx = info.entry_number = 3;
	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (error);

	/* An "empty" descriptor clears the slot; otherwise encode it. */
	if (LINUX_LDT_empty(&info)) {
		a[0] = 0;
		a[1] = 0;
	} else {
		a[0] = LINUX_LDT_entry_a(&info);
		a[1] = LINUX_LDT_entry_b(&info);
	}

	/* Reinterpret the two raw words as a hardware segment descriptor. */
	memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf("Segment created in set_thread_area: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n", sd.sd_lobase,
		    sd.sd_hibase,
		    sd.sd_lolimit,
		    sd.sd_hilimit,
		    sd.sd_type,
		    sd.sd_dpl,
		    sd.sd_p,
		    sd.sd_xx,
		    sd.sd_def32,
		    sd.sd_gran);
#endif

	/*
	 * this is taken from i386 version of cpu_set_user_tls()
	 * The critical section keeps the PCB, the per-CPU GDT slot and the
	 * live %gs register consistent across a preemption.
	 */
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	load_gs(GSEL(GUGS_SEL, SEL_UPL));
	critical_exit();

	return (0);
}

/*
 * Linux get_thread_area(2): report the calling thread's TLS descriptor.
 * Reads back the %gs descriptor from the per-CPU GDT (the slot
 * set_thread_area() above installs into) and decodes it into the Linux
 * struct l_user_desc representation.
 */
int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{

	struct l_user_desc info;
	int error;
	int idx;
	struct l_desc_struct desc;
	struct segment_descriptor sd;

#ifdef DEBUG
	if (ldebug(get_thread_area))
		printf(ARGS(get_thread_area, "%p"), args->desc);
#endif

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	/* Only the entry_number field of the copied-in descriptor is used. */
	idx = info.entry_number;
	/* XXX: I am not sure if we want 3 to be allowed too. */
	if (idx != 6 && idx != 3)
		return (EINVAL);

	/* Always report slot 3, matching what set_thread_area() copies out. */
	idx = 3;

	memset(&info, 0, sizeof(info));

	sd = PCPU_GET(fsgs_gdt)[1];

	/* Reinterpret the hardware descriptor as a Linux descriptor word pair. */
	memcpy(&desc, &sd, sizeof(desc));

	info.entry_number = idx;
	info.base_addr = LINUX_GET_BASE(&desc);
	info.limit = LINUX_GET_LIMIT(&desc);
	info.seg_32bit = LINUX_GET_32BIT(&desc);
	info.contents = LINUX_GET_CONTENTS(&desc);
	info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
	info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !LINUX_GET_PRESENT(&desc);
	info.useable = LINUX_GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		/*
		 * NOTE(review): collapses any copyout error to EFAULT;
		 * harmless in practice since copyout only returns EFAULT.
		 */
		return (EFAULT);

	return (0);
}

/*
 * POSIX timer syscalls: forwarded directly to the native handlers.
 * copied from kern/kern_time.c
 *
 * NOTE(review): the casts assume the Linux and native argument
 * structures have identical layout on i386 — confirm against
 * linux_proto.h if the argument lists ever change.
 */
int
linux_timer_create(struct thread *td, struct linux_timer_create_args *args)
{
	return ktimer_create(td, (struct ktimer_create_args *) args);
}

int
linux_timer_settime(struct thread *td, struct linux_timer_settime_args *args)
{
	return ktimer_settime(td, (struct ktimer_settime_args *) args);
}

int
linux_timer_gettime(struct thread *td, struct linux_timer_gettime_args *args)
{
	return ktimer_gettime(td, (struct ktimer_gettime_args *) args);
}

int
linux_timer_getoverrun(struct thread *td, struct linux_timer_getoverrun_args *args)
{
	return ktimer_getoverrun(td, (struct ktimer_getoverrun_args *) args);
}

int
linux_timer_delete(struct thread *td, struct linux_timer_delete_args *args)
{
	return ktimer_delete(td, (struct ktimer_delete_args *) args);
}

/*
 * POSIX message queue syscalls: forwarded to the native kmq_* handlers
 * when the kernel is built with P1003_1B_MQUEUE, otherwise ENOSYS.
 *
 * XXX: this wont work with module - convert it
 */
int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_open(td, (struct kmq_open_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_unlink(td, (struct kmq_unlink_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_timedsend(td, (struct kmq_timedsend_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_timedreceive(td, (struct kmq_timedreceive_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_notify(td, (struct kmq_notify_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_setattr(td, (struct kmq_setattr_args *) args);
#else
	return (ENOSYS);
#endif
}

/*
 * Linux wait4(2): wait for a child process to change state.
 * Translates the Linux option flags to their native equivalents, waits
 * via linux_common_wait() (which handles status translation), and
 * copies out the rusage if the caller asked for it.
 */
int
linux_wait4(struct thread *td, struct linux_wait4_args *args)
{
	int error, options;
	struct rusage ru, *rup;

#ifdef DEBUG
	if (ldebug(wait4))
		printf(ARGS(wait4, "%d, %p, %d, %p"),
		    args->pid, (void *)args->status, args->options,
		    (void *)args->rusage);
#endif

	/* WNOHANG and WUNTRACED have the same values on Linux and FreeBSD. */
	options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		options |= WLINUXCLONE;

	/* Only collect rusage when the caller supplied a destination. */
	if (args->rusage != NULL)
		rup = &ru;
	else
		rup = NULL;
	error = linux_common_wait(td, args->pid, args->status, options, rup);
	if (error)
		return (error);
	if (args->rusage != NULL)
		error = copyout(&ru, args->rusage, sizeof(ru));

	return (error);
}