linux_machdep.c revision 162479
1/*- 2 * Copyright (c) 2000 Marcel Moolenaar 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer 10 * in this position and unchanged. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/linux/linux_machdep.c 162479 2006-09-20 19:27:11Z netchild $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/unistd.h>
#include <sys/wait.h>

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_emul.h>

#include <i386/include/pcb.h>	/* needed for pcb definition in linux_set_thread_area */

#include "opt_posix.h"

extern struct sysentvec elf32_freebsd_sysvec;	/* defined in i386/i386/elf_machdep.c */

/*
 * Mirror of the Linux i386 user_desc structure used by modify_ldt(2)
 * (write_ldt).  Bitfields match the Linux ABI layout.
 */
struct l_descriptor {
	l_uint		entry_number;
	l_ulong		base_addr;
	l_uint		limit;
	l_uint		seg_32bit:1;
	l_uint		contents:2;
	l_uint		read_exec_only:1;
	l_uint		limit_in_pages:1;
	l_uint		seg_not_present:1;
	l_uint		useable:1;
};

/*
 * Argument block for the old-style Linux select(2), which passes a
 * single pointer to this structure instead of five separate args.
 */
struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval *timeout;
};

/*
 * Translate Linux sigaltstack ss_flags to the FreeBSD equivalents.
 */
int
linux_to_bsd_sigaltstack(int lsa)
{
	int bsa = 0;

	if (lsa & LINUX_SS_DISABLE)
		bsa |= SS_DISABLE;
	if (lsa & LINUX_SS_ONSTACK)
		bsa |= SS_ONSTACK;
	return (bsa);
}

/*
 * Translate FreeBSD sigaltstack ss_flags to the Linux equivalents.
 */
int
bsd_to_linux_sigaltstack(int bsa)
{
	int lsa = 0;

	if (bsa & SS_DISABLE)
		lsa |= LINUX_SS_DISABLE;
	if (bsa & SS_ONSTACK)
		lsa |= LINUX_SS_ONSTACK;
	return (lsa);
}

/*
 * Linux execve(2): translate the path, then hand off to the native
 * kern_execve().  If the new image is a Linux binary, (re)create its
 * emulation data.
 */
int
linux_execve(struct thread *td, struct linux_execve_args *args)
{
	int error;
	char *newpath;
	struct image_args eargs;

	LCONVPATHEXIST(td, args->path, &newpath);

#ifdef DEBUG
	if (ldebug(execve))
		printf(ARGS(execve, "%s"), newpath);
#endif

	error = exec_copyin_args(&eargs, newpath, UIO_SYSSPACE,
	    args->argp, args->envp);
	free(newpath, M_TEMP);
	if (error == 0)
		error = kern_execve(td, &eargs, NULL);
	if (error == 0)
		/* linux process can exec fbsd one, dont attempt
		 * to create emuldata for such process using
		 * linux_proc_init, this leads to a panic on KASSERT
		 * because such process has p->p_emuldata == NULL
		 */
		if (td->td_proc->p_sysent == &elf_linux_sysvec)
			error = linux_proc_init(td, 0, 0);
	return (error);
}

/*
 * Layout Linux uses to smuggle the msgp/msgtyp pair through the
 * single pointer argument of the old-style ipc(2) MSGRCV call.
 */
struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};

/*
 * Linux ipc(2) multiplexer: dispatch on the low 16 bits of 'what' to
 * the individual System V IPC emulation routines, repacking the
 * generic arg1..arg5/ptr arguments into each call's argument struct.
 */
int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {
		struct linux_semop_args a;

		a.semid = args->arg1;
		a.tsops = args->ptr;
		a.nsops = args->arg2;
		return (linux_semop(td, &a));
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		return (linux_semget(td, &a));
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;
		int error;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		error = copyin(args->ptr, &a.arg, sizeof(a.arg));
		if (error)
			return (error);
		return (linux_semctl(td, &a));
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = args->ptr;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		return (linux_msgsnd(td, &a));
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		/*
		 * Version 0 of the call passes msgp/msgtyp indirectly
		 * through a struct l_ipc_kludge; newer versions pass
		 * them directly in ptr/arg5.
		 */
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;
			int error;

			if (args->ptr == NULL)
				return (EINVAL);
			error = copyin(args->ptr, &tmp, sizeof(tmp));
			if (error)
				return (error);
			a.msgp = tmp.msgp;
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = args->ptr;
			a.msgtyp = args->arg5;
		}
		return (linux_msgrcv(td, &a));
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		return (linux_msgget(td, &a));
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_msgctl(td, &a));
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;

		a.shmid = args->arg1;
		a.shmaddr = args->ptr;
		a.shmflg = args->arg2;
		a.raddr = (l_ulong *)args->arg3;
		return (linux_shmat(td, &a));
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = args->ptr;
		return (linux_shmdt(td, &a));
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		return (linux_shmget(td, &a));
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_shmctl(td, &a));
	}
	default:
		break;
	}

	return (EINVAL);
}

/*
 * Old-style Linux select(2): copy in the packed argument block and
 * forward to the regular linux_select() implementation.
 */
int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

#ifdef DEBUG
	if (ldebug(old_select))
		printf(ARGS(old_select, "%p"), args->ptr);
#endif

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.nfds = linux_args.nfds;
	newsel.readfds = linux_args.readfds;
	newsel.writefds = linux_args.writefds;
	newsel.exceptfds = linux_args.exceptfds;
	newsel.timeout = linux_args.timeout;
	return (linux_select(td, &newsel));
}

/*
 * Linux fork(2): native fork plus per-process Linux emulation data
 * setup for the child.
 */
int
linux_fork(struct thread *td, struct linux_fork_args *args)
{
	int error;

#ifdef DEBUG
	if (ldebug(fork))
		printf(ARGS(fork, ""));
#endif

	if ((error = fork(td, (struct fork_args *)args)) != 0)
		return (error);

	/* td_retval[1] == 1 marks the child; it must return 0. */
	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);

	return (0);
}

/*
 * Linux vfork(2): fork sharing the address space, then emulate the
 * vfork parent-wait by sleeping until the child clears P_PPWAIT.
 */
int
linux_vfork(struct thread *td, struct linux_vfork_args *args)
{
	int error;
	struct proc *p2;

#ifdef DEBUG
	if (ldebug(vfork))
		printf(ARGS(vfork, ""));
#endif

	/* exclude RFPPWAIT */
	if ((error = fork1(td, RFFDG | RFPROC | RFMEM, 0, &p2)) != 0)
		return (error);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	/* Are we the child? */
	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	error = linux_proc_init(td, td->td_retval[0], 0);
	if (error)
		return (error);
	/* wait for the children to exit, ie. emulate vfork */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	return (0);
}

/*
 * Linux clone(2): create a new process/thread according to the
 * CLONE_* flags, wire up the Linux emulation data (child TID
 * pointers, exit signal), optionally set up a TLS segment for the
 * child, and finally make it runnable.
 */
int
linux_clone(struct thread *td, struct linux_clone_args *args)
{
	int error, ff = RFPROC | RFSTOPPED;
	struct proc *p2;
	struct thread *td2;
	int exit_signal;
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(clone)) {
		printf(ARGS(clone, "flags %x, stack %x, parent tid: %x, child tid: %x"),
		    (unsigned int)args->flags, (unsigned int)args->stack,
		    (unsigned int)args->parent_tidptr, (unsigned int)args->child_tidptr);
	}
#endif

	/* Low byte of the flags is the signal sent to the parent on exit. */
	exit_signal = args->flags & 0x000000ff;
	if (exit_signal >= LINUX_NSIG)
		return (EINVAL);

	if (exit_signal <= LINUX_SIGTBLSZ)
		exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)];

	if (args->flags & CLONE_VM)
		ff |= RFMEM;
	if (args->flags & CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	if (!(args->flags & CLONE_FILES))
		ff |= RFFDG;

	/*
	 * Attempt to detect when linux_clone(2) is used for creating
	 * kernel threads. Unfortunately despite the existence of the
	 * CLONE_THREAD flag, version of linuxthreads package used in
	 * most popular distros as of beginning of 2005 doesn't make
	 * any use of it. Therefore, this detection relay fully on
	 * empirical observation that linuxthreads sets certain
	 * combination of flags, so that we can make more or less
	 * precise detection and notify the FreeBSD kernel that several
	 * processes are in fact part of the same threading group, so
	 * that special treatment is necessary for signal delivery
	 * between those processes and fd locking.
	 */
	if ((args->flags & 0xffffff00) == THREADING_FLAGS)
		ff |= RFTHREAD;

	error = fork1(td, ff, 0, &p2);
	if (error)
		return (error);

	/* create the emuldata */
	error = linux_proc_init(td, p2->p_pid, args->flags);
	/* reference it - no need to check this */
	em = em_find(p2, EMUL_UNLOCKED);
	KASSERT(em != NULL, ("clone: emuldata not found.\n"));
	/* and adjust it */
	if (args->flags & CLONE_PARENT_SETTID) {
		if (args->parent_tidptr == NULL) {
			EMUL_UNLOCK(&emul_lock);
			return (EINVAL);
		}
		error = copyout(&p2->p_pid, args->parent_tidptr, sizeof(p2->p_pid));
		if (error) {
			EMUL_UNLOCK(&emul_lock);
			return (error);
		}
	}

	if (args->flags & (CLONE_PARENT|CLONE_THREAD)) {
		sx_xlock(&proctree_lock);
		PROC_LOCK(p2);
		proc_reparent(p2, td->td_proc->p_pptr);
		PROC_UNLOCK(p2);
		sx_xunlock(&proctree_lock);
	}

	if (args->flags & CLONE_THREAD) {
		/* XXX: linux mangles pgrp and pptr somehow
		 * I think it might be this but I am not sure.
		 */
#ifdef notyet
		PROC_LOCK(p2);
		p2->p_pgrp = td->td_proc->p_pgrp;
		PROC_UNLOCK(p2);
#endif
		exit_signal = 0;
	}

	if (args->flags & CLONE_CHILD_SETTID)
		em->child_set_tid = args->child_tidptr;
	else
		em->child_set_tid = NULL;

	if (args->flags & CLONE_CHILD_CLEARTID)
		em->child_clear_tid = args->child_tidptr;
	else
		em->child_clear_tid = NULL;

	EMUL_UNLOCK(&emul_lock);

	PROC_LOCK(p2);
	p2->p_sigparent = exit_signal;
	PROC_UNLOCK(p2);
	td2 = FIRST_THREAD_IN_PROC(p2);
	/*
	 * in a case of stack = NULL we are supposed to COW calling process stack
	 * this is what normal fork() does so we just keep the tf_esp arg intact
	 */
	if (args->stack)
		td2->td_frame->tf_esp = (unsigned int)args->stack;

	if (args->flags & CLONE_SETTLS) {
		struct l_user_desc info;
		int idx;
		int a[2];
		struct segment_descriptor sd;

		/* The TLS descriptor pointer is passed in %esi. */
		error = copyin((void *)td->td_frame->tf_esi, &info, sizeof(struct l_user_desc));
		if (error)
			return (error);

		idx = info.entry_number;

		/*
		 * looks like we're getting the idx we returned
		 * in the set_thread_area() syscall
		 */
		if (idx != 6 && idx != 3)
			return (EINVAL);

		/* this doesnt happen in practice */
		if (idx == 6) {
			/* we might copy out the entry_number as 3 */
			info.entry_number = 3;
			error = copyout(&info, (void *) td->td_frame->tf_esi, sizeof(struct l_user_desc));
			if (error)
				return (error);
		}

		a[0] = LDT_entry_a(&info);
		a[1] = LDT_entry_b(&info);

		memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
		if (ldebug(clone))
			printf("Segment created in clone with CLONE_SETTLS: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n", sd.sd_lobase,
			    sd.sd_hibase,
			    sd.sd_lolimit,
			    sd.sd_hilimit,
			    sd.sd_type,
			    sd.sd_dpl,
			    sd.sd_p,
			    sd.sd_xx,
			    sd.sd_def32,
			    sd.sd_gran);
#endif

		/* set %gs */
		td2->td_pcb->pcb_gsd = sd;
		td2->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
	}

#ifdef DEBUG
	if (ldebug(clone))
		printf(LMSG("clone: successful rfork to %ld, stack %p sig = %d"),
		    (long)p2->p_pid, args->stack, exit_signal);
#endif

	/*
	 * Make this runnable after we are finished with it.
	 */
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td2);
	setrunqueue(td2, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	td->td_retval[0] = p2->p_pid;
	td->td_retval[1] = 0;
	return (0);
}

/* XXX move */
struct l_mmap_argv {
	l_caddr_t	addr;
	l_int		len;
	l_int		prot;
	l_int		flags;
	l_int		fd;
	l_int		pos;
};

#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)

static int linux_mmap_common(struct thread *, struct l_mmap_argv *);

/*
 * Linux mmap2(2): like mmap(2) but the offset is given in pages.
 */
int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{
	struct l_mmap_argv linux_args;

#ifdef DEBUG
	if (ldebug(mmap2))
		printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)args->addr, args->len, args->prot,
		    args->flags, args->fd, args->pgoff);
#endif

	linux_args.addr = (l_caddr_t)args->addr;
	linux_args.len = args->len;
	linux_args.prot = args->prot;
	linux_args.flags = args->flags;
	linux_args.fd = args->fd;
	linux_args.pos = args->pgoff * PAGE_SIZE;

	return (linux_mmap_common(td, &linux_args));
}

/*
 * Old-style Linux mmap(2): arguments are passed in a user-space
 * structure pointed to by args->ptr.
 */
int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
	int error;
	struct l_mmap_argv linux_args;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(mmap))
		printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)linux_args.addr, linux_args.len, linux_args.prot,
		    linux_args.flags, linux_args.fd, linux_args.pos);
#endif

	return (linux_mmap_common(td, &linux_args));
}

/*
 * Shared implementation of linux_mmap()/linux_mmap2(): translate the
 * Linux flags to FreeBSD MAP_* flags, emulate MAP_GROWSDOWN with
 * MAP_STACK, validate the file descriptor and call the native mmap().
 */
static int
linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args)
{
	struct proc *p = td->td_proc;
	struct mmap_args /* {
		caddr_t addr;
		size_t len;
		int prot;
		int flags;
		int fd;
		long pad;
		off_t pos;
	} */ bsd_args;
	int error;
	struct file *fp;

	error = 0;
	bsd_args.flags = 0;
	fp = NULL;

	/*
	 * Linux mmap(2):
	 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
	 */
	if (! ((linux_args->flags & LINUX_MAP_SHARED) ^
	    (linux_args->flags & LINUX_MAP_PRIVATE)))
		return (EINVAL);

	if (linux_args->flags & LINUX_MAP_SHARED)
		bsd_args.flags |= MAP_SHARED;
	if (linux_args->flags & LINUX_MAP_PRIVATE)
		bsd_args.flags |= MAP_PRIVATE;
	if (linux_args->flags & LINUX_MAP_FIXED)
		bsd_args.flags |= MAP_FIXED;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.flags |= MAP_ANON;
	else
		bsd_args.flags |= MAP_NOSYNC;
	if (linux_args->flags & LINUX_MAP_GROWSDOWN) {
		bsd_args.flags |= MAP_STACK;

		/*
		 * The linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the inital BOS, and as len, the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, linux threads has an implicit internal
		 * limit to stack size of STACK_SIZE.  Its just not
		 * enforced explicitly in linux.  But, here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It them maps the top SGROWSIZ bytes,
		 * and autgrows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		/* This gives us TOS */
		bsd_args.addr = linux_args->addr + linux_args->len;

		if (bsd_args.addr > p->p_vmspace->vm_maxsaddr) {
			/*
			 * Some linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			PROC_LOCK(p);
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    lim_cur(p, RLIMIT_STACK);
			PROC_UNLOCK(p);
		}

		/* This gives us our maximum stack size */
		if (linux_args->len > STACK_SIZE - GUARD_SIZE)
			bsd_args.len = linux_args->len;
		else
			bsd_args.len = STACK_SIZE - GUARD_SIZE;

		/*
		 * This gives us a new BOS.  If we're using VM_STACK, then
		 * mmap will just map the top SGROWSIZ bytes, and let
		 * the stack grow down to the limit at BOS.  If we're
		 * not using VM_STACK we map the full stack, since we
		 * don't have a way to autogrow it.
		 */
		bsd_args.addr -= bsd_args.len;
	} else {
		bsd_args.addr = linux_args->addr;
		bsd_args.len = linux_args->len;
	}

	bsd_args.prot = linux_args->prot;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.fd = -1;
	else {
		/*
		 * Linux follows Solaris mmap(2) description:
		 * The file descriptor fildes is opened with
		 * read permission, regardless of the
		 * protection options specified.
		 * If PROT_WRITE is specified, the application
		 * must have opened the file descriptor
		 * fildes with write permission unless
		 * MAP_PRIVATE is specified in the flag
		 * argument as described below.
		 */

		if ((error = fget(td, linux_args->fd, &fp)) != 0)
			return (error);
		if (fp->f_type != DTYPE_VNODE) {
			fdrop(fp, td);
			return (EINVAL);
		}

		/* Linux mmap() just fails for O_WRONLY files */
		if (! (fp->f_flag & FREAD)) {
			fdrop(fp, td);
			return (EACCES);
		}

		bsd_args.fd = linux_args->fd;
		fdrop(fp, td);
	}
	bsd_args.pos = linux_args->pos;
	bsd_args.pad = 0;

#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n",
		    __func__,
		    (void *)bsd_args.addr, bsd_args.len, bsd_args.prot,
		    bsd_args.flags, bsd_args.fd, (int)bsd_args.pos);
#endif
	error = mmap(td, &bsd_args);
#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s() return: 0x%x (0x%08x)\n",
		    __func__, error, (u_int)td->td_retval[0]);
#endif
	return (error);
}

/*
 * Linux pipe(2): native pipe() returns the two fds in td_retval[];
 * Linux instead copies them out to a user array and returns 0.
 * td_retval[1] (%edx) is preserved across the call since the Linux
 * ABI does not expect it to be clobbered.
 */
int
linux_pipe(struct thread *td, struct linux_pipe_args *args)
{
	int error;
	int reg_edx;

#ifdef DEBUG
	if (ldebug(pipe))
		printf(ARGS(pipe, "*"));
#endif

	reg_edx = td->td_retval[1];
	error = pipe(td, 0);
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	error = copyout(td->td_retval, args->pipefds, 2*sizeof(int));
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	td->td_retval[1] = reg_edx;
	td->td_retval[0] = 0;
	return (0);
}

/*
 * Linux ioperm(2): forward to the native i386 I/O-permission-bitmap
 * interface.
 */
int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
	int error;
	struct i386_ioperm_args iia;

	iia.start = args->start;
	iia.length = args->length;
	iia.enable = args->enable;
	mtx_lock(&Giant);
	error = i386_set_ioperm(td, &iia);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Linux iopl(2): set the I/O privilege level (0-3) in the saved
 * eflags.  Requires superuser and a securelevel that permits it.
 */
int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = suser(td)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}

/*
 * Linux modify_ldt(2): read (func 0x00) or write (func 0x01/0x11) an
 * LDT entry, translating the Linux user_desc layout to a native
 * segment descriptor for i386_set_ldt().
 */
int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
	int error;
	struct i386_ldt_args ldt;
	struct l_descriptor ld;
	union descriptor desc;

	if (uap->ptr == NULL)
		return (EINVAL);

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt.start = 0;
		ldt.descs = uap->ptr;
		ldt.num = uap->bytecount / sizeof(union descriptor);
		mtx_lock(&Giant);
		error = i386_get_ldt(td, &ldt);
		/* Linux reports the result in bytes, not descriptors. */
		td->td_retval[0] *= sizeof(union descriptor);
		mtx_unlock(&Giant);
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt.start = ld.entry_number;
		ldt.descs = &desc;
		ldt.num = 1;
		desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
		    (ld.contents << 2);
		desc.sd.sd_dpl = 3;
		desc.sd.sd_p = (ld.seg_not_present ^ 1);
		desc.sd.sd_xx = 0;
		desc.sd.sd_def32 = ld.seg_32bit;
		desc.sd.sd_gran = ld.limit_in_pages;
		mtx_lock(&Giant);
		error = i386_set_ldt(td, &ldt, &desc);
		mtx_unlock(&Giant);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == EOPNOTSUPP) {
		printf("linux: modify_ldt needs kernel option USER_LDT\n");
		error = ENOSYS;
	}

	return (error);
}
/*
 * Old-style Linux sigaction(2): convert the old (single-word mask)
 * l_osigaction_t to/from the new l_sigaction_t and dispatch to
 * linux_do_sigaction().
 */
int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t act, oact;
	int error;

#ifdef DEBUG
	if (ldebug(sigaction))
		printf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

	if (args->nsa != NULL) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		act.lsa_handler = osa.lsa_handler;
		act.lsa_flags = osa.lsa_flags;
		act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(act.lsa_mask);
		act.lsa_mask.__bits[0] = osa.lsa_mask;
	}

	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);

	if (args->osa != NULL && !error) {
		osa.lsa_handler = oact.lsa_handler;
		osa.lsa_flags = oact.lsa_flags;
		osa.lsa_restorer = oact.lsa_restorer;
		osa.lsa_mask = oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We dont use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
	sigset_t sigmask;
	l_sigset_t mask;

#ifdef DEBUG
	if (ldebug(sigsuspend))
		printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
#endif

	LINUX_SIGEMPTYSET(mask);
	mask.__bits[0] = args->mask;
	linux_to_bsd_sigset(&mask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

/*
 * Linux rt_sigsuspend(2): full-sized signal set variant of
 * sigsuspend.
 */
int
linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
{
	l_sigset_t lmask;
	sigset_t sigmask;
	int error;

#ifdef DEBUG
	if (ldebug(rt_sigsuspend))
		printf(ARGS(rt_sigsuspend, "%p, %d"),
		    (void *)uap->newset, uap->sigsetsize);
#endif

	if (uap->sigsetsize != sizeof(l_sigset_t))
		return (EINVAL);

	error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
	if (error)
		return (error);

	linux_to_bsd_sigset(&lmask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

/*
 * Linux pause(2): suspend with the current signal mask until a signal
 * is delivered.
 */
int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
	struct proc *p = td->td_proc;
	sigset_t sigmask;

#ifdef DEBUG
	if (ldebug(pause))
		printf(ARGS(pause, ""));
#endif

	PROC_LOCK(p);
	sigmask = td->td_sigmask;
	PROC_UNLOCK(p);
	return (kern_sigsuspend(td, sigmask));
}

/*
 * Linux sigaltstack(2): translate the l_stack_t and its flags both
 * ways around the native kern_sigaltstack().
 */
int
linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t lss;
	int error;

#ifdef DEBUG
	if (ldebug(sigaltstack))
		printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

	if (uap->uss != NULL) {
		error = copyin(uap->uss, &lss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = lss.ss_sp;
		ss.ss_size = lss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
	}
	error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
	    (uap->uoss != NULL) ? &oss : NULL);
	if (!error && uap->uoss != NULL) {
		lss.ss_sp = oss.ss_sp;
		lss.ss_size = oss.ss_size;
		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}

/*
 * Linux ftruncate64(2): forward to the native ftruncate() with a
 * 64-bit length.
 */
int
linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
{
	struct ftruncate_args sa;

#ifdef DEBUG
	if (ldebug(ftruncate64))
		printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
		    (intmax_t)args->length);
#endif

	sa.fd = args->fd;
	sa.pad = 0;
	sa.length = args->length;
	return ftruncate(td, &sa);
}

/*
 * Linux set_thread_area(2): install a TLS segment descriptor for the
 * calling thread into the per-CPU GDT slot used for %gs.
 */
int
linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	int a[2];
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
		    info.entry_number,
		    info.base_addr,
		    info.limit,
		    info.seg_32bit,
		    info.contents,
		    info.read_exec_only,
		    info.limit_in_pages,
		    info.seg_not_present,
		    info.useable);
#endif

	idx = info.entry_number;
	/*
	 * Semantics of linux version: every thread in the system has array
	 * of 3 tls descriptors. 1st is GLIBC TLS, 2nd is WINE, 3rd unknown. This
	 * syscall loads one of the selected tls decriptors with a value
	 * and also loads GDT descriptors 6, 7 and 8 with the content of the per-thread
	 * descriptors.
	 *
	 * Semantics of fbsd version: I think we can ignore that linux has 3 per-thread
	 * descriptors and use just the 1st one. The tls_array[] is used only in
	 * set/get-thread_area() syscalls and for loading the GDT descriptors. In fbsd
	 * we use just one GDT descriptor for TLS so we will load just one.
	 * XXX: this doesnt work when user-space process tries to use more then 1 TLS segment
	 * comment in the linux sources says wine might do that.
	 */

	/*
	 * we support just GLIBC TLS now
	 * we should let 3 proceed as well because we use this segment so
	 * if code does two subsequent calls it should succeed
	 */
	if (idx != 6 && idx != -1 && idx != 3)
		return (EINVAL);

	/*
	 * we have to copy out the GDT entry we use
	 * FreeBSD uses GDT entry #3 for storing %gs so load that
	 * XXX: what if userspace program doesnt check this value and tries
	 * to use 6, 7 or 8?
	 */
	idx = info.entry_number = 3;
	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (error);

	if (LDT_empty(&info)) {
		a[0] = 0;
		a[1] = 0;
	} else {
		a[0] = LDT_entry_a(&info);
		a[1] = LDT_entry_b(&info);
	}

	memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf("Segment created in set_thread_area: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n", sd.sd_lobase,
		    sd.sd_hibase,
		    sd.sd_lolimit,
		    sd.sd_hilimit,
		    sd.sd_type,
		    sd.sd_dpl,
		    sd.sd_p,
		    sd.sd_xx,
		    sd.sd_def32,
		    sd.sd_gran);
#endif

	/* this is taken from i386 version of cpu_set_user_tls() */
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	load_gs(GSEL(GUGS_SEL, SEL_UPL));
	critical_exit();

	return (0);
}

/*
 * Linux get_thread_area(2): read back the TLS segment descriptor
 * installed by set_thread_area() and convert it to the Linux
 * user_desc layout.
 */
int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{

	struct l_user_desc info;
	int error;
	int idx;
	struct l_desc_struct desc;
	struct segment_descriptor sd;

#ifdef DEBUG
	if (ldebug(get_thread_area))
		printf(ARGS(get_thread_area, "%p"), args->desc);
#endif

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	idx = info.entry_number;
	/* XXX: I am not sure if we want 3 to be allowed too. */
	if (idx != 6 && idx != 3)
		return (EINVAL);

	idx = 3;

	memset(&info, 0, sizeof(info));

	sd = PCPU_GET(fsgs_gdt)[1];

	memcpy(&desc, &sd, sizeof(desc));

	info.entry_number = idx;
	info.base_addr = GET_BASE(&desc);
	info.limit = GET_LIMIT(&desc);
	info.seg_32bit = GET_32BIT(&desc);
	info.contents = GET_CONTENTS(&desc);
	info.read_exec_only = !GET_WRITABLE(&desc);
	info.limit_in_pages = GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !GET_PRESENT(&desc);
	info.useable = GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}

/* copied from kern/kern_time.c */
int
linux_timer_create(struct thread *td, struct linux_timer_create_args *args)
{
	/* Linux and native ktimer argument layouts match; pass through. */
	return ktimer_create(td, (struct ktimer_create_args *) args);
}

int
linux_timer_settime(struct thread *td, struct linux_timer_settime_args *args)
{
	return ktimer_settime(td, (struct ktimer_settime_args *) args);
}

int
linux_timer_gettime(struct thread *td, struct linux_timer_gettime_args *args)
{
	return ktimer_gettime(td, (struct ktimer_gettime_args *) args);
}

int
linux_timer_getoverrun(struct thread *td, struct linux_timer_getoverrun_args *args)
{
	return ktimer_getoverrun(td, (struct ktimer_getoverrun_args *) args);
}

int
linux_timer_delete(struct thread *td, struct linux_timer_delete_args *args)
{
	return ktimer_delete(td, (struct ktimer_delete_args *) args);
}

/* XXX: this wont work with module - convert it */
int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_open(td, (struct kmq_open_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_unlink(td, (struct kmq_unlink_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_timedsend(td, (struct kmq_timedsend_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_timedreceive(td, (struct kmq_timedreceive_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
#ifdef P1003_1B_MQUEUE
	return kmq_notify(td, (struct kmq_notify_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
#ifdef P1003_1B_MQUEUE
	/* NOTE(review): maps to kmq_setattr only; get semantics rely on it. */
	return kmq_setattr(td, (struct kmq_setattr_args *) args);
#else
	return (ENOSYS);
#endif
}