1/* 2 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c. 3 * 4 * Copyright (C) 2000 VA Linux Co 5 * Copyright (C) 2000 Don Dugger <n0ano@valinux.com> 6 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> 7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) 9 * Copyright (C) 2000-2002 Hewlett-Packard Co 10 * David Mosberger-Tang <davidm@hpl.hp.com> 11 * 12 * These routines maintain argument size conversion between 32bit and 64bit 13 * environment. 14 */ 15 16#include <linux/config.h> 17#include <linux/kernel.h> 18#include <linux/sysctl.h> 19#include <linux/sched.h> 20#include <linux/fs.h> 21#include <linux/file.h> 22#include <linux/signal.h> 23#include <linux/utime.h> 24#include <linux/resource.h> 25#include <linux/times.h> 26#include <linux/utsname.h> 27#include <linux/timex.h> 28#include <linux/smp.h> 29#include <linux/smp_lock.h> 30#include <linux/sem.h> 31#include <linux/msg.h> 32#include <linux/mm.h> 33#include <linux/shm.h> 34#include <linux/slab.h> 35#include <linux/uio.h> 36#include <linux/nfs_fs.h> 37#include <linux/smb_fs.h> 38#include <linux/smb_mount.h> 39#include <linux/ncp_fs.h> 40#include <linux/quota.h> 41#include <linux/module.h> 42#include <linux/sunrpc/svc.h> 43#include <linux/nfsd/nfsd.h> 44#include <linux/nfsd/cache.h> 45#include <linux/nfsd/xdr.h> 46#include <linux/nfsd/syscall.h> 47#include <linux/poll.h> 48#include <linux/personality.h> 49#include <linux/stat.h> 50#include <linux/ipc.h> 51 52#include <asm/types.h> 53#include <asm/uaccess.h> 54#include <asm/semaphore.h> 55 56#include <net/scm.h> 57#include <net/sock.h> 58#include <asm/ia32.h> 59 60#define DEBUG 0 61 62#if DEBUG 63# define DBG(fmt...) printk(KERN_DEBUG fmt) 64#else 65# define DBG(fmt...) 
66#endif 67 68#define A(__x) ((unsigned long)(__x)) 69#define AA(__x) ((unsigned long)(__x)) 70#define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1))) 71#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de))) 72 73#define OFFSET4K(a) ((a) & 0xfff) 74#define PAGE_START(addr) ((addr) & PAGE_MASK) 75#define PAGE_OFF(addr) ((addr) & ~PAGE_MASK) 76 77extern asmlinkage long sys_execve (char *, char **, char **, struct pt_regs *); 78extern asmlinkage long sys_mprotect (unsigned long, size_t, unsigned long); 79extern asmlinkage long sys_munmap (unsigned long, size_t); 80extern unsigned long arch_get_unmapped_area (struct file *, unsigned long, unsigned long, 81 unsigned long, unsigned long); 82 83/* forward declaration: */ 84asmlinkage long sys32_mprotect (unsigned int, unsigned int, int); 85asmlinkage unsigned long sys_brk(unsigned long); 86 87/* 88 * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore 89 * while doing so. 90 */ 91static DECLARE_MUTEX(ia32_mmap_sem); 92 93static int 94nargs (unsigned int arg, char **ap) 95{ 96 unsigned int addr; 97 int n, err; 98 99 if (!arg) 100 return 0; 101 102 n = 0; 103 do { 104 err = get_user(addr, (unsigned int *)A(arg)); 105 if (err) 106 return err; 107 if (ap) 108 *ap++ = (char *) A(addr); 109 arg += sizeof(unsigned int); 110 n++; 111 } while (addr); 112 return n - 1; 113} 114 115asmlinkage long 116sys32_execve (char *filename, unsigned int argv, unsigned int envp, 117 int dummy3, int dummy4, int dummy5, int dummy6, int dummy7, 118 int stack) 119{ 120 struct pt_regs *regs = (struct pt_regs *)&stack; 121 unsigned long old_map_base, old_task_size, tssd; 122 char **av, **ae; 123 int na, ne, len; 124 long r; 125 126 na = nargs(argv, NULL); 127 if (na < 0) 128 return na; 129 ne = nargs(envp, NULL); 130 if (ne < 0) 131 return ne; 132 len = (na + ne + 2) * sizeof(*av); 133 av = kmalloc(len, GFP_KERNEL); 134 if (!av) 135 return -ENOMEM; 136 137 ae = av + na + 1; 138 
av[na] = NULL; 139 ae[ne] = NULL; 140 141 r = nargs(argv, av); 142 if (r < 0) 143 goto out; 144 r = nargs(envp, ae); 145 if (r < 0) 146 goto out; 147 148 old_map_base = current->thread.map_base; 149 old_task_size = current->thread.task_size; 150 tssd = ia64_get_kr(IA64_KR_TSSD); 151 152 /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */ 153 current->thread.map_base = DEFAULT_MAP_BASE; 154 current->thread.task_size = DEFAULT_TASK_SIZE; 155 ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob); 156 ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1); 157 158 set_fs(KERNEL_DS); 159 r = sys_execve(filename, av, ae, regs); 160 if (r < 0) { 161 /* oops, execve failed, switch back to old values... */ 162 ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE); 163 ia64_set_kr(IA64_KR_TSSD, tssd); 164 current->thread.map_base = old_map_base; 165 current->thread.task_size = old_task_size; 166 set_fs(USER_DS); /* establish new task-size as the address-limit */ 167 out: 168 kfree(av); 169 } 170 return r; 171} 172 173static inline int 174putstat (struct stat32 *ubuf, struct stat *kbuf) 175{ 176 int err; 177 178 if (clear_user(ubuf, sizeof(*ubuf))) 179 return 1; 180 181 err = __put_user(kbuf->st_dev, &ubuf->st_dev); 182 err |= __put_user(kbuf->st_ino, &ubuf->st_ino); 183 err |= __put_user(kbuf->st_mode, &ubuf->st_mode); 184 err |= __put_user(kbuf->st_nlink, &ubuf->st_nlink); 185 err |= __put_user(kbuf->st_uid, &ubuf->st_uid); 186 err |= __put_user(kbuf->st_gid, &ubuf->st_gid); 187 err |= __put_user(kbuf->st_rdev, &ubuf->st_rdev); 188 err |= __put_user(kbuf->st_size, &ubuf->st_size); 189 err |= __put_user(kbuf->st_atime, &ubuf->st_atime); 190 err |= __put_user(kbuf->st_mtime, &ubuf->st_mtime); 191 err |= __put_user(kbuf->st_ctime, &ubuf->st_ctime); 192 err |= __put_user(kbuf->st_blksize, &ubuf->st_blksize); 193 err |= __put_user(kbuf->st_blocks, &ubuf->st_blocks); 194 return err; 195} 196 197extern asmlinkage long sys_newstat (char * filename, struct stat * 
statbuf); 198 199asmlinkage long 200sys32_newstat (char *filename, struct stat32 *statbuf) 201{ 202 int ret; 203 struct stat s; 204 mm_segment_t old_fs = get_fs(); 205 206 set_fs(KERNEL_DS); 207 ret = sys_newstat(filename, &s); 208 set_fs(old_fs); 209 if (putstat(statbuf, &s)) 210 return -EFAULT; 211 return ret; 212} 213 214extern asmlinkage long sys_newlstat(char * filename, struct stat * statbuf); 215 216asmlinkage long 217sys32_newlstat (char *filename, struct stat32 *statbuf) 218{ 219 mm_segment_t old_fs = get_fs(); 220 struct stat s; 221 int ret; 222 223 set_fs(KERNEL_DS); 224 ret = sys_newlstat(filename, &s); 225 set_fs(old_fs); 226 if (putstat(statbuf, &s)) 227 return -EFAULT; 228 return ret; 229} 230 231extern asmlinkage long sys_newfstat(unsigned int fd, struct stat * statbuf); 232 233asmlinkage long 234sys32_newfstat (unsigned int fd, struct stat32 *statbuf) 235{ 236 mm_segment_t old_fs = get_fs(); 237 struct stat s; 238 int ret; 239 240 set_fs(KERNEL_DS); 241 ret = sys_newfstat(fd, &s); 242 set_fs(old_fs); 243 if (putstat(statbuf, &s)) 244 return -EFAULT; 245 return ret; 246} 247 248#if PAGE_SHIFT > IA32_PAGE_SHIFT 249 250 251static int 252get_page_prot (unsigned long addr) 253{ 254 struct vm_area_struct *vma = find_vma(current->mm, addr); 255 int prot = 0; 256 257 if (!vma || vma->vm_start > addr) 258 return 0; 259 260 if (vma->vm_flags & VM_READ) 261 prot |= PROT_READ; 262 if (vma->vm_flags & VM_WRITE) 263 prot |= PROT_WRITE; 264 if (vma->vm_flags & VM_EXEC) 265 prot |= PROT_EXEC; 266 return prot; 267} 268 269/* 270 * Map a subpage by creating an anonymous page that contains the union of the old page and 271 * the subpage. 
/*
 * Map a subpage by creating an anonymous page that contains the union of the old page and
 * the subpage.
 *
 * Used when PAGE_SIZE > 4KB and an ia32 mapping covers only part of a native page:
 * the existing contents of the native page are saved, the page is remapped anonymous
 * and writable, the old contents outside [start,end) are restored, the file data for
 * [start,end) is read in, and finally the protection is tightened to the union of the
 * old and new protections.  Must be called with ia32_mmap_sem held.
 */
static unsigned long
mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
	      loff_t off)
{
	/* scratch copy of the native page's previous contents */
	void *page = (void *) get_zeroed_page(GFP_KERNEL);
	struct inode *inode;
	unsigned long ret;
	int old_prot = get_page_prot(start);	/* non-zero iff a VMA already covers start */

	DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
	    file, start, end, prot, flags, off);

	if (!page)
		return -ENOMEM;

	if (old_prot)
		/* NOTE(review): copy_from_user() return value ignored -- a fault here
		   silently leaves part of the scratch page zeroed.  TODO confirm intended. */
		copy_from_user(page, (void *) PAGE_START(start), PAGE_SIZE);

	down_write(&current->mm->mmap_sem);
	{
		/* replace the whole native page with a fresh anonymous, writable one */
		ret = do_mmap(0, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
			      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
	}
	up_write(&current->mm->mmap_sem);

	if (IS_ERR((void *) ret))
		goto out;

	if (old_prot) {
		/* copy back the old page contents outside [start,end).
		   NOTE(review): copy_to_user() return values ignored here as well. */
		if (PAGE_OFF(start))
			copy_to_user((void *) PAGE_START(start), page, PAGE_OFF(start));
		if (PAGE_OFF(end))
			copy_to_user((void *) end, page + PAGE_OFF(end),
				     PAGE_SIZE - PAGE_OFF(end));
	}
	if (!(flags & MAP_ANONYMOUS)) {
		/* read the file contents for the newly mapped subrange */
		inode = file->f_dentry->d_inode;
		if (!inode->i_fop || !file->f_op->read
		    || ((*file->f_op->read)(file, (char *) start, end - start, &off) < 0))
		{
			ret = -EINVAL;
			goto out;
		}
	}
	if (!(prot & PROT_WRITE))
		/* drop the temporary PROT_WRITE; keep the union of old and new prots */
		ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
  out:
	free_page((unsigned long) page);
	return ret;
}

/*
 * Emulate an ia32 mmap() when the native page size exceeds 4KB.  The head and tail
 * partial pages (for MAP_FIXED requests) are handled via mmap_subpage(); the aligned
 * middle is mapped directly when the file offset is congruent with the page offset,
 * otherwise an anonymous mapping is created and the file contents are read in
 * manually.  Returns the user start address or a negative error code.  Must be
 * called with ia32_mmap_sem held.
 */
static unsigned long
emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
	      loff_t off)
{
	unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
	struct inode *inode;
	loff_t poff;

	end = start + len;
	pstart = PAGE_START(start);	/* native-page-aligned bounds of the request */
	pend = PAGE_ALIGN(end);

	if (flags & MAP_FIXED) {
		/* head partial page, if any */
		if (start > pstart) {
			if (flags & MAP_SHARED)
				printk(KERN_INFO
				       "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
				       current->comm, current->pid, start);
			ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
					   off);
			if (IS_ERR((void *) ret))
				return ret;
			pstart += PAGE_SIZE;
			if (pstart >= pend)
				return start;	/* done */
		}
		/* tail partial page, if any */
		if (end < pend) {
			if (flags & MAP_SHARED)
				printk(KERN_INFO
				       "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
				       current->comm, current->pid, end);
			ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
					   (off + len) - PAGE_OFF(end));
			if (IS_ERR((void *) ret))
				return ret;
			pend -= PAGE_SIZE;
			if (pstart >= pend)
				return start;	/* done */
		}
	} else {
		/*
		 * If a start address was specified, use it if the entire rounded out area
		 * is available.
		 */
		if (start && !pstart)
			fudge = 1;	/* handle case of mapping to range (0,PAGE_SIZE) */
		tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
		if (tmp != pstart) {
			pstart = tmp;
			start = pstart + PAGE_OFF(off);		/* make start congruent with off */
			end = start + len;
			pend = PAGE_ALIGN(end);
		}
	}

	poff = off + (pstart - start);	/* note: (pstart - start) may be negative */
	is_congruent = (flags & MAP_ANONYMOUS) || (PAGE_OFF(poff) == 0);

	if ((flags & MAP_SHARED) && !is_congruent)
		printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
		       "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);

	DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
	    is_congruent ? "congruent" : "not congruent", poff);

	down_write(&current->mm->mmap_sem);
	{
		if (!(flags & MAP_ANONYMOUS) && is_congruent)
			/* congruent file mapping: map the file directly */
			ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
		else
			/* incongruent: map anonymous (writable for the read-in below) */
			ret = do_mmap(0, pstart, pend - pstart,
				      prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
				      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
	}
	up_write(&current->mm->mmap_sem);

	if (IS_ERR((void *) ret))
		return ret;

	if (!is_congruent) {
		/* read the file contents into the anonymous pages */
		inode = file->f_dentry->d_inode;
		if (!inode->i_fop || !file->f_op->read
		    || ((*file->f_op->read)(file, (char *) pstart, pend - pstart, &poff) < 0))
		{
			sys_munmap(pstart, pend - pstart);
			return -EINVAL;
		}
		if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
			return -EINVAL;
	}
	return start;
}

#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */

/*
 * Widen a 32-bit protection mask to x86 semantics: on x86, PROT_WRITE implies
 * PROT_READ which implies PROT_EXEC, and PROT_READ/PROT_EXEC are indistinguishable.
 */
static inline unsigned int
get_prot32 (unsigned int prot)
{
	if (prot & PROT_WRITE)
		/* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
		prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
	else if (prot & (PROT_READ | PROT_EXEC))
		/* on x86, there is no distinction between PROT_READ and PROT_EXEC */
		prot |= (PROT_READ | PROT_EXEC);

	return prot;
}

/*
 * Common mmap path for all ia32 mmap flavors.  Validates length/offset, widens the
 * protection bits, and dispatches either to emulate_mmap() (when the native page size
 * exceeds 4KB) or straight to do_mmap().  Returns the mapped address or a negative
 * error code.
 */
unsigned long
ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
	      loff_t offset)
{
	DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
	    file, addr, len, prot, flags, offset);

	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	len = IA32_PAGE_ALIGN(len);
	if (len == 0)
		return addr;

	/* the mapping must fit below the ia32 address-space limit */
	if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
		return -EINVAL;

	/* offsets must be 4KB aligned in the ia32 ABI */
	if (OFFSET4K(offset))
		return -EINVAL;

	prot = get_prot32(prot);

#if PAGE_SHIFT > IA32_PAGE_SHIFT
	down(&ia32_mmap_sem);
	{
		addr = emulate_mmap(file, addr, len, prot, flags, offset);
	}
	up(&ia32_mmap_sem);
#else
	down_write(&current->mm->mmap_sem);
	{
		addr = do_mmap(file, addr, len, prot, flags, offset);
	}
	up_write(&current->mm->mmap_sem);
#endif
	DBG("ia32_do_mmap: returning 0x%lx\n", addr);
	return addr;
}
473 474/* 475 * Linux/i386 didn't use to be able to handle more than 4 system call parameters, so these 476 * system calls used a memory block for parameter passing.. 477 */ 478 479struct mmap_arg_struct { 480 unsigned int addr; 481 unsigned int len; 482 unsigned int prot; 483 unsigned int flags; 484 unsigned int fd; 485 unsigned int offset; 486}; 487 488asmlinkage long 489sys32_mmap (struct mmap_arg_struct *arg) 490{ 491 struct mmap_arg_struct a; 492 struct file *file = NULL; 493 unsigned long addr; 494 int flags; 495 496 if (copy_from_user(&a, arg, sizeof(a))) 497 return -EFAULT; 498 499 if (OFFSET4K(a.offset)) 500 return -EINVAL; 501 502 flags = a.flags; 503 504 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 505 if (!(flags & MAP_ANONYMOUS)) { 506 file = fget(a.fd); 507 if (!file) 508 return -EBADF; 509 } 510 511 addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset); 512 513 if (file) 514 fput(file); 515 return addr; 516} 517 518asmlinkage long 519sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags, 520 unsigned int fd, unsigned int pgoff) 521{ 522 struct file *file = NULL; 523 unsigned long retval; 524 525 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 526 if (!(flags & MAP_ANONYMOUS)) { 527 file = fget(fd); 528 if (!file) 529 return -EBADF; 530 } 531 532 retval = ia32_do_mmap(file, addr, len, prot, flags, 533 (unsigned long) pgoff << IA32_PAGE_SHIFT); 534 535 if (file) 536 fput(file); 537 return retval; 538} 539 540asmlinkage long 541sys32_munmap (unsigned int start, unsigned int len) 542{ 543 unsigned int end = start + len; 544 long ret; 545 546#if PAGE_SHIFT <= IA32_PAGE_SHIFT 547 ret = sys_munmap(start, end - start); 548#else 549 if (start > end) 550 return -EINVAL; 551 552 start = PAGE_ALIGN(start); 553 end = PAGE_START(end); 554 555 if (start >= end) 556 return 0; 557 558 down(&ia32_mmap_sem); 559 { 560 ret = sys_munmap(start, end - start); 561 } 562 up(&ia32_mmap_sem); 563#endif 564 return ret; 565} 566 567#if 
PAGE_SHIFT > IA32_PAGE_SHIFT 568 569/* 570 * When mprotect()ing a partial page, we set the permission to the union of the old 571 * settings and the new settings. In other words, it's only possible to make access to a 572 * partial page less restrictive. 573 */ 574static long 575mprotect_subpage (unsigned long address, int new_prot) 576{ 577 int old_prot; 578 579 if (new_prot == PROT_NONE) 580 return 0; /* optimize case where nothing changes... */ 581 582 old_prot = get_page_prot(address); 583 return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot); 584} 585 586#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */ 587 588asmlinkage long 589sys32_mprotect (unsigned int start, unsigned int len, int prot) 590{ 591 unsigned long end = start + len; 592#if PAGE_SHIFT > IA32_PAGE_SHIFT 593 long retval = 0; 594#endif 595 596 prot = get_prot32(prot); 597 598#if PAGE_SHIFT <= IA32_PAGE_SHIFT 599 return sys_mprotect(start, end - start, prot); 600#else 601 if (OFFSET4K(start)) 602 return -EINVAL; 603 604 end = IA32_PAGE_ALIGN(end); 605 if (end < start) 606 return -EINVAL; 607 608 down(&ia32_mmap_sem); 609 { 610 if (PAGE_OFF(start)) { 611 /* start address is 4KB aligned but not page aligned. */ 612 retval = mprotect_subpage(PAGE_START(start), prot); 613 if (retval < 0) 614 goto out; 615 616 start = PAGE_ALIGN(start); 617 if (start >= end) 618 goto out; /* retval is already zero... */ 619 } 620 621 if (PAGE_OFF(end)) { 622 /* end address is 4KB aligned but not page aligned. 
*/ 623 retval = mprotect_subpage(PAGE_START(end), prot); 624 if (retval < 0) 625 return retval; 626 end = PAGE_START(end); 627 } 628 retval = sys_mprotect(start, end - start, prot); 629 } 630 out: 631 up(&ia32_mmap_sem); 632 return retval; 633#endif 634} 635 636asmlinkage long 637sys32_pipe (int *fd) 638{ 639 int retval; 640 int fds[2]; 641 642 retval = do_pipe(fds); 643 if (retval) 644 goto out; 645 if (copy_to_user(fd, fds, sizeof(fds))) 646 retval = -EFAULT; 647 out: 648 return retval; 649} 650 651static inline int 652put_statfs (struct statfs32 *ubuf, struct statfs *kbuf) 653{ 654 int err; 655 656 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf))) 657 return -EFAULT; 658 659 err = __put_user(kbuf->f_type, &ubuf->f_type); 660 err |= __put_user(kbuf->f_bsize, &ubuf->f_bsize); 661 err |= __put_user(kbuf->f_blocks, &ubuf->f_blocks); 662 err |= __put_user(kbuf->f_bfree, &ubuf->f_bfree); 663 err |= __put_user(kbuf->f_bavail, &ubuf->f_bavail); 664 err |= __put_user(kbuf->f_files, &ubuf->f_files); 665 err |= __put_user(kbuf->f_ffree, &ubuf->f_ffree); 666 err |= __put_user(kbuf->f_namelen, &ubuf->f_namelen); 667 err |= __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]); 668 err |= __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]); 669 return err; 670} 671 672extern asmlinkage long sys_statfs(const char * path, struct statfs * buf); 673 674asmlinkage long 675sys32_statfs (const char *path, struct statfs32 *buf) 676{ 677 int ret; 678 struct statfs s; 679 mm_segment_t old_fs = get_fs(); 680 681 set_fs(KERNEL_DS); 682 ret = sys_statfs(path, &s); 683 set_fs(old_fs); 684 if (put_statfs(buf, &s)) 685 return -EFAULT; 686 return ret; 687} 688 689extern asmlinkage long sys_fstatfs(unsigned int fd, struct statfs * buf); 690 691asmlinkage long 692sys32_fstatfs (unsigned int fd, struct statfs32 *buf) 693{ 694 int ret; 695 struct statfs s; 696 mm_segment_t old_fs = get_fs(); 697 698 set_fs(KERNEL_DS); 699 ret = sys_fstatfs(fd, &s); 700 set_fs(old_fs); 701 if (put_statfs(buf, 
&s)) 702 return -EFAULT; 703 return ret; 704} 705 706struct timeval32 707{ 708 int tv_sec, tv_usec; 709}; 710 711struct itimerval32 712{ 713 struct timeval32 it_interval; 714 struct timeval32 it_value; 715}; 716 717static inline long 718get_tv32 (struct timeval *o, struct timeval32 *i) 719{ 720 return (!access_ok(VERIFY_READ, i, sizeof(*i)) || 721 (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec))); 722} 723 724static inline long 725put_tv32 (struct timeval32 *o, struct timeval *i) 726{ 727 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || 728 (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec))); 729} 730 731static inline long 732get_it32 (struct itimerval *o, struct itimerval32 *i) 733{ 734 return (!access_ok(VERIFY_READ, i, sizeof(*i)) || 735 (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | 736 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | 737 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | 738 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); 739} 740 741static inline long 742put_it32 (struct itimerval32 *o, struct itimerval *i) 743{ 744 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || 745 (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | 746 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | 747 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | 748 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec))); 749} 750 751extern int do_getitimer (int which, struct itimerval *value); 752 753asmlinkage long 754sys32_getitimer (int which, struct itimerval32 *it) 755{ 756 struct itimerval kit; 757 int error; 758 759 error = do_getitimer(which, &kit); 760 if (!error && put_it32(it, &kit)) 761 error = -EFAULT; 762 763 return error; 764} 765 766extern int do_setitimer (int which, struct itimerval *, struct itimerval *); 767 768asmlinkage long 769sys32_setitimer (int which, struct itimerval32 *in, struct itimerval32 *out) 770{ 771 struct itimerval kin, kout; 772 
int error; 773 774 if (in) { 775 if (get_it32(&kin, in)) 776 return -EFAULT; 777 } else 778 memset(&kin, 0, sizeof(kin)); 779 780 error = do_setitimer(which, &kin, out ? &kout : NULL); 781 if (error || !out) 782 return error; 783 if (put_it32(out, &kout)) 784 return -EFAULT; 785 786 return 0; 787 788} 789 790asmlinkage unsigned long 791sys32_alarm (unsigned int seconds) 792{ 793 struct itimerval it_new, it_old; 794 unsigned int oldalarm; 795 796 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0; 797 it_new.it_value.tv_sec = seconds; 798 it_new.it_value.tv_usec = 0; 799 do_setitimer(ITIMER_REAL, &it_new, &it_old); 800 oldalarm = it_old.it_value.tv_sec; 801 /* ehhh.. We can't return 0 if we have an alarm pending.. */ 802 /* And we'd better return too much than too little anyway */ 803 if (it_old.it_value.tv_usec) 804 oldalarm++; 805 return oldalarm; 806} 807 808/* Translations due to time_t size differences. Which affects all 809 sorts of things, like timeval and itimerval. */ 810 811struct utimbuf_32 { 812 int atime; 813 int mtime; 814}; 815 816extern asmlinkage long sys_utimes(char * filename, struct timeval * utimes); 817extern asmlinkage long sys_gettimeofday (struct timeval *tv, struct timezone *tz); 818 819asmlinkage long 820sys32_utime (char *filename, struct utimbuf_32 *times32) 821{ 822 mm_segment_t old_fs = get_fs(); 823 struct timeval tv[2], *tvp; 824 long ret; 825 826 if (times32) { 827 if (get_user(tv[0].tv_sec, ×32->atime)) 828 return -EFAULT; 829 tv[0].tv_usec = 0; 830 if (get_user(tv[1].tv_sec, ×32->mtime)) 831 return -EFAULT; 832 tv[1].tv_usec = 0; 833 set_fs(KERNEL_DS); 834 tvp = tv; 835 } else 836 tvp = NULL; 837 ret = sys_utimes(filename, tvp); 838 set_fs(old_fs); 839 return ret; 840} 841 842extern struct timezone sys_tz; 843extern int do_sys_settimeofday (struct timeval *tv, struct timezone *tz); 844 845asmlinkage long 846sys32_gettimeofday (struct timeval32 *tv, struct timezone *tz) 847{ 848 if (tv) { 849 struct timeval ktv; 850 
do_gettimeofday(&ktv); 851 if (put_tv32(tv, &ktv)) 852 return -EFAULT; 853 } 854 if (tz) { 855 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) 856 return -EFAULT; 857 } 858 return 0; 859} 860 861asmlinkage long 862sys32_settimeofday (struct timeval32 *tv, struct timezone *tz) 863{ 864 struct timeval ktv; 865 struct timezone ktz; 866 867 if (tv) { 868 if (get_tv32(&ktv, tv)) 869 return -EFAULT; 870 } 871 if (tz) { 872 if (copy_from_user(&ktz, tz, sizeof(ktz))) 873 return -EFAULT; 874 } 875 876 return do_sys_settimeofday(tv ? &ktv : NULL, tz ? &ktz : NULL); 877} 878 879struct getdents32_callback { 880 struct linux32_dirent * current_dir; 881 struct linux32_dirent * previous; 882 int count; 883 int error; 884}; 885 886struct readdir32_callback { 887 struct old_linux32_dirent * dirent; 888 int count; 889}; 890 891static int 892filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino, 893 unsigned int d_type) 894{ 895 struct linux32_dirent * dirent; 896 struct getdents32_callback * buf = (struct getdents32_callback *) __buf; 897 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1, 4); 898 899 buf->error = -EINVAL; /* only used if we fail.. */ 900 if (reclen > buf->count) 901 return -EINVAL; 902 buf->error = -EFAULT; /* only used if we fail.. 
/*
 * ia32 getdents(): fill the user buffer at DIRENT with as many 32-bit directory
 * records as fit in COUNT bytes.  Returns the number of bytes written, 0 at EOF, or a
 * negative error code.
 */
asmlinkage long
sys32_getdents (unsigned int fd, struct linux32_dirent *dirent, unsigned int count)
{
	struct file * file;
	struct linux32_dirent * lastdirent;
	struct getdents32_callback buf;
	int error;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.current_dir = dirent;
	buf.previous = NULL;
	buf.count = count;
	buf.error = 0;

	error = vfs_readdir(file, filldir32, &buf);
	if (error < 0)
		goto out_putf;
	error = buf.error;
	lastdirent = buf.previous;
	if (lastdirent) {
		/* back-patch the final record's d_off with the current file position.
		   Note: a fault here yields -EINVAL, not -EFAULT (historical behavior). */
		error = -EINVAL;
		if (put_user(file->f_pos, &lastdirent->d_off))
			goto out_putf;
		error = count - buf.count;	/* bytes consumed */
	}

out_putf:
	fput(file);
out:
	return error;
}

/*
 * vfs_readdir() callback for the legacy single-entry readdir(): emit exactly one
 * old-style 32-bit dirent; any second invocation is rejected with -EINVAL.
 */
static int
fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
	      unsigned int d_type)
{
	struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
	struct old_linux32_dirent * dirent;

	if (buf->count)
		return -EINVAL;
	buf->count++;
	dirent = buf->dirent;
	if (put_user(ino, &dirent->d_ino)
	    || put_user(offset, &dirent->d_offset)
	    || put_user(namlen, &dirent->d_namlen)
	    || copy_to_user(dirent->d_name, name, namlen)
	    || put_user(0, dirent->d_name + namlen))
		return -EFAULT;
	return 0;
}

/*
 * Legacy ia32 readdir(): return a single directory entry per call.  Returns 1 when an
 * entry was produced, 0 at EOF, or a negative error code.  COUNT is ignored.
 */
asmlinkage long
sys32_readdir (unsigned int fd, void *dirent, unsigned int count)
{
	int error;
	struct file * file;
	struct readdir32_callback buf;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.count = 0;
	buf.dirent = dirent;

	error = vfs_readdir(file, fillonedir32, &buf);
	if (error >= 0)
		error = buf.count;	/* 1 if an entry was emitted, else 0 */
	fput(file);
out:
	return error;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems.  So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restart only when you want to.
 */
#define MAX_SELECT_SECONDS \
	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
#define ROUND_UP_TIME(x,y) (((x)+(y)-1)/(y))

/*
 * ia32 select(): convert the 32-bit timeval into a jiffies timeout, run do_select()
 * over kernel-side copies of the fd bitmaps, write the surviving bits and the
 * remaining timeout back, and map a pending signal to -ERESTARTNOHAND.
 */
asmlinkage long
sys32_select (int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval32 *tvp32)
{
	fd_set_bits fds;
	char *bits;
	long timeout;
	int ret, size;

	timeout = MAX_SCHEDULE_TIMEOUT;
	if (tvp32) {
		time_t sec, usec;

		ret = -EFAULT;
		if (get_user(sec, &tvp32->tv_sec) || get_user(usec, &tvp32->tv_usec))
			goto out_nofds;

		ret = -EINVAL;
		if (sec < 0 || usec < 0)
			goto out_nofds;

		/* timeouts at or beyond MAX_SELECT_SECONDS mean "wait forever" */
		if ((unsigned long) sec < MAX_SELECT_SECONDS) {
			timeout = ROUND_UP_TIME(usec, 1000000/HZ);
			timeout += sec * (unsigned long) HZ;
		}
	}

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	if (n > current->files->max_fdset)
		n = current->files->max_fdset;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	ret = -ENOMEM;
	size = FDS_BYTES(n);
	bits = kmalloc(6 * size, GFP_KERNEL);
	if (!bits)
		goto out_nofds;
	fds.in      = (unsigned long *) bits;
	fds.out     = (unsigned long *) (bits + size);
	fds.ex      = (unsigned long *) (bits + 2*size);
	fds.res_in  = (unsigned long *) (bits + 3*size);
	fds.res_out = (unsigned long *) (bits + 4*size);
	fds.res_ex  = (unsigned long *) (bits + 5*size);

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, &timeout);

	/* write back the remaining timeout, unless the app asked for BSD behavior */
	if (tvp32 && !(current->personality & STICKY_TIMEOUTS)) {
		time_t sec = 0, usec = 0;
		if (timeout) {
			sec = timeout / HZ;
			usec = timeout % HZ;
			usec *= (1000000/HZ);
		}
		if (put_user(sec, &tvp32->tv_sec) || put_user(usec, &tvp32->tv_usec)) {
			ret = -EFAULT;
			goto out;
		}
	}

	if (ret < 0)
		goto out;
	if (!ret) {
		/* nothing ready: restart transparently unless a signal is pending */
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	set_fd_set(n, inp, fds.res_in);
	set_fd_set(n, outp, fds.res_out);
	set_fd_set(n, exp, fds.res_ex);

out:
	kfree(bits);
out_nofds:
	return ret;
}

/* legacy select() passes all five arguments in a user-space struct */
struct sel_arg_struct {
	unsigned int n;
	unsigned int inp;
	unsigned int outp;
	unsigned int exp;
	unsigned int tvp;
};

/* Legacy ia32 select(): unpack the argument block and defer to sys32_select(). */
asmlinkage long
sys32_old_select (struct sel_arg_struct *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys32_select(a.n, (fd_set *) A(a.inp), (fd_set *) A(a.outp), (fd_set *) A(a.exp),
			    (struct timeval32 *) A(a.tvp));
}

extern asmlinkage long sys_nanosleep (struct timespec *rqtp, struct timespec *rmtp);

/*
 * ia32 nanosleep(): the 32-bit and 64-bit timespec layouts differ, so convert in and
 * (if the sleep was interrupted) convert the remaining time back out.
 */
asmlinkage long
sys32_nanosleep (struct timespec32 *rqtp, struct timespec32 *rmtp)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (get_user(t.tv_sec, &rqtp->tv_sec) || get_user(t.tv_nsec, &rqtp->tv_nsec))
		return -EFAULT;
	set_fs(KERNEL_DS);	/* &t is a kernel pointer */
	ret = sys_nanosleep(&t, rmtp ? &t : NULL);
	set_fs(old_fs);
	if (rmtp && ret == -EINTR) {
		if (put_user(t.tv_sec, &rmtp->tv_sec) || put_user(t.tv_nsec, &rmtp->tv_nsec))
			return -EFAULT;
	}
	return ret;
}
1125sys32_nanosleep (struct timespec32 *rqtp, struct timespec32 *rmtp) 1126{ 1127 struct timespec t; 1128 int ret; 1129 mm_segment_t old_fs = get_fs(); 1130 1131 if (get_user (t.tv_sec, &rqtp->tv_sec) || get_user (t.tv_nsec, &rqtp->tv_nsec)) 1132 return -EFAULT; 1133 set_fs(KERNEL_DS); 1134 ret = sys_nanosleep(&t, rmtp ? &t : NULL); 1135 set_fs(old_fs); 1136 if (rmtp && ret == -EINTR) { 1137 if (put_user(t.tv_sec, &rmtp->tv_sec) || put_user(t.tv_nsec, &rmtp->tv_nsec)) 1138 return -EFAULT; 1139 } 1140 return ret; 1141} 1142 1143struct iovec32 { unsigned int iov_base; int iov_len; }; 1144asmlinkage ssize_t sys_readv (unsigned long,const struct iovec *,unsigned long); 1145asmlinkage ssize_t sys_writev (unsigned long,const struct iovec *,unsigned long); 1146 1147static struct iovec * 1148get_iovec32 (struct iovec32 *iov32, struct iovec *iov_buf, u32 count, int type) 1149{ 1150 int i; 1151 u32 buf, len; 1152 struct iovec *ivp, *iov; 1153 1154 /* Get the "struct iovec" from user memory */ 1155 1156 if (!count) 1157 return 0; 1158 if (verify_area(VERIFY_READ, iov32, sizeof(struct iovec32)*count)) 1159 return NULL; 1160 if (count > UIO_MAXIOV) 1161 return NULL; 1162 if (count > UIO_FASTIOV) { 1163 iov = kmalloc(count*sizeof(struct iovec), GFP_KERNEL); 1164 if (!iov) 1165 return NULL; 1166 } else 1167 iov = iov_buf; 1168 1169 ivp = iov; 1170 for (i = 0; i < count; i++) { 1171 if (__get_user(len, &iov32->iov_len) || __get_user(buf, &iov32->iov_base)) { 1172 if (iov != iov_buf) 1173 kfree(iov); 1174 return NULL; 1175 } 1176 if (verify_area(type, (void *)A(buf), len)) { 1177 if (iov != iov_buf) 1178 kfree(iov); 1179 return((struct iovec *)0); 1180 } 1181 ivp->iov_base = (void *)A(buf); 1182 ivp->iov_len = (__kernel_size_t) len; 1183 iov32++; 1184 ivp++; 1185 } 1186 return iov; 1187} 1188 1189asmlinkage long 1190sys32_readv (int fd, struct iovec32 *vector, u32 count) 1191{ 1192 struct iovec iovstack[UIO_FASTIOV]; 1193 struct iovec *iov; 1194 long ret; 1195 mm_segment_t old_fs = 
get_fs(); 1196 1197 iov = get_iovec32(vector, iovstack, count, VERIFY_WRITE); 1198 if (!iov) 1199 return -EFAULT; 1200 set_fs(KERNEL_DS); 1201 ret = sys_readv(fd, iov, count); 1202 set_fs(old_fs); 1203 if (iov != iovstack) 1204 kfree(iov); 1205 return ret; 1206} 1207 1208asmlinkage long 1209sys32_writev (int fd, struct iovec32 *vector, u32 count) 1210{ 1211 struct iovec iovstack[UIO_FASTIOV]; 1212 struct iovec *iov; 1213 long ret; 1214 mm_segment_t old_fs = get_fs(); 1215 1216 iov = get_iovec32(vector, iovstack, count, VERIFY_READ); 1217 if (!iov) 1218 return -EFAULT; 1219 set_fs(KERNEL_DS); 1220 ret = sys_writev(fd, iov, count); 1221 set_fs(old_fs); 1222 if (iov != iovstack) 1223 kfree(iov); 1224 return ret; 1225} 1226 1227#define RLIM_INFINITY32 0x7fffffff 1228#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x) 1229 1230struct rlimit32 { 1231 int rlim_cur; 1232 int rlim_max; 1233}; 1234 1235extern asmlinkage long sys_getrlimit (unsigned int resource, struct rlimit *rlim); 1236 1237asmlinkage long 1238sys32_old_getrlimit (unsigned int resource, struct rlimit32 *rlim) 1239{ 1240 mm_segment_t old_fs = get_fs(); 1241 struct rlimit r; 1242 int ret; 1243 1244 set_fs(KERNEL_DS); 1245 ret = sys_getrlimit(resource, &r); 1246 set_fs(old_fs); 1247 if (!ret) { 1248 ret = put_user(RESOURCE32(r.rlim_cur), &rlim->rlim_cur); 1249 ret |= put_user(RESOURCE32(r.rlim_max), &rlim->rlim_max); 1250 } 1251 return ret; 1252} 1253 1254asmlinkage long 1255sys32_getrlimit (unsigned int resource, struct rlimit32 *rlim) 1256{ 1257 mm_segment_t old_fs = get_fs(); 1258 struct rlimit r; 1259 int ret; 1260 1261 set_fs(KERNEL_DS); 1262 ret = sys_getrlimit(resource, &r); 1263 set_fs(old_fs); 1264 if (!ret) { 1265 if (r.rlim_cur >= 0xffffffff) 1266 r.rlim_cur = 0xffffffff; 1267 if (r.rlim_max >= 0xffffffff) 1268 r.rlim_max = 0xffffffff; 1269 ret = put_user(r.rlim_cur, &rlim->rlim_cur); 1270 ret |= put_user(r.rlim_max, &rlim->rlim_max); 1271 } 1272 return ret; 1273} 1274 1275extern 
asmlinkage long sys_setrlimit (unsigned int resource, struct rlimit *rlim);

/* 32-bit setrlimit: widen the limits (mapping the 32-bit infinity to the
   native RLIM_INFINITY) and call the native syscall under set_fs(). */
asmlinkage long
sys32_setrlimit (unsigned int resource, struct rlimit32 *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (get_user(r.rlim_cur, &rlim->rlim_cur) || get_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	if (r.rlim_cur == RLIM_INFINITY32)
		r.rlim_cur = RLIM_INFINITY;
	if (r.rlim_max == RLIM_INFINITY32)
		r.rlim_max = RLIM_INFINITY;
	set_fs(KERNEL_DS);
	ret = sys_setrlimit(resource, &r);
	set_fs(old_fs);
	return ret;
}

/*
 * Declare the IA32 version of the msghdr
 */

struct msghdr32 {
	unsigned int	msg_name;	/* Socket name */
	int		msg_namelen;	/* Length of name */
	unsigned int	msg_iov;	/* Data blocks */
	unsigned int	msg_iovlen;	/* Number of blocks */
	unsigned int	msg_control;	/* Per protocol magic (eg BSD file descriptor passing) */
	unsigned int	msg_controllen;	/* Length of cmsg list */
	unsigned	msg_flags;
};

/* 32-bit cmsg header: cmsg_len is 32 bits wide here. */
struct cmsghdr32 {
	__kernel_size_t32 cmsg_len;
	int               cmsg_level;
	int               cmsg_type;
};

/* Bleech... */
/* 32-bit analogues of the CMSG_* accessors: identical logic, but aligned to
   sizeof(int) and based on struct cmsghdr32 rather than struct cmsghdr. */
#define __CMSG32_NXTHDR(ctl, len, cmsg, cmsglen) __cmsg32_nxthdr((ctl),(len),(cmsg),(cmsglen))
#define CMSG32_NXTHDR(mhdr, cmsg, cmsglen) cmsg32_nxthdr((mhdr), (cmsg), (cmsglen))

#define CMSG32_ALIGN(len) ( ((len)+sizeof(int)-1) & ~(sizeof(int)-1) )

#define CMSG32_DATA(cmsg) \
	((void *)((char *)(cmsg) + CMSG32_ALIGN(sizeof(struct cmsghdr32))))

#define CMSG32_SPACE(len) \
	(CMSG32_ALIGN(sizeof(struct cmsghdr32)) + CMSG32_ALIGN(len))

#define CMSG32_LEN(len) (CMSG32_ALIGN(sizeof(struct cmsghdr32)) + (len))

#define __CMSG32_FIRSTHDR(ctl,len) \
	((len) >= sizeof(struct cmsghdr32) ? (struct cmsghdr32 *)(ctl) : (struct cmsghdr32 *)NULL)
#define CMSG32_FIRSTHDR(msg)	__CMSG32_FIRSTHDR((msg)->msg_control, (msg)->msg_controllen)

/* Step to the next 32-bit cmsg; NULL once the next header would overrun the
   SIZE-byte control buffer starting at CTL. */
static inline struct cmsghdr32 *
__cmsg32_nxthdr (void *ctl, __kernel_size_t size, struct cmsghdr32 *cmsg, int cmsg_len)
{
	struct cmsghdr32 * ptr;

	ptr = (struct cmsghdr32 *)(((unsigned char *) cmsg) + CMSG32_ALIGN(cmsg_len));
	if ((unsigned long)((char*)(ptr+1) - (char *) ctl) > size)
		return NULL;
	return ptr;
}

static inline struct cmsghdr32 *
cmsg32_nxthdr (struct msghdr *msg, struct cmsghdr32 *cmsg, int cmsg_len)
{
	return __cmsg32_nxthdr(msg->msg_control, msg->msg_controllen, cmsg, cmsg_len);
}

/* Read a 32-bit msghdr from user space and widen it into *MP.  The iov and
   control pointers still point into user space on return. */
static inline int
get_msghdr32 (struct msghdr *mp, struct msghdr32 *mp32)
{
	int ret;
	unsigned int i;

	if (!access_ok(VERIFY_READ, mp32, sizeof(*mp32)))
		return -EFAULT;
	ret = __get_user(i, &mp32->msg_name);
	mp->msg_name = (void *)A(i);
	ret |= __get_user(mp->msg_namelen, &mp32->msg_namelen);
	ret |= __get_user(i, &mp32->msg_iov);
	mp->msg_iov = (struct iovec *)A(i);
	ret |= __get_user(mp->msg_iovlen, &mp32->msg_iovlen);
	ret |= __get_user(i, &mp32->msg_control);
	mp->msg_control = (void *)A(i);
	ret |= __get_user(mp->msg_controllen, &mp32->msg_controllen);
	ret |= __get_user(mp->msg_flags, &mp32->msg_flags);
	return ret ? -EFAULT : 0;
}

/*
 * There is a lot of hair here because the alignment rules (and thus placement) of cmsg
 * headers and length are different for 32-bit apps.
-DaveM 1372 */ 1373static int 1374get_cmsghdr32 (struct msghdr *kmsg, unsigned char *stackbuf, struct sock *sk, size_t *bufsize) 1375{ 1376 struct cmsghdr *kcmsg, *kcmsg_base; 1377 __kernel_size_t kcmlen, tmp; 1378 __kernel_size_t32 ucmlen; 1379 struct cmsghdr32 *ucmsg; 1380 long err; 1381 1382 kcmlen = 0; 1383 kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf; 1384 ucmsg = CMSG32_FIRSTHDR(kmsg); 1385 while (ucmsg != NULL) { 1386 if (get_user(ucmlen, &ucmsg->cmsg_len)) 1387 return -EFAULT; 1388 1389 /* Catch bogons. */ 1390 if (CMSG32_ALIGN(ucmlen) < CMSG32_ALIGN(sizeof(struct cmsghdr32))) 1391 return -EINVAL; 1392 if ((unsigned long)(((char *)ucmsg - (char *)kmsg->msg_control) + ucmlen) 1393 > kmsg->msg_controllen) 1394 return -EINVAL; 1395 1396 tmp = ((ucmlen - CMSG32_ALIGN(sizeof(*ucmsg))) + 1397 CMSG_ALIGN(sizeof(struct cmsghdr))); 1398 kcmlen += tmp; 1399 ucmsg = CMSG32_NXTHDR(kmsg, ucmsg, ucmlen); 1400 } 1401 if (kcmlen == 0) 1402 return -EINVAL; 1403 1404 /* 1405 * The kcmlen holds the 64-bit version of the control length. It may not be 1406 * modified as we do not stick it into the kmsg until we have successfully copied 1407 * over all of the data from the user. 1408 */ 1409 if (kcmlen > *bufsize) { 1410 *bufsize = kcmlen; 1411 kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL); 1412 } 1413 if (kcmsg == NULL) 1414 return -ENOBUFS; 1415 1416 /* Now copy them over neatly. */ 1417 memset(kcmsg, 0, kcmlen); 1418 ucmsg = CMSG32_FIRSTHDR(kmsg); 1419 while (ucmsg != NULL) { 1420 err = get_user(ucmlen, &ucmsg->cmsg_len); 1421 tmp = ((ucmlen - CMSG32_ALIGN(sizeof(*ucmsg))) + 1422 CMSG_ALIGN(sizeof(struct cmsghdr))); 1423 kcmsg->cmsg_len = tmp; 1424 err |= get_user(kcmsg->cmsg_level, &ucmsg->cmsg_level); 1425 err |= get_user(kcmsg->cmsg_type, &ucmsg->cmsg_type); 1426 1427 /* Copy over the data. 
*/ 1428 err |= copy_from_user(CMSG_DATA(kcmsg), CMSG32_DATA(ucmsg), 1429 (ucmlen - CMSG32_ALIGN(sizeof(*ucmsg)))); 1430 if (err) 1431 goto out_free_efault; 1432 1433 /* Advance. */ 1434 kcmsg = (struct cmsghdr *)((char *)kcmsg + CMSG_ALIGN(tmp)); 1435 ucmsg = CMSG32_NXTHDR(kmsg, ucmsg, ucmlen); 1436 } 1437 1438 /* Ok, looks like we made it. Hook it up and return success. */ 1439 kmsg->msg_control = kcmsg_base; 1440 kmsg->msg_controllen = kcmlen; 1441 return 0; 1442 1443out_free_efault: 1444 if (kcmsg_base != (struct cmsghdr *)stackbuf) 1445 sock_kfree_s(sk, kcmsg_base, kcmlen); 1446 return -EFAULT; 1447} 1448 1449/* 1450 * Verify & re-shape IA32 iovec. The caller must ensure that the 1451 * iovec is big enough to hold the re-shaped message iovec. 1452 * 1453 * Save time not doing verify_area. copy_*_user will make this work 1454 * in any case. 1455 * 1456 * Don't need to check the total size for overflow (cf net/core/iovec.c), 1457 * 32-bit sizes can't overflow a 64-bit count. 1458 */ 1459 1460static inline int 1461verify_iovec32 (struct msghdr *m, struct iovec *iov, char *address, int mode) 1462{ 1463 int size, err, ct; 1464 struct iovec32 *iov32; 1465 1466 if (m->msg_namelen) { 1467 if (mode == VERIFY_READ) { 1468 err = move_addr_to_kernel(m->msg_name, m->msg_namelen, address); 1469 if (err < 0) 1470 goto out; 1471 } 1472 m->msg_name = address; 1473 } else 1474 m->msg_name = NULL; 1475 1476 err = -EFAULT; 1477 size = m->msg_iovlen * sizeof(struct iovec32); 1478 if (copy_from_user(iov, m->msg_iov, size)) 1479 goto out; 1480 m->msg_iov = iov; 1481 1482 err = 0; 1483 iov32 = (struct iovec32 *)iov; 1484 for (ct = m->msg_iovlen; ct-- > 0; ) { 1485 iov[ct].iov_len = (__kernel_size_t)iov32[ct].iov_len; 1486 iov[ct].iov_base = (void *) A(iov32[ct].iov_base); 1487 err += iov[ct].iov_len; 1488 } 1489out: 1490 return err; 1491} 1492 1493static void 1494put_cmsg32(struct msghdr *kmsg, int level, int type, int len, void *data) 1495{ 1496 struct cmsghdr32 *cm = (struct 
cmsghdr32 *) kmsg->msg_control; 1497 struct cmsghdr32 cmhdr; 1498 int cmlen = CMSG32_LEN(len); 1499 1500 if(cm == NULL || kmsg->msg_controllen < sizeof(*cm)) { 1501 kmsg->msg_flags |= MSG_CTRUNC; 1502 return; 1503 } 1504 1505 if(kmsg->msg_controllen < cmlen) { 1506 kmsg->msg_flags |= MSG_CTRUNC; 1507 cmlen = kmsg->msg_controllen; 1508 } 1509 cmhdr.cmsg_level = level; 1510 cmhdr.cmsg_type = type; 1511 cmhdr.cmsg_len = cmlen; 1512 1513 if(copy_to_user(cm, &cmhdr, sizeof cmhdr)) 1514 return; 1515 if(copy_to_user(CMSG32_DATA(cm), data, 1516 cmlen - sizeof(struct cmsghdr32))) 1517 return; 1518 cmlen = CMSG32_SPACE(len); 1519 kmsg->msg_control += cmlen; 1520 kmsg->msg_controllen -= cmlen; 1521} 1522 1523static void 1524scm_detach_fds32 (struct msghdr *kmsg, struct scm_cookie *scm) 1525{ 1526 struct cmsghdr32 *cm = (struct cmsghdr32 *) kmsg->msg_control; 1527 int fdmax = (kmsg->msg_controllen - sizeof(struct cmsghdr32)) 1528 / sizeof(int); 1529 int fdnum = scm->fp->count; 1530 struct file **fp = scm->fp->fp; 1531 int *cmfptr; 1532 int err = 0, i; 1533 1534 if (fdnum < fdmax) 1535 fdmax = fdnum; 1536 1537 for (i = 0, cmfptr = (int *) CMSG32_DATA(cm); 1538 i < fdmax; 1539 i++, cmfptr++) { 1540 int new_fd; 1541 err = get_unused_fd(); 1542 if (err < 0) 1543 break; 1544 new_fd = err; 1545 err = put_user(new_fd, cmfptr); 1546 if (err) { 1547 put_unused_fd(new_fd); 1548 break; 1549 } 1550 /* Bump the usage count and install the file. 
*/ 1551 get_file(fp[i]); 1552 current->files->fd[new_fd] = fp[i]; 1553 } 1554 1555 if (i > 0) { 1556 int cmlen = CMSG32_LEN(i * sizeof(int)); 1557 if (!err) 1558 err = put_user(SOL_SOCKET, &cm->cmsg_level); 1559 if (!err) 1560 err = put_user(SCM_RIGHTS, &cm->cmsg_type); 1561 if (!err) 1562 err = put_user(cmlen, &cm->cmsg_len); 1563 if (!err) { 1564 cmlen = CMSG32_SPACE(i * sizeof(int)); 1565 kmsg->msg_control += cmlen; 1566 kmsg->msg_controllen -= cmlen; 1567 } 1568 } 1569 if (i < fdnum) 1570 kmsg->msg_flags |= MSG_CTRUNC; 1571 1572 /* 1573 * All of the files that fit in the message have had their 1574 * usage counts incremented, so we just free the list. 1575 */ 1576 __scm_destroy(scm); 1577} 1578 1579/* 1580 * In these cases we (currently) can just copy to data over verbatim because all CMSGs 1581 * created by the kernel have well defined types which have the same layout in both the 1582 * 32-bit and 64-bit API. One must add some special cased conversions here if we start 1583 * sending control messages with incompatible types. 1584 * 1585 * SCM_RIGHTS and SCM_CREDENTIALS are done by hand in recvmsg32 right after 1586 * we do our work. 
The remaining cases are: 1587 * 1588 * SOL_IP IP_PKTINFO struct in_pktinfo 32-bit clean 1589 * IP_TTL int 32-bit clean 1590 * IP_TOS __u8 32-bit clean 1591 * IP_RECVOPTS variable length 32-bit clean 1592 * IP_RETOPTS variable length 32-bit clean 1593 * (these last two are clean because the types are defined 1594 * by the IPv4 protocol) 1595 * IP_RECVERR struct sock_extended_err + 1596 * struct sockaddr_in 32-bit clean 1597 * SOL_IPV6 IPV6_RECVERR struct sock_extended_err + 1598 * struct sockaddr_in6 32-bit clean 1599 * IPV6_PKTINFO struct in6_pktinfo 32-bit clean 1600 * IPV6_HOPLIMIT int 32-bit clean 1601 * IPV6_FLOWINFO u32 32-bit clean 1602 * IPV6_HOPOPTS ipv6 hop exthdr 32-bit clean 1603 * IPV6_DSTOPTS ipv6 dst exthdr(s) 32-bit clean 1604 * IPV6_RTHDR ipv6 routing exthdr 32-bit clean 1605 * IPV6_AUTHHDR ipv6 auth exthdr 32-bit clean 1606 */ 1607static void 1608cmsg32_recvmsg_fixup (struct msghdr *kmsg, unsigned long orig_cmsg_uptr) 1609{ 1610 unsigned char *workbuf, *wp; 1611 unsigned long bufsz, space_avail; 1612 struct cmsghdr *ucmsg; 1613 long err; 1614 1615 bufsz = ((unsigned long)kmsg->msg_control) - orig_cmsg_uptr; 1616 space_avail = kmsg->msg_controllen + bufsz; 1617 wp = workbuf = kmalloc(bufsz, GFP_KERNEL); 1618 if (workbuf == NULL) 1619 goto fail; 1620 1621 /* To make this more sane we assume the kernel sends back properly 1622 * formatted control messages. Because of how the kernel will truncate 1623 * the cmsg_len for MSG_TRUNC cases, we need not check that case either. 1624 */ 1625 ucmsg = (struct cmsghdr *) orig_cmsg_uptr; 1626 while (((unsigned long)ucmsg) < ((unsigned long)kmsg->msg_control)) { 1627 struct cmsghdr32 *kcmsg32 = (struct cmsghdr32 *) wp; 1628 int clen64, clen32; 1629 1630 /* 1631 * UCMSG is the 64-bit format CMSG entry in user-space. KCMSG32 is within 1632 * the kernel space temporary buffer we use to convert into a 32-bit style 1633 * CMSG. 
1634 */ 1635 err = get_user(kcmsg32->cmsg_len, &ucmsg->cmsg_len); 1636 err |= get_user(kcmsg32->cmsg_level, &ucmsg->cmsg_level); 1637 err |= get_user(kcmsg32->cmsg_type, &ucmsg->cmsg_type); 1638 if (err) 1639 goto fail2; 1640 1641 clen64 = kcmsg32->cmsg_len; 1642 copy_from_user(CMSG32_DATA(kcmsg32), CMSG_DATA(ucmsg), 1643 clen64 - CMSG_ALIGN(sizeof(*ucmsg))); 1644 clen32 = ((clen64 - CMSG_ALIGN(sizeof(*ucmsg))) + 1645 CMSG32_ALIGN(sizeof(struct cmsghdr32))); 1646 kcmsg32->cmsg_len = clen32; 1647 1648 ucmsg = (struct cmsghdr *) (((char *)ucmsg) + CMSG_ALIGN(clen64)); 1649 wp = (((char *)kcmsg32) + CMSG32_ALIGN(clen32)); 1650 } 1651 1652 /* Copy back fixed up data, and adjust pointers. */ 1653 bufsz = (wp - workbuf); 1654 if (copy_to_user((void *)orig_cmsg_uptr, workbuf, bufsz)) 1655 goto fail2; 1656 1657 kmsg->msg_control = (struct cmsghdr *) (((char *)orig_cmsg_uptr) + bufsz); 1658 kmsg->msg_controllen = space_avail - bufsz; 1659 kfree(workbuf); 1660 return; 1661 1662 fail2: 1663 kfree(workbuf); 1664 fail: 1665 /* 1666 * If we leave the 64-bit format CMSG chunks in there, the application could get 1667 * confused and crash. So to ensure greater recovery, we report no CMSGs. 
 */
	kmsg->msg_controllen += bufsz;
	kmsg->msg_control = (void *) orig_cmsg_uptr;
}

/* Drop the file reference taken by sockfd_lookup(). */
static inline void
sockfd_put (struct socket *sock)
{
	fput(sock->file);
}

#define MAX_SOCK_ADDR	128		/* 108 for Unix domain -
					   16 for IP, 16 for IPX,
					   24 for IPv6,
					   about 80 for AX.25 */

extern struct socket *sockfd_lookup (int fd, int *err);

/*
 *	BSD sendmsg interface
 */
/* 32-bit sendmsg: widen the msghdr, reshape the iovec and cmsg chain into
   kernel buffers, then hand the message to sock_sendmsg(). */
int
sys32_sendmsg (int fd, struct msghdr32 *msg, unsigned flags)
{
	struct socket *sock;
	char address[MAX_SOCK_ADDR];
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	unsigned char ctl[sizeof(struct cmsghdr) + 20];	/* 20 is size of ipv6_pktinfo */
	unsigned char *ctl_buf = ctl;
	struct msghdr msg_sys;
	int err, iov_size, total_len;
	size_t ctl_len;

	err = -EFAULT;
	if (get_msghdr32(&msg_sys, msg))
		goto out;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		goto out;

	/* do not move before msg_sys is valid */
	err = -EINVAL;
	if (msg_sys.msg_iovlen > UIO_MAXIOV)
		goto out_put;

	/* Check whether to allocate the iovec area*/
	err = -ENOMEM;
	iov_size = msg_sys.msg_iovlen * sizeof(struct iovec32);
	if (msg_sys.msg_iovlen > UIO_FASTIOV) {
		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
		if (!iov)
			goto out_put;
	}

	/* This will also move the address data into kernel space */
	err = verify_iovec32(&msg_sys, iov, address, VERIFY_READ);
	if (err < 0)
		goto out_freeiov;
	total_len = err;

	err = -ENOBUFS;

	if (msg_sys.msg_controllen > INT_MAX)
		goto out_freeiov;
	if (msg_sys.msg_controllen) {
		/* Convert the 32-bit cmsg chain; on success msg_control points
		   at the kernel copy (possibly sock_kmalloc'd, freed below). */
		ctl_len = sizeof(ctl);
		err = get_cmsghdr32(&msg_sys, ctl_buf, sock->sk, &ctl_len);
		if (err)
			goto out_freeiov;
		ctl_buf = msg_sys.msg_control;
	}
	msg_sys.msg_flags = flags;

	if (sock->file->f_flags & O_NONBLOCK)
		msg_sys.msg_flags |= MSG_DONTWAIT;
	err = sock_sendmsg(sock, &msg_sys, total_len);

	if (ctl_buf != ctl)
		sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out_freeiov:
	if (iov != iovstack)
		sock_kfree_s(sock->sk, iov, iov_size);
out_put:
	sockfd_put(sock);
out:
	return err;
}

/*
 *	BSD recvmsg interface
 */
/* 32-bit recvmsg: receive with the 64-bit machinery, then convert any
   64-bit cmsgs placed in user space back into the 32-bit layout. */
int
sys32_recvmsg (int fd, struct msghdr32 *msg, unsigned int flags)
{
	struct socket *sock;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov=iovstack;
	struct msghdr msg_sys;
	unsigned long cmsg_ptr;
	int err, iov_size, total_len, len;
	struct scm_cookie scm;

	/* kernel mode address */
	char addr[MAX_SOCK_ADDR];

	/* user mode address pointers */
	struct sockaddr *uaddr;
	int *uaddr_len;

	err = -EFAULT;
	if (get_msghdr32(&msg_sys, msg))
		goto out;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		goto out;

	err = -EINVAL;
	if (msg_sys.msg_iovlen > UIO_MAXIOV)
		goto out_put;

	/* Check whether to allocate the iovec area*/
	err = -ENOMEM;
	iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
	if (msg_sys.msg_iovlen > UIO_FASTIOV) {
		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
		if (!iov)
			goto out_put;
	}

	/*
	 * Save the user-mode address (verify_iovec will change the
	 * kernel msghdr to use the kernel address space)
	 */

	uaddr = msg_sys.msg_name;
	uaddr_len = &msg->msg_namelen;
	err = verify_iovec32(&msg_sys, iov, addr, VERIFY_WRITE);
	if (err < 0)
		goto out_freeiov;
	total_len=err;

	/* Remember where the user's control buffer started so we can tell
	   how much the protocol wrote and fix it up afterwards. */
	cmsg_ptr = (unsigned long)msg_sys.msg_control;
	msg_sys.msg_flags = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	memset(&scm, 0, sizeof(scm));

	lock_kernel();
	{
		err = sock->ops->recvmsg(sock, &msg_sys, total_len,
					 flags, &scm);
		if (err < 0)
			goto out_unlock_freeiov;

		len = err;
		if (!msg_sys.msg_control) {
			if (sock->passcred || scm.fp)
				msg_sys.msg_flags |= MSG_CTRUNC;
			if (scm.fp)
				__scm_destroy(&scm);
		} else {
			/*
			 * If recvmsg processing itself placed some control messages into
			 * user space, it is using 64-bit CMSG processing, so we need to
			 * fix it up before we tack on more stuff.
			 */
			if ((unsigned long) msg_sys.msg_control != cmsg_ptr)
				cmsg32_recvmsg_fixup(&msg_sys, cmsg_ptr);

			/* Wheee... */
			if (sock->passcred)
				put_cmsg32(&msg_sys, SOL_SOCKET, SCM_CREDENTIALS,
					   sizeof(scm.creds), &scm.creds);
			if (scm.fp != NULL)
				scm_detach_fds32(&msg_sys, &scm);
		}
	}
	unlock_kernel();

	if (uaddr != NULL) {
		err = move_addr_to_user(addr, msg_sys.msg_namelen, uaddr, uaddr_len);
		if (err < 0)
			goto out_freeiov;
	}
	/* Report the flags and the number of control bytes actually written. */
	err = __put_user(msg_sys.msg_flags, &msg->msg_flags);
	if (err)
		goto out_freeiov;
	err = __put_user((unsigned long)msg_sys.msg_control-cmsg_ptr,
			 &msg->msg_controllen);
	if (err)
		goto out_freeiov;
	err = len;

  out_freeiov:
	if (iov != iovstack)
		sock_kfree_s(sock->sk, iov, iov_size);
  out_put:
	sockfd_put(sock);
  out:
	return err;

  out_unlock_freeiov:
	goto out_freeiov;
}

/* Argument list sizes for sys_socketcall */
#define AL(x) ((x) * sizeof(u32))
static const unsigned char nas[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
				    AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
				    AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)};
#undef AL

extern asmlinkage long sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
extern asmlinkage long sys_connect(int fd, struct sockaddr *uservaddr,
				   int addrlen);
extern asmlinkage long sys_accept(int fd, struct sockaddr *upeer_sockaddr,
				  int *upeer_addrlen);
extern asmlinkage long sys_getsockname(int fd, struct sockaddr *usockaddr,
				       int *usockaddr_len);
extern asmlinkage long sys_getpeername(int fd, struct sockaddr *usockaddr,
				       int *usockaddr_len);
extern asmlinkage long sys_send(int fd, void *buff, size_t len, unsigned flags);
extern asmlinkage long sys_sendto(int fd, u32 buff, __kernel_size_t32 len,
				  unsigned flags, u32 addr, int addr_len);
extern asmlinkage long sys_recv(int fd, void *ubuf, size_t size, unsigned flags);
extern asmlinkage long sys_recvfrom(int fd, u32 ubuf, __kernel_size_t32 size,
				    unsigned flags, u32 addr, u32 addr_len);
extern asmlinkage long sys_setsockopt(int fd, int level, int optname,
				      char *optval, int optlen);
extern asmlinkage long sys_getsockopt(int fd, int level, int optname,
				      u32 optval, u32 optlen);

extern asmlinkage long sys_socket(int family, int type, int protocol);
extern asmlinkage long sys_socketpair(int family, int type, int protocol,
				      int usockvec[2]);
extern asmlinkage long sys_shutdown(int fd, int how);
extern asmlinkage long sys_listen(int fd, int backlog);

/* ia32 socketcall de-multiplexer: copy the 32-bit argument vector (size per
   nas[] above) and dispatch to the native or 32-bit wrapper function. */
asmlinkage long
sys32_socketcall (int call, u32 *args)
{
	int ret;
	u32 a[6];
	u32 a0,a1;

	if (call<SYS_SOCKET||call>SYS_RECVMSG)
		return -EINVAL;
	if (copy_from_user(a, args, nas[call]))
		return -EFAULT;
	a0=a[0];
	a1=a[1];

	switch(call)
	{
		case SYS_SOCKET:
			ret = sys_socket(a0, a1, a[2]);
			break;
		case SYS_BIND:
			ret = sys_bind(a0, (struct sockaddr *)A(a1), a[2]);
			break;
		case SYS_CONNECT:
			ret = sys_connect(a0, (struct sockaddr *)A(a1), a[2]);
			break;
		case SYS_LISTEN:
			ret = sys_listen(a0, a1);
			break;
		case SYS_ACCEPT:
			ret = sys_accept(a0, (struct sockaddr *)A(a1), (int *)A(a[2]));
			break;
		case SYS_GETSOCKNAME:
			ret = sys_getsockname(a0, (struct sockaddr *)A(a1), (int *)A(a[2]));
			break;
		case SYS_GETPEERNAME:
ret = sys_getpeername(a0, (struct sockaddr *)A(a1), (int *)A(a[2])); 1947 break; 1948 case SYS_SOCKETPAIR: 1949 ret = sys_socketpair(a0, a1, a[2], (int *)A(a[3])); 1950 break; 1951 case SYS_SEND: 1952 ret = sys_send(a0, (void *)A(a1), a[2], a[3]); 1953 break; 1954 case SYS_SENDTO: 1955 ret = sys_sendto(a0, a1, a[2], a[3], a[4], a[5]); 1956 break; 1957 case SYS_RECV: 1958 ret = sys_recv(a0, (void *)A(a1), a[2], a[3]); 1959 break; 1960 case SYS_RECVFROM: 1961 ret = sys_recvfrom(a0, a1, a[2], a[3], a[4], a[5]); 1962 break; 1963 case SYS_SHUTDOWN: 1964 ret = sys_shutdown(a0,a1); 1965 break; 1966 case SYS_SETSOCKOPT: 1967 ret = sys_setsockopt(a0, a1, a[2], (char *)A(a[3]), 1968 a[4]); 1969 break; 1970 case SYS_GETSOCKOPT: 1971 ret = sys_getsockopt(a0, a1, a[2], a[3], a[4]); 1972 break; 1973 case SYS_SENDMSG: 1974 ret = sys32_sendmsg(a0, (struct msghdr32 *) A(a1), a[2]); 1975 break; 1976 case SYS_RECVMSG: 1977 ret = sys32_recvmsg(a0, (struct msghdr32 *) A(a1), a[2]); 1978 break; 1979 default: 1980 ret = EINVAL; 1981 break; 1982 } 1983 return ret; 1984} 1985 1986/* 1987 * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.. 1988 * 1989 * This is really horribly ugly. 1990 */ 1991 1992struct msgbuf32 { s32 mtype; char mtext[1]; }; 1993 1994struct ipc_perm32 { 1995 key_t key; 1996 __kernel_uid_t32 uid; 1997 __kernel_gid_t32 gid; 1998 __kernel_uid_t32 cuid; 1999 __kernel_gid_t32 cgid; 2000 __kernel_mode_t32 mode; 2001 unsigned short seq; 2002}; 2003 2004struct ipc64_perm32 { 2005 key_t key; 2006 __kernel_uid32_t32 uid; 2007 __kernel_gid32_t32 gid; 2008 __kernel_uid32_t32 cuid; 2009 __kernel_gid32_t32 cgid; 2010 __kernel_mode_t32 mode; 2011 unsigned short __pad1; 2012 unsigned short seq; 2013 unsigned short __pad2; 2014 unsigned int unused1; 2015 unsigned int unused2; 2016}; 2017 2018struct semid_ds32 { 2019 struct ipc_perm32 sem_perm; /* permissions .. 
						     see ipc.h */
	__kernel_time_t32 sem_otime;              /* last semop time */
	__kernel_time_t32 sem_ctime;              /* last change time */
	u32 sem_base;              /* ptr to first semaphore in array */
	u32 sem_pending;          /* pending operations to be processed */
	u32 sem_pending_last;     /* last pending operation */
	u32 undo;                 /* undo requests on this array */
	unsigned short  sem_nsems;              /* no. of semaphores in array */
};

/* IPC_64 variant of semid_ds as seen by ia32 applications. */
struct semid64_ds32 {
	struct ipc64_perm32 sem_perm;
	__kernel_time_t32 sem_otime;
	unsigned int __unused1;
	__kernel_time_t32 sem_ctime;
	unsigned int __unused2;
	unsigned int sem_nsems;
	unsigned int __unused3;
	unsigned int __unused4;
};

struct msqid_ds32 {
	struct ipc_perm32 msg_perm;
	u32 msg_first;
	u32 msg_last;
	__kernel_time_t32 msg_stime;
	__kernel_time_t32 msg_rtime;
	__kernel_time_t32 msg_ctime;
	u32 wwait;
	u32 rwait;
	unsigned short msg_cbytes;
	unsigned short msg_qnum;
	unsigned short msg_qbytes;
	__kernel_ipc_pid_t32 msg_lspid;
	__kernel_ipc_pid_t32 msg_lrpid;
};

struct msqid64_ds32 {
	struct ipc64_perm32 msg_perm;
	__kernel_time_t32 msg_stime;
	unsigned int __unused1;
	__kernel_time_t32 msg_rtime;
	unsigned int __unused2;
	__kernel_time_t32 msg_ctime;
	unsigned int __unused3;
	unsigned int msg_cbytes;
	unsigned int msg_qnum;
	unsigned int msg_qbytes;
	__kernel_pid_t32 msg_lspid;
	__kernel_pid_t32 msg_lrpid;
	unsigned int __unused4;
	unsigned int __unused5;
};

struct shmid_ds32 {
	struct ipc_perm32 shm_perm;
	int shm_segsz;
	__kernel_time_t32 shm_atime;
	__kernel_time_t32 shm_dtime;
	__kernel_time_t32 shm_ctime;
	__kernel_ipc_pid_t32 shm_cpid;
	__kernel_ipc_pid_t32 shm_lpid;
	unsigned short shm_nattch;
};

struct shmid64_ds32 {
	/*
	 * NOTE(review): the sibling *64_ds32 layouts above use ipc64_perm32
	 * here, but this one embeds the native struct ipc64_perm — verify
	 * this really matches the ia32 shmid64_ds layout.
	 */
	struct ipc64_perm shm_perm;
	__kernel_size_t32 shm_segsz;
	__kernel_time_t32 shm_atime;
	unsigned int __unused1;
	__kernel_time_t32 shm_dtime;
	unsigned int __unused2;
	__kernel_time_t32 shm_ctime;
	unsigned int __unused3;
	__kernel_pid_t32 shm_cpid;
	__kernel_pid_t32 shm_lpid;
	unsigned int shm_nattch;
	unsigned int __unused4;
	unsigned int __unused5;
};

struct shminfo64_32 {
	unsigned int shmmax;
	unsigned int shmmin;
	unsigned int shmmni;
	unsigned int shmseg;
	unsigned int shmall;
	unsigned int __unused1;
	unsigned int __unused2;
	unsigned int __unused3;
	unsigned int __unused4;
};

struct shm_info32 {
	int used_ids;
	u32 shm_tot, shm_rss, shm_swp;
	u32 swap_attempts, swap_successes;
};

/* Old-style (version 0) msgrcv passes msgp/msgtyp through this struct. */
struct ipc_kludge {
	struct msgbuf *msgp;
	long msgtyp;
};

#define SEMOP		 1
#define SEMGET		 2
#define SEMCTL		 3
#define MSGSND		11
#define MSGRCV		12
#define MSGGET		13
#define MSGCTL		14
#define SHMAT		21
#define SHMDT		22
#define SHMGET		23
#define SHMCTL		24

#define IPCOP_MASK(__x)	(1UL << (__x))

/* Strip the IPC_64 flag out of *CMD; report which struct layout to use. */
static int
ipc_parse_version32 (int *cmd)
{
	if (*cmd & IPC_64) {
		*cmd ^= IPC_64;
		return IPC_64;
	} else {
		return IPC_OLD;
	}
}

/*
 * 32-bit semctl: run the native syscall; for IPC_STAT/SEM_STAT narrow the
 * result into the old or IPC_64 32-bit layout chosen by the caller.
 */
static int
semctl32 (int first, int second, int third, void *uptr)
{
	union semun fourth;
	u32 pad;
	int err = 0, err2;
	struct semid64_ds s;
	mm_segment_t old_fs;
	int version = ipc_parse_version32(&third);

	if (!uptr)
		return -EINVAL;
	if (get_user(pad, (u32 *)uptr))
		return -EFAULT;
	/* The fourth argument is a plain value for SETVAL, a pointer otherwise. */
	if (third == SETVAL)
		fourth.val = (int)pad;
	else
		fourth.__pad = (void *)A(pad);
	switch (third) {
	      case IPC_INFO:
	      case IPC_RMID:
	      case IPC_SET:
	      case SEM_INFO:
	      case GETVAL:
	      case GETPID:
	      case GETNCNT:
	      case GETZCNT:
	      case GETALL:
	      case SETVAL:
	      case SETALL:
		err = sys_semctl(first, second, third, fourth);
		break;

	      case IPC_STAT:
	      case SEM_STAT:
		/* Stat into a kernel semid64_ds, then narrow it below. */
		fourth.__pad = &s;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		err = sys_semctl(first, second, third, fourth);
		set_fs(old_fs);

		if (version == IPC_64) {
			struct semid64_ds32 *usp64 = (struct semid64_ds32 *) A(pad);

			if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
				err = -EFAULT;
				break;
			}
			err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
			err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
			err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
			err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
			err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
			err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
			err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
			err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
			err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
			err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
		} else {
			struct semid_ds32 *usp32 = (struct semid_ds32 *) A(pad);

			if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
				err = -EFAULT;
				break;
			}
			err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
			err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
			err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
			err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
			err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
			err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
			err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
			err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
			err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
			err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
		}
		if (err2)
			err = -EFAULT;
		break;
	}
	return err;
}

static int
do_sys32_msgsnd (int first,
int second, int third, void *uptr) 2233{ 2234 struct msgbuf *p = kmalloc(second + sizeof(struct msgbuf) + 4, GFP_USER); 2235 struct msgbuf32 *up = (struct msgbuf32 *)uptr; 2236 mm_segment_t old_fs; 2237 int err; 2238 2239 if (!p) 2240 return -ENOMEM; 2241 err = get_user(p->mtype, &up->mtype); 2242 err |= copy_from_user(p->mtext, &up->mtext, second); 2243 if (err) 2244 goto out; 2245 old_fs = get_fs(); 2246 set_fs(KERNEL_DS); 2247 err = sys_msgsnd(first, p, second, third); 2248 set_fs(old_fs); 2249 out: 2250 kfree(p); 2251 return err; 2252} 2253 2254static int 2255do_sys32_msgrcv (int first, int second, int msgtyp, int third, int version, void *uptr) 2256{ 2257 struct msgbuf32 *up; 2258 struct msgbuf *p; 2259 mm_segment_t old_fs; 2260 int err; 2261 2262 if (!version) { 2263 struct ipc_kludge *uipck = (struct ipc_kludge *)uptr; 2264 struct ipc_kludge ipck; 2265 2266 err = -EINVAL; 2267 if (!uptr) 2268 goto out; 2269 err = -EFAULT; 2270 if (copy_from_user(&ipck, uipck, sizeof(struct ipc_kludge))) 2271 goto out; 2272 uptr = (void *)A(ipck.msgp); 2273 msgtyp = ipck.msgtyp; 2274 } 2275 err = -ENOMEM; 2276 p = kmalloc(second + sizeof(struct msgbuf) + 4, GFP_USER); 2277 if (!p) 2278 goto out; 2279 old_fs = get_fs(); 2280 set_fs(KERNEL_DS); 2281 err = sys_msgrcv(first, p, second + 4, msgtyp, third); 2282 set_fs(old_fs); 2283 if (err < 0) 2284 goto free_then_out; 2285 up = (struct msgbuf32 *)uptr; 2286 if (put_user(p->mtype, &up->mtype) || copy_to_user(&up->mtext, p->mtext, err)) 2287 err = -EFAULT; 2288free_then_out: 2289 kfree(p); 2290out: 2291 return err; 2292} 2293 2294static int 2295msgctl32 (int first, int second, void *uptr) 2296{ 2297 int err = -EINVAL, err2; 2298 struct msqid_ds m; 2299 struct msqid64_ds m64; 2300 struct msqid_ds32 *up32 = (struct msqid_ds32 *)uptr; 2301 struct msqid64_ds32 *up64 = (struct msqid64_ds32 *)uptr; 2302 mm_segment_t old_fs; 2303 int version = ipc_parse_version32(&second); 2304 2305 switch (second) { 2306 case IPC_INFO: 2307 case 
IPC_RMID: 2308 case MSG_INFO: 2309 err = sys_msgctl(first, second, (struct msqid_ds *)uptr); 2310 break; 2311 2312 case IPC_SET: 2313 if (version == IPC_64) { 2314 err = get_user(m.msg_perm.uid, &up64->msg_perm.uid); 2315 err |= get_user(m.msg_perm.gid, &up64->msg_perm.gid); 2316 err |= get_user(m.msg_perm.mode, &up64->msg_perm.mode); 2317 err |= get_user(m.msg_qbytes, &up64->msg_qbytes); 2318 } else { 2319 err = get_user(m.msg_perm.uid, &up32->msg_perm.uid); 2320 err |= get_user(m.msg_perm.gid, &up32->msg_perm.gid); 2321 err |= get_user(m.msg_perm.mode, &up32->msg_perm.mode); 2322 err |= get_user(m.msg_qbytes, &up32->msg_qbytes); 2323 } 2324 if (err) 2325 break; 2326 old_fs = get_fs(); 2327 set_fs(KERNEL_DS); 2328 err = sys_msgctl(first, second, &m); 2329 set_fs(old_fs); 2330 break; 2331 2332 case IPC_STAT: 2333 case MSG_STAT: 2334 old_fs = get_fs(); 2335 set_fs(KERNEL_DS); 2336 err = sys_msgctl(first, second, (void *) &m64); 2337 set_fs(old_fs); 2338 2339 if (version == IPC_64) { 2340 if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) { 2341 err = -EFAULT; 2342 break; 2343 } 2344 err2 = __put_user(m64.msg_perm.key, &up64->msg_perm.key); 2345 err2 |= __put_user(m64.msg_perm.uid, &up64->msg_perm.uid); 2346 err2 |= __put_user(m64.msg_perm.gid, &up64->msg_perm.gid); 2347 err2 |= __put_user(m64.msg_perm.cuid, &up64->msg_perm.cuid); 2348 err2 |= __put_user(m64.msg_perm.cgid, &up64->msg_perm.cgid); 2349 err2 |= __put_user(m64.msg_perm.mode, &up64->msg_perm.mode); 2350 err2 |= __put_user(m64.msg_perm.seq, &up64->msg_perm.seq); 2351 err2 |= __put_user(m64.msg_stime, &up64->msg_stime); 2352 err2 |= __put_user(m64.msg_rtime, &up64->msg_rtime); 2353 err2 |= __put_user(m64.msg_ctime, &up64->msg_ctime); 2354 err2 |= __put_user(m64.msg_cbytes, &up64->msg_cbytes); 2355 err2 |= __put_user(m64.msg_qnum, &up64->msg_qnum); 2356 err2 |= __put_user(m64.msg_qbytes, &up64->msg_qbytes); 2357 err2 |= __put_user(m64.msg_lspid, &up64->msg_lspid); 2358 err2 |= __put_user(m64.msg_lrpid, 
&up64->msg_lrpid); 2359 if (err2) 2360 err = -EFAULT; 2361 } else { 2362 if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) { 2363 err = -EFAULT; 2364 break; 2365 } 2366 err2 = __put_user(m64.msg_perm.key, &up32->msg_perm.key); 2367 err2 |= __put_user(m64.msg_perm.uid, &up32->msg_perm.uid); 2368 err2 |= __put_user(m64.msg_perm.gid, &up32->msg_perm.gid); 2369 err2 |= __put_user(m64.msg_perm.cuid, &up32->msg_perm.cuid); 2370 err2 |= __put_user(m64.msg_perm.cgid, &up32->msg_perm.cgid); 2371 err2 |= __put_user(m64.msg_perm.mode, &up32->msg_perm.mode); 2372 err2 |= __put_user(m64.msg_perm.seq, &up32->msg_perm.seq); 2373 err2 |= __put_user(m64.msg_stime, &up32->msg_stime); 2374 err2 |= __put_user(m64.msg_rtime, &up32->msg_rtime); 2375 err2 |= __put_user(m64.msg_ctime, &up32->msg_ctime); 2376 err2 |= __put_user(m64.msg_cbytes, &up32->msg_cbytes); 2377 err2 |= __put_user(m64.msg_qnum, &up32->msg_qnum); 2378 err2 |= __put_user(m64.msg_qbytes, &up32->msg_qbytes); 2379 err2 |= __put_user(m64.msg_lspid, &up32->msg_lspid); 2380 err2 |= __put_user(m64.msg_lrpid, &up32->msg_lrpid); 2381 if (err2) 2382 err = -EFAULT; 2383 } 2384 break; 2385 } 2386 return err; 2387} 2388 2389static int 2390shmat32 (int first, int second, int third, int version, void *uptr) 2391{ 2392 unsigned long raddr; 2393 u32 *uaddr = (u32 *)A((u32)third); 2394 int err; 2395 2396 if (version == 1) 2397 return -EINVAL; /* iBCS2 emulator entry point: unsupported */ 2398 err = sys_shmat(first, uptr, second, &raddr); 2399 if (err) 2400 return err; 2401 return put_user(raddr, uaddr); 2402} 2403 2404static int 2405shmctl32 (int first, int second, void *uptr) 2406{ 2407 int err = -EFAULT, err2; 2408 struct shmid_ds s; 2409 struct shmid64_ds s64; 2410 struct shmid_ds32 *up32 = (struct shmid_ds32 *)uptr; 2411 struct shmid64_ds32 *up64 = (struct shmid64_ds32 *)uptr; 2412 mm_segment_t old_fs; 2413 struct shm_info32 *uip = (struct shm_info32 *)uptr; 2414 struct shm_info si; 2415 int version = ipc_parse_version32(&second); 
2416 struct shminfo64 smi; 2417 struct shminfo *usi32 = (struct shminfo *) uptr; 2418 struct shminfo64_32 *usi64 = (struct shminfo64_32 *) uptr; 2419 2420 switch (second) { 2421 case IPC_INFO: 2422 old_fs = get_fs(); 2423 set_fs(KERNEL_DS); 2424 err = sys_shmctl(first, second, (struct shmid_ds *)&smi); 2425 set_fs(old_fs); 2426 2427 if (version == IPC_64) { 2428 if (!access_ok(VERIFY_WRITE, usi64, sizeof(*usi64))) { 2429 err = -EFAULT; 2430 break; 2431 } 2432 err2 = __put_user(smi.shmmax, &usi64->shmmax); 2433 err2 |= __put_user(smi.shmmin, &usi64->shmmin); 2434 err2 |= __put_user(smi.shmmni, &usi64->shmmni); 2435 err2 |= __put_user(smi.shmseg, &usi64->shmseg); 2436 err2 |= __put_user(smi.shmall, &usi64->shmall); 2437 } else { 2438 if (!access_ok(VERIFY_WRITE, usi32, sizeof(*usi32))) { 2439 err = -EFAULT; 2440 break; 2441 } 2442 err2 = __put_user(smi.shmmax, &usi32->shmmax); 2443 err2 |= __put_user(smi.shmmin, &usi32->shmmin); 2444 err2 |= __put_user(smi.shmmni, &usi32->shmmni); 2445 err2 |= __put_user(smi.shmseg, &usi32->shmseg); 2446 err2 |= __put_user(smi.shmall, &usi32->shmall); 2447 } 2448 if (err2) 2449 err = -EFAULT; 2450 break; 2451 2452 case IPC_RMID: 2453 case SHM_LOCK: 2454 case SHM_UNLOCK: 2455 err = sys_shmctl(first, second, (struct shmid_ds *)uptr); 2456 break; 2457 2458 case IPC_SET: 2459 if (version == IPC_64) { 2460 err = get_user(s.shm_perm.uid, &up64->shm_perm.uid); 2461 err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid); 2462 err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode); 2463 } else { 2464 err = get_user(s.shm_perm.uid, &up32->shm_perm.uid); 2465 err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid); 2466 err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode); 2467 } 2468 if (err) 2469 break; 2470 old_fs = get_fs(); 2471 set_fs(KERNEL_DS); 2472 err = sys_shmctl(first, second, &s); 2473 set_fs(old_fs); 2474 break; 2475 2476 case IPC_STAT: 2477 case SHM_STAT: 2478 old_fs = get_fs(); 2479 set_fs(KERNEL_DS); 2480 err = sys_shmctl(first, 
second, (void *) &s64); 2481 set_fs(old_fs); 2482 if (err < 0) 2483 break; 2484 if (version == IPC_64) { 2485 if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) { 2486 err = -EFAULT; 2487 break; 2488 } 2489 err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key); 2490 err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid); 2491 err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid); 2492 err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid); 2493 err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid); 2494 err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode); 2495 err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq); 2496 err2 |= __put_user(s64.shm_atime, &up64->shm_atime); 2497 err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime); 2498 err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime); 2499 err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz); 2500 err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch); 2501 err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid); 2502 err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid); 2503 } else { 2504 if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) { 2505 err = -EFAULT; 2506 break; 2507 } 2508 err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key); 2509 err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid); 2510 err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid); 2511 err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid); 2512 err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid); 2513 err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode); 2514 err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq); 2515 err2 |= __put_user(s64.shm_atime, &up32->shm_atime); 2516 err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime); 2517 err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime); 2518 err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz); 2519 err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch); 2520 err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid); 2521 err2 |= 
__put_user(s64.shm_lpid, &up32->shm_lpid); 2522 } 2523 if (err2) 2524 err = -EFAULT; 2525 break; 2526 2527 case SHM_INFO: 2528 old_fs = get_fs(); 2529 set_fs(KERNEL_DS); 2530 err = sys_shmctl(first, second, (void *)&si); 2531 set_fs(old_fs); 2532 if (err < 0) 2533 break; 2534 2535 if (!access_ok(VERIFY_WRITE, uip, sizeof(*uip))) { 2536 err = -EFAULT; 2537 break; 2538 } 2539 err2 = __put_user(si.used_ids, &uip->used_ids); 2540 err2 |= __put_user(si.shm_tot, &uip->shm_tot); 2541 err2 |= __put_user(si.shm_rss, &uip->shm_rss); 2542 err2 |= __put_user(si.shm_swp, &uip->shm_swp); 2543 err2 |= __put_user(si.swap_attempts, &uip->swap_attempts); 2544 err2 |= __put_user(si.swap_successes, &uip->swap_successes); 2545 if (err2) 2546 err = -EFAULT; 2547 break; 2548 2549 } 2550 return err; 2551} 2552 2553asmlinkage long 2554sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) 2555{ 2556 int version; 2557 2558 version = call >> 16; /* hack for backward compatibility */ 2559 call &= 0xffff; 2560 2561 switch (call) { 2562 case SEMOP: 2563 /* struct sembuf is the same on 32 and 64bit :)) */ 2564 return sys_semop(first, (struct sembuf *)AA(ptr), second); 2565 case SEMGET: 2566 return sys_semget(first, second, third); 2567 case SEMCTL: 2568 return semctl32(first, second, third, (void *)AA(ptr)); 2569 2570 case MSGSND: 2571 return do_sys32_msgsnd(first, second, third, (void *)AA(ptr)); 2572 case MSGRCV: 2573 return do_sys32_msgrcv(first, second, fifth, third, version, (void *)AA(ptr)); 2574 case MSGGET: 2575 return sys_msgget((key_t) first, second); 2576 case MSGCTL: 2577 return msgctl32(first, second, (void *)AA(ptr)); 2578 2579 case SHMAT: 2580 return shmat32(first, second, third, version, (void *)AA(ptr)); 2581 break; 2582 case SHMDT: 2583 return sys_shmdt((char *)AA(ptr)); 2584 case SHMGET: 2585 return sys_shmget(first, second, third); 2586 case SHMCTL: 2587 return shmctl32(first, second, (void *)AA(ptr)); 2588 2589 default: 2590 return -EINVAL; 2591 } 2592 
return -EINVAL; 2593} 2594 2595/* 2596 * sys_time() can be implemented in user-level using 2597 * sys_gettimeofday(). IA64 did this but i386 Linux did not 2598 * so we have to implement this system call here. 2599 */ 2600asmlinkage long 2601sys32_time (int *tloc) 2602{ 2603 int i; 2604 2605 /* SMP: This is fairly trivial. We grab CURRENT_TIME and 2606 stuff it to user space. No side effects */ 2607 i = CURRENT_TIME; 2608 if (tloc) { 2609 if (put_user(i, tloc)) 2610 i = -EFAULT; 2611 } 2612 return i; 2613} 2614 2615struct rusage32 { 2616 struct timeval32 ru_utime; 2617 struct timeval32 ru_stime; 2618 int ru_maxrss; 2619 int ru_ixrss; 2620 int ru_idrss; 2621 int ru_isrss; 2622 int ru_minflt; 2623 int ru_majflt; 2624 int ru_nswap; 2625 int ru_inblock; 2626 int ru_oublock; 2627 int ru_msgsnd; 2628 int ru_msgrcv; 2629 int ru_nsignals; 2630 int ru_nvcsw; 2631 int ru_nivcsw; 2632}; 2633 2634static int 2635put_rusage (struct rusage32 *ru, struct rusage *r) 2636{ 2637 int err; 2638 2639 if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru))) 2640 return -EFAULT; 2641 2642 err = __put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec); 2643 err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec); 2644 err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec); 2645 err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec); 2646 err |= __put_user (r->ru_maxrss, &ru->ru_maxrss); 2647 err |= __put_user (r->ru_ixrss, &ru->ru_ixrss); 2648 err |= __put_user (r->ru_idrss, &ru->ru_idrss); 2649 err |= __put_user (r->ru_isrss, &ru->ru_isrss); 2650 err |= __put_user (r->ru_minflt, &ru->ru_minflt); 2651 err |= __put_user (r->ru_majflt, &ru->ru_majflt); 2652 err |= __put_user (r->ru_nswap, &ru->ru_nswap); 2653 err |= __put_user (r->ru_inblock, &ru->ru_inblock); 2654 err |= __put_user (r->ru_oublock, &ru->ru_oublock); 2655 err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd); 2656 err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv); 2657 err |= __put_user (r->ru_nsignals, &ru->ru_nsignals); 
2658 err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw); 2659 err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw); 2660 return err; 2661} 2662 2663asmlinkage long 2664sys32_wait4 (int pid, unsigned int *stat_addr, int options, struct rusage32 *ru) 2665{ 2666 if (!ru) 2667 return sys_wait4(pid, stat_addr, options, NULL); 2668 else { 2669 struct rusage r; 2670 int ret; 2671 unsigned int status; 2672 mm_segment_t old_fs = get_fs(); 2673 2674 set_fs(KERNEL_DS); 2675 ret = sys_wait4(pid, stat_addr ? &status : NULL, options, &r); 2676 set_fs(old_fs); 2677 if (put_rusage(ru, &r)) 2678 return -EFAULT; 2679 if (stat_addr && put_user(status, stat_addr)) 2680 return -EFAULT; 2681 return ret; 2682 } 2683} 2684 2685asmlinkage long 2686sys32_waitpid (int pid, unsigned int *stat_addr, int options) 2687{ 2688 return sys32_wait4(pid, stat_addr, options, NULL); 2689} 2690 2691 2692extern asmlinkage long sys_getrusage (int who, struct rusage *ru); 2693 2694asmlinkage long 2695sys32_getrusage (int who, struct rusage32 *ru) 2696{ 2697 struct rusage r; 2698 int ret; 2699 mm_segment_t old_fs = get_fs(); 2700 2701 set_fs(KERNEL_DS); 2702 ret = sys_getrusage(who, &r); 2703 set_fs(old_fs); 2704 if (put_rusage (ru, &r)) 2705 return -EFAULT; 2706 return ret; 2707} 2708 2709struct tms32 { 2710 __kernel_clock_t32 tms_utime; 2711 __kernel_clock_t32 tms_stime; 2712 __kernel_clock_t32 tms_cutime; 2713 __kernel_clock_t32 tms_cstime; 2714}; 2715 2716extern asmlinkage long sys_times (struct tms * tbuf); 2717 2718asmlinkage long 2719sys32_times (struct tms32 *tbuf) 2720{ 2721 mm_segment_t old_fs = get_fs(); 2722 struct tms t; 2723 long ret; 2724 int err; 2725 2726 set_fs(KERNEL_DS); 2727 ret = sys_times(tbuf ? 
&t : NULL); 2728 set_fs(old_fs); 2729 if (tbuf) { 2730 err = put_user (IA32_TICK(t.tms_utime), &tbuf->tms_utime); 2731 err |= put_user (IA32_TICK(t.tms_stime), &tbuf->tms_stime); 2732 err |= put_user (IA32_TICK(t.tms_cutime), &tbuf->tms_cutime); 2733 err |= put_user (IA32_TICK(t.tms_cstime), &tbuf->tms_cstime); 2734 if (err) 2735 ret = -EFAULT; 2736 } 2737 return IA32_TICK(ret); 2738} 2739 2740static unsigned int 2741ia32_peek (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int *val) 2742{ 2743 size_t copied; 2744 unsigned int ret; 2745 2746 copied = access_process_vm(child, addr, val, sizeof(*val), 0); 2747 return (copied != sizeof(ret)) ? -EIO : 0; 2748} 2749 2750static unsigned int 2751ia32_poke (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int val) 2752{ 2753 2754 if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) 2755 return -EIO; 2756 return 0; 2757} 2758 2759/* 2760 * The order in which registers are stored in the ptrace regs structure 2761 */ 2762#define PT_EBX 0 2763#define PT_ECX 1 2764#define PT_EDX 2 2765#define PT_ESI 3 2766#define PT_EDI 4 2767#define PT_EBP 5 2768#define PT_EAX 6 2769#define PT_DS 7 2770#define PT_ES 8 2771#define PT_FS 9 2772#define PT_GS 10 2773#define PT_ORIG_EAX 11 2774#define PT_EIP 12 2775#define PT_CS 13 2776#define PT_EFL 14 2777#define PT_UESP 15 2778#define PT_SS 16 2779 2780static unsigned int 2781getreg (struct task_struct *child, int regno) 2782{ 2783 struct pt_regs *child_regs; 2784 2785 child_regs = ia64_task_regs(child); 2786 switch (regno / sizeof(int)) { 2787 case PT_EBX: return child_regs->r11; 2788 case PT_ECX: return child_regs->r9; 2789 case PT_EDX: return child_regs->r10; 2790 case PT_ESI: return child_regs->r14; 2791 case PT_EDI: return child_regs->r15; 2792 case PT_EBP: return child_regs->r13; 2793 case PT_EAX: return child_regs->r8; 2794 case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */ 2795 case 
PT_EIP: return child_regs->cr_iip; 2796 case PT_UESP: return child_regs->r12; 2797 case PT_EFL: return child->thread.eflag; 2798 case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS: 2799 return __USER_DS; 2800 case PT_CS: return __USER_CS; 2801 default: 2802 printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno); 2803 break; 2804 } 2805 return 0; 2806} 2807 2808static void 2809putreg (struct task_struct *child, int regno, unsigned int value) 2810{ 2811 struct pt_regs *child_regs; 2812 2813 child_regs = ia64_task_regs(child); 2814 switch (regno / sizeof(int)) { 2815 case PT_EBX: child_regs->r11 = value; break; 2816 case PT_ECX: child_regs->r9 = value; break; 2817 case PT_EDX: child_regs->r10 = value; break; 2818 case PT_ESI: child_regs->r14 = value; break; 2819 case PT_EDI: child_regs->r15 = value; break; 2820 case PT_EBP: child_regs->r13 = value; break; 2821 case PT_EAX: child_regs->r8 = value; break; 2822 case PT_ORIG_EAX: child_regs->r1 = value; break; 2823 case PT_EIP: child_regs->cr_iip = value; break; 2824 case PT_UESP: child_regs->r12 = value; break; 2825 case PT_EFL: child->thread.eflag = value; break; 2826 case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS: 2827 if (value != __USER_DS) 2828 printk(KERN_ERR 2829 "ia32.putreg: attempt to set invalid segment register %d = %x\n", 2830 regno, value); 2831 break; 2832 case PT_CS: 2833 if (value != __USER_CS) 2834 printk(KERN_ERR 2835 "ia32.putreg: attempt to to set invalid segment register %d = %x\n", 2836 regno, value); 2837 break; 2838 default: 2839 printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno); 2840 break; 2841 } 2842} 2843 2844static inline void 2845ia32f2ia64f (void *dst, void *src) 2846{ 2847 asm volatile ("ldfe f6=[%1];; stf.spill [%0]=f6" :: "r"(dst), "r"(src) : "memory"); 2848 return; 2849} 2850 2851static inline void 2852ia64f2ia32f (void *dst, void *src) 2853{ 2854 asm volatile ("ldf.fill f6=[%1];; stfe [%0]=f6" :: "r"(dst), "r"(src) : "memory"); 2855 return; 2856} 
2857 2858static void 2859put_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp, 2860 int tos) 2861{ 2862 struct _fpreg_ia32 *f; 2863 char buf[32]; 2864 2865 f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); 2866 if ((regno += tos) >= 8) 2867 regno -= 8; 2868 switch (regno) { 2869 case 0: 2870 ia64f2ia32f(f, &ptp->f8); 2871 break; 2872 case 1: 2873 ia64f2ia32f(f, &ptp->f9); 2874 break; 2875 case 2: 2876 case 3: 2877 case 4: 2878 case 5: 2879 case 6: 2880 case 7: 2881 ia64f2ia32f(f, &swp->f10 + (regno - 2)); 2882 break; 2883 } 2884 copy_to_user(reg, f, sizeof(*reg)); 2885} 2886 2887static void 2888get_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp, 2889 int tos) 2890{ 2891 2892 if ((regno += tos) >= 8) 2893 regno -= 8; 2894 switch (regno) { 2895 case 0: 2896 copy_from_user(&ptp->f8, reg, sizeof(*reg)); 2897 break; 2898 case 1: 2899 copy_from_user(&ptp->f9, reg, sizeof(*reg)); 2900 break; 2901 case 2: 2902 case 3: 2903 case 4: 2904 case 5: 2905 case 6: 2906 case 7: 2907 copy_from_user(&swp->f10 + (regno - 2), reg, sizeof(*reg)); 2908 break; 2909 } 2910 return; 2911} 2912 2913static int 2914save_ia32_fpstate (struct task_struct *tsk, struct _fpstate_ia32 *save) 2915{ 2916 struct switch_stack *swp; 2917 struct pt_regs *ptp; 2918 int i, tos; 2919 2920 if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) 2921 return -EIO; 2922 __put_user(tsk->thread.fcr, &save->cw); 2923 __put_user(tsk->thread.fsr, &save->sw); 2924 __put_user(tsk->thread.fsr >> 32, &save->tag); 2925 __put_user(tsk->thread.fir, &save->ipoff); 2926 __put_user(__USER_CS, &save->cssel); 2927 __put_user(tsk->thread.fdr, &save->dataoff); 2928 __put_user(__USER_DS, &save->datasel); 2929 /* 2930 * Stack frames start with 16-bytes of temp space 2931 */ 2932 swp = (struct switch_stack *)(tsk->thread.ksp + 16); 2933 ptp = ia64_task_regs(tsk); 2934 tos = (tsk->thread.fsr >> 11) & 3; 2935 for (i = 0; i < 8; i++) 2936 put_fpreg(i, 
&save->_st[i], ptp, swp, tos); 2937 return 0; 2938} 2939 2940static int 2941restore_ia32_fpstate (struct task_struct *tsk, struct _fpstate_ia32 *save) 2942{ 2943 struct switch_stack *swp; 2944 struct pt_regs *ptp; 2945 int i, tos, ret; 2946 int fsrlo, fsrhi; 2947 2948 if (!access_ok(VERIFY_READ, save, sizeof(*save))) 2949 return(-EIO); 2950 ret = __get_user(tsk->thread.fcr, (unsigned int *)&save->cw); 2951 ret |= __get_user(fsrlo, (unsigned int *)&save->sw); 2952 ret |= __get_user(fsrhi, (unsigned int *)&save->tag); 2953 tsk->thread.fsr = ((long)fsrhi << 32) | (long)fsrlo; 2954 ret |= __get_user(tsk->thread.fir, (unsigned int *)&save->ipoff); 2955 ret |= __get_user(tsk->thread.fdr, (unsigned int *)&save->dataoff); 2956 /* 2957 * Stack frames start with 16-bytes of temp space 2958 */ 2959 swp = (struct switch_stack *)(tsk->thread.ksp + 16); 2960 ptp = ia64_task_regs(tsk); 2961 tos = (tsk->thread.fsr >> 11) & 3; 2962 for (i = 0; i < 8; i++) 2963 get_fpreg(i, &save->_st[i], ptp, swp, tos); 2964 return ret ? -EFAULT : 0; 2965} 2966 2967extern asmlinkage long sys_ptrace (long, pid_t, unsigned long, unsigned long, long, long, long, 2968 long, long); 2969 2970/* 2971 * Note that the IA32 version of `ptrace' calls the IA64 routine for 2972 * many of the requests. This will only work for requests that do 2973 * not need access to the calling processes `pt_regs' which is located 2974 * at the address of `stack'. Once we call the IA64 `sys_ptrace' then 2975 * the address of `stack' will not be the address of the `pt_regs'. 
2976 */ 2977asmlinkage long 2978sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data, 2979 long arg4, long arg5, long arg6, long arg7, long stack) 2980{ 2981 struct pt_regs *regs = (struct pt_regs *) &stack; 2982 struct task_struct *child; 2983 unsigned int value, tmp; 2984 long i, ret; 2985 2986 lock_kernel(); 2987 if (request == PTRACE_TRACEME) { 2988 ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack); 2989 goto out; 2990 } 2991 2992 ret = -ESRCH; 2993 read_lock(&tasklist_lock); 2994 child = find_task_by_pid(pid); 2995 read_unlock(&tasklist_lock); 2996 if (!child) 2997 goto out; 2998 ret = -EPERM; 2999 if (pid == 1) /* no messing around with init! */ 3000 goto out; 3001 3002 if (request == PTRACE_ATTACH) { 3003 ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack); 3004 goto out; 3005 } 3006 ret = -ESRCH; 3007 if (!(child->ptrace & PT_PTRACED)) 3008 goto out; 3009 if (child->state != TASK_STOPPED) { 3010 if (request != PTRACE_KILL) 3011 goto out; 3012 } 3013 if (child->p_pptr != current) 3014 goto out; 3015 3016 switch (request) { 3017 case PTRACE_PEEKTEXT: 3018 case PTRACE_PEEKDATA: /* read word at location addr */ 3019 ret = ia32_peek(regs, child, addr, &value); 3020 if (ret == 0) 3021 ret = put_user(value, (unsigned int *) A(data)); 3022 else 3023 ret = -EIO; 3024 goto out; 3025 3026 case PTRACE_POKETEXT: 3027 case PTRACE_POKEDATA: /* write the word at location addr */ 3028 ret = ia32_poke(regs, child, addr, data); 3029 goto out; 3030 3031 case PTRACE_PEEKUSR: /* read word at addr in USER area */ 3032 ret = -EIO; 3033 if ((addr & 3) || addr > 17*sizeof(int)) 3034 break; 3035 3036 tmp = getreg(child, addr); 3037 if (!put_user(tmp, (unsigned int *) A(data))) 3038 ret = 0; 3039 break; 3040 3041 case PTRACE_POKEUSR: /* write word at addr in USER area */ 3042 ret = -EIO; 3043 if ((addr & 3) || addr > 17*sizeof(int)) 3044 break; 3045 3046 putreg(child, addr, data); 3047 ret = 0; 3048 break; 3049 3050 
case IA32_PTRACE_GETREGS: 3051 if (!access_ok(VERIFY_WRITE, (int *) A(data), 17*sizeof(int))) { 3052 ret = -EIO; 3053 break; 3054 } 3055 for (i = 0; i < 17*sizeof(int); i += sizeof(int) ) { 3056 put_user(getreg(child, i), (unsigned int *) A(data)); 3057 data += sizeof(int); 3058 } 3059 ret = 0; 3060 break; 3061 3062 case IA32_PTRACE_SETREGS: 3063 if (!access_ok(VERIFY_READ, (int *) A(data), 17*sizeof(int))) { 3064 ret = -EIO; 3065 break; 3066 } 3067 for (i = 0; i < 17*sizeof(int); i += sizeof(int) ) { 3068 get_user(tmp, (unsigned int *) A(data)); 3069 putreg(child, i, tmp); 3070 data += sizeof(int); 3071 } 3072 ret = 0; 3073 break; 3074 3075 case IA32_PTRACE_GETFPREGS: 3076 ret = save_ia32_fpstate(child, (struct _fpstate_ia32 *) A(data)); 3077 break; 3078 3079 case IA32_PTRACE_SETFPREGS: 3080 ret = restore_ia32_fpstate(child, (struct _fpstate_ia32 *) A(data)); 3081 break; 3082 3083 case PTRACE_SYSCALL: /* continue, stop after next syscall */ 3084 case PTRACE_CONT: /* restart after signal. 
*/ 3085 case PTRACE_KILL: 3086 case PTRACE_SINGLESTEP: /* execute chile for one instruction */ 3087 case PTRACE_DETACH: /* detach a process */ 3088 ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack); 3089 break; 3090 3091 default: 3092 ret = -EIO; 3093 break; 3094 3095 } 3096 out: 3097 unlock_kernel(); 3098 return ret; 3099} 3100 3101static inline int 3102get_flock32(struct flock *kfl, struct flock32 *ufl) 3103{ 3104 int err; 3105 3106 if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl))) 3107 return -EFAULT; 3108 3109 err = __get_user(kfl->l_type, &ufl->l_type); 3110 err |= __get_user(kfl->l_whence, &ufl->l_whence); 3111 err |= __get_user(kfl->l_start, &ufl->l_start); 3112 err |= __get_user(kfl->l_len, &ufl->l_len); 3113 err |= __get_user(kfl->l_pid, &ufl->l_pid); 3114 return err; 3115} 3116 3117static inline int 3118put_flock32(struct flock *kfl, struct flock32 *ufl) 3119{ 3120 int err; 3121 3122 if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl))) 3123 return -EFAULT; 3124 3125 err = __put_user(kfl->l_type, &ufl->l_type); 3126 err |= __put_user(kfl->l_whence, &ufl->l_whence); 3127 err |= __put_user(kfl->l_start, &ufl->l_start); 3128 err |= __put_user(kfl->l_len, &ufl->l_len); 3129 err |= __put_user(kfl->l_pid, &ufl->l_pid); 3130 return err; 3131} 3132 3133extern asmlinkage long sys_fcntl (unsigned int fd, unsigned int cmd, unsigned long arg); 3134 3135asmlinkage long 3136sys32_fcntl (unsigned int fd, unsigned int cmd, unsigned int arg) 3137{ 3138 mm_segment_t old_fs; 3139 struct flock f; 3140 long ret; 3141 3142 switch (cmd) { 3143 case F_GETLK: 3144 case F_SETLK: 3145 case F_SETLKW: 3146 if (get_flock32(&f, (struct flock32 *) A(arg))) 3147 return -EFAULT; 3148 old_fs = get_fs(); 3149 set_fs(KERNEL_DS); 3150 ret = sys_fcntl(fd, cmd, (unsigned long) &f); 3151 set_fs(old_fs); 3152 if (cmd == F_GETLK && put_flock32(&f, (struct flock32 *) A(arg))) 3153 return -EFAULT; 3154 return ret; 3155 3156 default: 3157 /* 3158 * `sys_fcntl' lies about arg, for the 
F_SETOWN 3159 * sub-function arg can have a negative value. 3160 */ 3161 return sys_fcntl(fd, cmd, arg); 3162 } 3163} 3164 3165asmlinkage long sys_ni_syscall(void); 3166 3167asmlinkage long 3168sys32_ni_syscall (int dummy0, int dummy1, int dummy2, int dummy3, int dummy4, int dummy5, 3169 int dummy6, int dummy7, int stack) 3170{ 3171 struct pt_regs *regs = (struct pt_regs *)&stack; 3172 3173 printk(KERN_WARNING "IA32 syscall #%d issued, maybe we should implement it\n", 3174 (int)regs->r1); 3175 return(sys_ni_syscall()); 3176} 3177 3178/* 3179 * The IA64 maps 4 I/O ports for each 4K page 3180 */ 3181#define IOLEN ((65536 / 4) * 4096) 3182 3183asmlinkage long 3184sys32_iopl (int level) 3185{ 3186 extern unsigned long ia64_iobase; 3187 int fd; 3188 struct file * file; 3189 unsigned int old; 3190 unsigned long addr; 3191 mm_segment_t old_fs = get_fs (); 3192 3193 if (level != 3) 3194 return(-EINVAL); 3195 /* Trying to gain more privileges? */ 3196 asm volatile ("mov %0=ar.eflag ;;" : "=r"(old)); 3197 if (level > ((old >> 12) & 3)) { 3198 if (!capable(CAP_SYS_RAWIO)) 3199 return -EPERM; 3200 } 3201 set_fs(KERNEL_DS); 3202 fd = sys_open("/dev/mem", O_SYNC | O_RDWR, 0); 3203 set_fs(old_fs); 3204 if (fd < 0) 3205 return fd; 3206 file = fget(fd); 3207 if (file == NULL) { 3208 sys_close(fd); 3209 return(-EFAULT); 3210 } 3211 3212 down_write(¤t->mm->mmap_sem); 3213 addr = do_mmap_pgoff(file, IA32_IOBASE, 3214 IOLEN, PROT_READ|PROT_WRITE, MAP_SHARED, 3215 (ia64_iobase & ~PAGE_OFFSET) >> PAGE_SHIFT); 3216 up_write(¤t->mm->mmap_sem); 3217 3218 if (addr >= 0) { 3219 old = (old & ~0x3000) | (level << 12); 3220 asm volatile ("mov ar.eflag=%0;;" :: "r"(old)); 3221 } 3222 3223 fput(file); 3224 sys_close(fd); 3225 return 0; 3226} 3227 3228asmlinkage long 3229sys32_ioperm (unsigned int from, unsigned int num, int on) 3230{ 3231 3232 return sys32_iopl(3); 3233} 3234 3235typedef struct { 3236 unsigned int ss_sp; 3237 unsigned int ss_flags; 3238 unsigned int ss_size; 3239} ia32_stack_t; 
3240 3241asmlinkage long 3242sys32_sigaltstack (ia32_stack_t *uss32, ia32_stack_t *uoss32, 3243 long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, long stack) 3244{ 3245 struct pt_regs *pt = (struct pt_regs *) &stack; 3246 stack_t uss, uoss; 3247 ia32_stack_t buf32; 3248 int ret; 3249 mm_segment_t old_fs = get_fs(); 3250 3251 if (uss32) 3252 if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t))) 3253 return -EFAULT; 3254 uss.ss_sp = (void *) (long) buf32.ss_sp; 3255 uss.ss_flags = buf32.ss_flags; 3256 uss.ss_size = buf32.ss_size; 3257 set_fs(KERNEL_DS); 3258 ret = do_sigaltstack(uss32 ? &uss : NULL, &uoss, pt->r12); 3259 set_fs(old_fs); 3260 if (ret < 0) 3261 return(ret); 3262 if (uoss32) { 3263 buf32.ss_sp = (long) uoss.ss_sp; 3264 buf32.ss_flags = uoss.ss_flags; 3265 buf32.ss_size = uoss.ss_size; 3266 if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t))) 3267 return -EFAULT; 3268 } 3269 return ret; 3270} 3271 3272asmlinkage int 3273sys32_pause (void) 3274{ 3275 current->state = TASK_INTERRUPTIBLE; 3276 schedule(); 3277 return -ERESTARTNOHAND; 3278} 3279 3280asmlinkage long sys_msync (unsigned long start, size_t len, int flags); 3281 3282asmlinkage int 3283sys32_msync (unsigned int start, unsigned int len, int flags) 3284{ 3285 unsigned int addr; 3286 3287 if (OFFSET4K(start)) 3288 return -EINVAL; 3289 addr = PAGE_START(start); 3290 return sys_msync(addr, len + (start - addr), flags); 3291} 3292 3293struct sysctl32 { 3294 unsigned int name; 3295 int nlen; 3296 unsigned int oldval; 3297 unsigned int oldlenp; 3298 unsigned int newval; 3299 unsigned int newlen; 3300 unsigned int __unused[4]; 3301}; 3302 3303extern asmlinkage long sys_sysctl(struct __sysctl_args *args); 3304 3305asmlinkage long 3306sys32_sysctl (struct sysctl32 *args) 3307{ 3308 struct sysctl32 a32; 3309 mm_segment_t old_fs = get_fs (); 3310 void *oldvalp, *newvalp; 3311 size_t oldlen; 3312 int *namep; 3313 long ret; 3314 3315 if (copy_from_user(&a32, args, sizeof(a32))) 3316 return 
-EFAULT; 3317 3318 /* 3319 * We need to pre-validate these because we have to disable address checking 3320 * before calling do_sysctl() because of OLDLEN but we can't run the risk of the 3321 * user specifying bad addresses here. Well, since we're dealing with 32 bit 3322 * addresses, we KNOW that access_ok() will always succeed, so this is an 3323 * expensive NOP, but so what... 3324 */ 3325 namep = (int *) A(a32.name); 3326 oldvalp = (void *) A(a32.oldval); 3327 newvalp = (void *) A(a32.newval); 3328 3329 if ((oldvalp && get_user(oldlen, (int *) A(a32.oldlenp))) 3330 || !access_ok(VERIFY_WRITE, namep, 0) 3331 || !access_ok(VERIFY_WRITE, oldvalp, 0) 3332 || !access_ok(VERIFY_WRITE, newvalp, 0)) 3333 return -EFAULT; 3334 3335 set_fs(KERNEL_DS); 3336 lock_kernel(); 3337 ret = do_sysctl(namep, a32.nlen, oldvalp, &oldlen, newvalp, (size_t) a32.newlen); 3338 unlock_kernel(); 3339 set_fs(old_fs); 3340 3341 if (oldvalp && put_user (oldlen, (int *) A(a32.oldlenp))) 3342 return -EFAULT; 3343 3344 return ret; 3345} 3346 3347asmlinkage long 3348sys32_newuname (struct new_utsname *name) 3349{ 3350 extern asmlinkage long sys_newuname(struct new_utsname * name); 3351 int ret = sys_newuname(name); 3352 3353 if (!ret) 3354 if (copy_to_user(name->machine, "i686\0\0\0", 8)) 3355 ret = -EFAULT; 3356 return ret; 3357} 3358 3359extern asmlinkage long sys_getresuid (uid_t *ruid, uid_t *euid, uid_t *suid); 3360 3361asmlinkage long 3362sys32_getresuid16 (u16 *ruid, u16 *euid, u16 *suid) 3363{ 3364 uid_t a, b, c; 3365 int ret; 3366 mm_segment_t old_fs = get_fs(); 3367 3368 set_fs(KERNEL_DS); 3369 ret = sys_getresuid(&a, &b, &c); 3370 set_fs(old_fs); 3371 3372 if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid)) 3373 return -EFAULT; 3374 return ret; 3375} 3376 3377extern asmlinkage long sys_getresgid (gid_t *rgid, gid_t *egid, gid_t *sgid); 3378 3379asmlinkage long 3380sys32_getresgid16 (u16 *rgid, u16 *egid, u16 *sgid) 3381{ 3382 gid_t a, b, c; 3383 int ret; 3384 mm_segment_t 
old_fs = get_fs(); 3385 3386 set_fs(KERNEL_DS); 3387 ret = sys_getresgid(&a, &b, &c); 3388 set_fs(old_fs); 3389 3390 if (ret) 3391 return ret; 3392 3393 return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid); 3394} 3395 3396asmlinkage long 3397sys32_lseek (unsigned int fd, int offset, unsigned int whence) 3398{ 3399 extern off_t sys_lseek (unsigned int fd, off_t offset, unsigned int origin); 3400 3401 /* Sign-extension of "offset" is important here... */ 3402 return sys_lseek(fd, offset, whence); 3403} 3404 3405extern asmlinkage long sys_getgroups (int gidsetsize, gid_t *grouplist); 3406 3407asmlinkage long 3408sys32_getgroups16 (int gidsetsize, short *grouplist) 3409{ 3410 mm_segment_t old_fs = get_fs(); 3411 gid_t gl[NGROUPS]; 3412 int ret, i; 3413 3414 set_fs(KERNEL_DS); 3415 ret = sys_getgroups(gidsetsize, gl); 3416 set_fs(old_fs); 3417 3418 if (gidsetsize && ret > 0 && ret <= NGROUPS) 3419 for (i = 0; i < ret; i++, grouplist++) 3420 if (put_user(gl[i], grouplist)) 3421 return -EFAULT; 3422 return ret; 3423} 3424 3425extern asmlinkage long sys_setgroups (int gidsetsize, gid_t *grouplist); 3426 3427asmlinkage long 3428sys32_setgroups16 (int gidsetsize, short *grouplist) 3429{ 3430 mm_segment_t old_fs = get_fs(); 3431 gid_t gl[NGROUPS]; 3432 int ret, i; 3433 3434 if ((unsigned) gidsetsize > NGROUPS) 3435 return -EINVAL; 3436 for (i = 0; i < gidsetsize; i++, grouplist++) 3437 if (get_user(gl[i], grouplist)) 3438 return -EFAULT; 3439 set_fs(KERNEL_DS); 3440 ret = sys_setgroups(gidsetsize, gl); 3441 set_fs(old_fs); 3442 return ret; 3443} 3444 3445/* 3446 * Unfortunately, the x86 compiler aligns variables of type "long long" to a 4 byte boundary 3447 * only, which means that the x86 version of "struct flock64" doesn't match the ia64 version 3448 * of struct flock. 
3449 */ 3450 3451static inline long 3452ia32_put_flock (struct flock *l, unsigned long addr) 3453{ 3454 return (put_user(l->l_type, (short *) addr) 3455 | put_user(l->l_whence, (short *) (addr + 2)) 3456 | put_user(l->l_start, (long *) (addr + 4)) 3457 | put_user(l->l_len, (long *) (addr + 12)) 3458 | put_user(l->l_pid, (int *) (addr + 20))); 3459} 3460 3461static inline long 3462ia32_get_flock (struct flock *l, unsigned long addr) 3463{ 3464 unsigned int start_lo, start_hi, len_lo, len_hi; 3465 int err = (get_user(l->l_type, (short *) addr) 3466 | get_user(l->l_whence, (short *) (addr + 2)) 3467 | get_user(start_lo, (int *) (addr + 4)) 3468 | get_user(start_hi, (int *) (addr + 8)) 3469 | get_user(len_lo, (int *) (addr + 12)) 3470 | get_user(len_hi, (int *) (addr + 16)) 3471 | get_user(l->l_pid, (int *) (addr + 20))); 3472 l->l_start = ((unsigned long) start_hi << 32) | start_lo; 3473 l->l_len = ((unsigned long) len_hi << 32) | len_lo; 3474 return err; 3475} 3476 3477asmlinkage long 3478sys32_fcntl64 (unsigned int fd, unsigned int cmd, unsigned int arg) 3479{ 3480 mm_segment_t old_fs; 3481 struct flock f; 3482 long ret; 3483 3484 switch (cmd) { 3485 case F_GETLK64: 3486 case F_SETLK64: 3487 case F_SETLKW64: 3488 if (ia32_get_flock(&f, arg)) 3489 return -EFAULT; 3490 old_fs = get_fs(); 3491 set_fs(KERNEL_DS); 3492 ret = sys_fcntl(fd, cmd, (unsigned long) &f); 3493 set_fs(old_fs); 3494 if (cmd == F_GETLK && ia32_put_flock(&f, arg)) 3495 return -EFAULT; 3496 break; 3497 3498 default: 3499 ret = sys32_fcntl(fd, cmd, arg); 3500 break; 3501 } 3502 return ret; 3503} 3504 3505asmlinkage long 3506sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi) 3507{ 3508 extern asmlinkage long sys_truncate (const char *path, unsigned long length); 3509 3510 return sys_truncate((const char *) A(path), ((unsigned long) len_hi << 32) | len_lo); 3511} 3512 3513asmlinkage long 3514sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi) 3515{ 3516 
extern asmlinkage long sys_ftruncate (int fd, unsigned long length); 3517 3518 return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo); 3519} 3520 3521static int 3522putstat64 (struct stat64 *ubuf, struct stat *kbuf) 3523{ 3524 int err; 3525 3526 if (clear_user(ubuf, sizeof(*ubuf))) 3527 return 1; 3528 3529 err = __put_user(kbuf->st_dev, &ubuf->st_dev); 3530 err |= __put_user(kbuf->st_ino, &ubuf->__st_ino); 3531 err |= __put_user(kbuf->st_ino, &ubuf->st_ino_lo); 3532 err |= __put_user(kbuf->st_ino >> 32, &ubuf->st_ino_hi); 3533 err |= __put_user(kbuf->st_mode, &ubuf->st_mode); 3534 err |= __put_user(kbuf->st_nlink, &ubuf->st_nlink); 3535 err |= __put_user(kbuf->st_uid, &ubuf->st_uid); 3536 err |= __put_user(kbuf->st_gid, &ubuf->st_gid); 3537 err |= __put_user(kbuf->st_rdev, &ubuf->st_rdev); 3538 err |= __put_user(kbuf->st_size, &ubuf->st_size_lo); 3539 err |= __put_user((kbuf->st_size >> 32), &ubuf->st_size_hi); 3540 err |= __put_user(kbuf->st_atime, &ubuf->st_atime); 3541 err |= __put_user(kbuf->st_mtime, &ubuf->st_mtime); 3542 err |= __put_user(kbuf->st_ctime, &ubuf->st_ctime); 3543 err |= __put_user(kbuf->st_blksize, &ubuf->st_blksize); 3544 err |= __put_user(kbuf->st_blocks, &ubuf->st_blocks); 3545 return err; 3546} 3547 3548asmlinkage long 3549sys32_stat64 (char *filename, struct stat64 *statbuf) 3550{ 3551 mm_segment_t old_fs = get_fs(); 3552 struct stat s; 3553 long ret; 3554 3555 set_fs(KERNEL_DS); 3556 ret = sys_newstat(filename, &s); 3557 set_fs(old_fs); 3558 if (putstat64(statbuf, &s)) 3559 return -EFAULT; 3560 return ret; 3561} 3562 3563asmlinkage long 3564sys32_lstat64 (char *filename, struct stat64 *statbuf) 3565{ 3566 mm_segment_t old_fs = get_fs(); 3567 struct stat s; 3568 long ret; 3569 3570 set_fs(KERNEL_DS); 3571 ret = sys_newlstat(filename, &s); 3572 set_fs(old_fs); 3573 if (putstat64(statbuf, &s)) 3574 return -EFAULT; 3575 return ret; 3576} 3577 3578asmlinkage long 3579sys32_fstat64 (unsigned int fd, struct stat64 *statbuf) 3580{ 3581 
mm_segment_t old_fs = get_fs(); 3582 struct stat s; 3583 long ret; 3584 3585 set_fs(KERNEL_DS); 3586 ret = sys_newfstat(fd, &s); 3587 set_fs(old_fs); 3588 if (putstat64(statbuf, &s)) 3589 return -EFAULT; 3590 return ret; 3591} 3592 3593asmlinkage long 3594sys32_sigpending (unsigned int *set) 3595{ 3596 return do_sigpending(set, sizeof(*set)); 3597} 3598 3599struct sysinfo32 { 3600 s32 uptime; 3601 u32 loads[3]; 3602 u32 totalram; 3603 u32 freeram; 3604 u32 sharedram; 3605 u32 bufferram; 3606 u32 totalswap; 3607 u32 freeswap; 3608 unsigned short procs; 3609 char _f[22]; 3610}; 3611 3612asmlinkage long 3613sys32_sysinfo (struct sysinfo32 *info) 3614{ 3615 extern asmlinkage long sys_sysinfo (struct sysinfo *); 3616 mm_segment_t old_fs = get_fs(); 3617 struct sysinfo s; 3618 long ret, err; 3619 3620 set_fs(KERNEL_DS); 3621 ret = sys_sysinfo(&s); 3622 set_fs(old_fs); 3623 3624 if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) 3625 return -EFAULT; 3626 3627 err = __put_user(s.uptime, &info->uptime); 3628 err |= __put_user(s.loads[0], &info->loads[0]); 3629 err |= __put_user(s.loads[1], &info->loads[1]); 3630 err |= __put_user(s.loads[2], &info->loads[2]); 3631 err |= __put_user(s.totalram, &info->totalram); 3632 err |= __put_user(s.freeram, &info->freeram); 3633 err |= __put_user(s.sharedram, &info->sharedram); 3634 err |= __put_user(s.bufferram, &info->bufferram); 3635 err |= __put_user(s.totalswap, &info->totalswap); 3636 err |= __put_user(s.freeswap, &info->freeswap); 3637 err |= __put_user(s.procs, &info->procs); 3638 if (err) 3639 return -EFAULT; 3640 return ret; 3641} 3642 3643/* In order to reduce some races, while at the same time doing additional 3644 * checking and hopefully speeding things up, we copy filenames to the 3645 * kernel data space before using them.. 3646 * 3647 * POSIX.1 2.4: an empty pathname is invalid (ENOENT). 
 */
/*
 * Copy a user pathname into "page".  Returns 0 on success, -ENAMETOOLONG
 * if the name fills the whole page, -ENOENT for an empty name, or the
 * (negative) fault code from strncpy_from_user().
 */
static inline int
do_getname32 (const char *filename, char *page)
{
	int retval;

	/* 32bit pointer will be always far below TASK_SIZE :)) */
	retval = strncpy_from_user((char *)page, (char *)filename, PAGE_SIZE);
	if (retval > 0) {
		if (retval < PAGE_SIZE)
			return 0;
		return -ENAMETOOLONG;
	} else if (!retval)
		retval = -ENOENT;
	return retval;
}

/*
 * getname() equivalent for 32-bit user pointers: allocate a page, copy the
 * name in, and return either the page or an ERR_PTR.  The caller releases
 * the result with putname().
 */
static char *
getname32 (const char *filename)
{
	char *tmp, *result;

	result = ERR_PTR(-ENOMEM);
	tmp = (char *)__get_free_page(GFP_KERNEL);
	if (tmp) {
		int retval = do_getname32(filename, tmp);

		result = tmp;
		if (retval < 0) {
			/* frees the page and hands back the error instead */
			putname(tmp);
			result = ERR_PTR(retval);
		}
	}
	return result;
}

/* 32-bit layout of struct dqblk: all fields narrowed to 32 bits. */
struct dqblk32 {
	__u32 dqb_bhardlimit;
	__u32 dqb_bsoftlimit;
	__u32 dqb_curblocks;
	__u32 dqb_ihardlimit;
	__u32 dqb_isoftlimit;
	__u32 dqb_curinodes;
	__kernel_time_t32 dqb_btime;
	__kernel_time_t32 dqb_itime;
};

/*
 * ia32 quotactl(2).  For the SET commands the 32-bit dqblk is converted
 * *in place* inside "d": the raw dqblk32 bytes are first copied into the
 * (larger) native struct, then the two 32-bit time fields are re-read
 * through a dqblk32-typed view of &d and widened into their native slots.
 * This depends on the 64-bit time fields living at higher offsets than the
 * 32-bit source bytes — do not reorder these assignments.  Q_GETQUOTA does
 * the mirror-image narrowing on the way out.
 */
asmlinkage long
sys32_quotactl (int cmd, unsigned int special, int id, struct dqblk32 *addr)
{
	extern asmlinkage long sys_quotactl (int, const char *, int, caddr_t);
	int cmds = cmd >> SUBCMDSHIFT;
	mm_segment_t old_fs;
	struct dqblk d;
	char *spec;
	long err;

	switch (cmds) {
	      case Q_GETQUOTA:
		break;
	      case Q_SETQUOTA:
	      case Q_SETUSE:
	      case Q_SETQLIM:
		if (copy_from_user (&d, addr, sizeof(struct dqblk32)))
			return -EFAULT;
		/* widen the 32-bit times through a dqblk32 view of &d */
		d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime;
		d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime;
		break;
	      default:
		/* no dqblk conversion needed; pass the request through */
		return sys_quotactl(cmd, (void *) A(special), id, (caddr_t) addr);
	}
	spec = getname32((void *) A(special));
	err = PTR_ERR(spec);
	if (IS_ERR(spec))
		return err;
	old_fs = get_fs ();
	set_fs(KERNEL_DS);
	err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d);
	set_fs(old_fs);
	putname(spec);
	if (cmds == Q_GETQUOTA) {
		/* narrow the native times back into the dqblk32 layout of &d */
		__kernel_time_t b = d.dqb_btime, i = d.dqb_itime;
		((struct dqblk32 *)&d)->dqb_itime = i;
		((struct dqblk32 *)&d)->dqb_btime = b;
		if (copy_to_user(addr, &d, sizeof(struct dqblk32)))
			return -EFAULT;
	}
	return err;
}

/*
 * ia32 sched_rr_get_interval(2): fetch the native timespec under KERNEL_DS
 * and narrow it into the 32-bit timespec.
 */
asmlinkage long
sys32_sched_rr_get_interval (pid_t pid, struct timespec32 *interval)
{
	extern asmlinkage long sys_sched_rr_get_interval (pid_t, struct timespec *);
	mm_segment_t old_fs = get_fs();
	struct timespec t;
	long ret;

	set_fs(KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid, &t);
	set_fs(old_fs);
	if (put_user (t.tv_sec, &interval->tv_sec) || put_user (t.tv_nsec, &interval->tv_nsec))
		return -EFAULT;
	return ret;
}

/* ia32 pread(2): reassemble the 64-bit file position from its two halves. */
asmlinkage long
sys32_pread (unsigned int fd, void *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
	extern asmlinkage long sys_pread (unsigned int, char *, size_t, loff_t);
	return sys_pread(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

/* ia32 pwrite(2): reassemble the 64-bit file position from its two halves. */
asmlinkage long
sys32_pwrite (unsigned int fd, void *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
	extern asmlinkage long sys_pwrite (unsigned int, const char *, size_t, loff_t);
	return sys_pwrite(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

/*
 * ia32 sendfile(2): the user offset is a 32-bit int, so shuttle it through
 * a kernel off_t under KERNEL_DS.  NOTE(review): put_user through the int
 * pointer writes only the low 32 bits of the updated offset — an offset
 * beyond 4GB would be silently truncated; inherent in the 32-bit ABI.
 */
asmlinkage long
sys32_sendfile (int out_fd, int in_fd, int *offset, unsigned int count)
{
	extern asmlinkage long sys_sendfile (int, int, off_t *, size_t);
	mm_segment_t old_fs = get_fs();
	long ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd, offset ? &of : NULL, count);
	set_fs(old_fs);

	if (!ret && offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}

/*
 * ia32 personality(2): hide PER_LINUX32 from 32-bit processes — they ask
 * for and are told PER_LINUX while the task actually stays PER_LINUX32.
 */
asmlinkage long
sys32_personality (unsigned int personality)
{
	extern asmlinkage long sys_personality (unsigned long);
	long ret;

	if (current->personality == PER_LINUX32 && personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;
	return ret;
}

/*
 * ia32 brk(2).  When the break moves down, zero the tail of the partial
 * page above the new break so stale heap contents don't leak back if the
 * break later grows again.
 */
asmlinkage unsigned long
sys32_brk (unsigned int brk)
{
	unsigned long ret, obrk;
	struct mm_struct *mm = current->mm;

	obrk = mm->brk;
	ret = sys_brk(brk);
	if (ret < obrk)
		clear_user((void *) ret, PAGE_ALIGN(ret) - ret);
	return ret;
}

#ifdef NOTYET  /* UNTESTED FOR IA64 FROM HERE DOWN */

/* 32-bit layout of struct ncp_mount_data. */
struct ncp_mount_data32 {
	int version;
	unsigned int ncp_fd;
	__kernel_uid_t32 mounted_uid;
	int wdog_pid;
	unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
	unsigned int time_out;
	unsigned int retry_count;
	unsigned int flags;
	__kernel_uid_t32 uid;
	__kernel_gid_t32 gid;
	__kernel_mode_t32 file_mode;
	__kernel_mode_t32 dir_mode;
};

/*
 * Widen an ncp_mount_data32 into a native ncp_mount_data *in place*.
 * Fields are rewritten back-to-front so each read happens before its
 * source bytes are overwritten — do not reorder.
 */
static void *
do_ncp_super_data_conv(void *raw_data)
{
	struct ncp_mount_data *n = (struct ncp_mount_data *)raw_data;
	struct ncp_mount_data32 *n32 = (struct ncp_mount_data32 *)raw_data;

	n->dir_mode = n32->dir_mode;
	n->file_mode = n32->file_mode;
	n->gid = n32->gid;
	n->uid = n32->uid;
	/* moves mounted_vol plus the three unsigned ints that follow it */
	memmove (n->mounted_vol, n32->mounted_vol,
		 (sizeof (n32->mounted_vol) + 3 * sizeof (unsigned int)));
	n->wdog_pid = n32->wdog_pid;
	n->mounted_uid = n32->mounted_uid;
	return raw_data;
}

/* 32-bit layout of struct smb_mount_data. */
struct smb_mount_data32 {
	int version;
	__kernel_uid_t32 mounted_uid;
	__kernel_uid_t32 uid;
	__kernel_gid_t32 gid;
	__kernel_mode_t32
				file_mode;
	__kernel_mode_t32 dir_mode;
};

/*
 * Widen an smb_mount_data32 into a native smb_mount_data *in place*.
 * As with the NCP variant, this relies on the native fields sitting at
 * offsets at or above their 32-bit sources.
 */
static void *
do_smb_super_data_conv(void *raw_data)
{
	struct smb_mount_data *s = (struct smb_mount_data *)raw_data;
	struct smb_mount_data32 *s32 = (struct smb_mount_data32 *)raw_data;

	s->version = s32->version;
	s->mounted_uid = s32->mounted_uid;
	s->uid = s32->uid;
	s->gid = s32->gid;
	s->file_mode = s32->file_mode;
	s->dir_mode = s32->dir_mode;
	return raw_data;
}

/*
 * Copy up to a page of mount data from user space into a freshly allocated
 * kernel page, bounded by the end of the containing VMA.  On success *kernel
 * holds the page (caller frees); *kernel is 0 when "user" is NULL.
 *
 * NOTE(review): the copied data is not explicitly NUL-terminated and the
 * page is not zeroed, so a string that runs to the end of the copied region
 * may be unterminated — verify callers treat the data as bounded.
 */
static int
copy_mount_stuff_to_kernel(const void *user, unsigned long *kernel)
{
	int i;
	unsigned long page;
	struct vm_area_struct *vma;

	*kernel = 0;
	if(!user)
		return 0;
	vma = find_vma(current->mm, (unsigned long)user);
	if(!vma || (unsigned long)user < vma->vm_start)
		return -EFAULT;
	if(!(vma->vm_flags & VM_READ))
		return -EFAULT;
	/* copy at most to the end of the VMA, and at most one page minus one */
	i = vma->vm_end - (unsigned long) user;
	if(PAGE_SIZE <= (unsigned long) i)
		i = PAGE_SIZE - 1;
	if(!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;
	if(copy_from_user((void *) page, user, i)) {
		free_page(page);
		return -EFAULT;
	}
	*kernel = page;
	return 0;
}

extern asmlinkage long sys_mount(char * dev_name, char * dir_name, char * type,
				 unsigned long new_flags, void *data);

#define SMBFS_NAME	"smbfs"
#define NCPFS_NAME	"ncpfs"

/*
 * ia32 mount(2).  Only smbfs and ncpfs carry binary mount data whose layout
 * differs between the 32- and 64-bit ABIs; those are copied into kernel
 * pages, converted in place, and mounted via do_mount().  Every other
 * filesystem type is passed straight through to the native sys_mount().
 * Cleanup of the copied pages uses the usual goto-unwind ladder.
 */
asmlinkage long
sys32_mount(char *dev_name, char *dir_name, char *type,
	    unsigned long new_flags, u32 data)
{
	unsigned long type_page;
	int err, is_smb, is_ncp;

	if(!capable(CAP_SYS_ADMIN))
		return -EPERM;
	is_smb = is_ncp = 0;
	err = copy_mount_stuff_to_kernel((const void *)type, &type_page);
	if(err)
		return err;
	if(type_page) {
		is_smb = !strcmp((char *)type_page, SMBFS_NAME);
		is_ncp = !strcmp((char *)type_page, NCPFS_NAME);
	}
	if(!is_smb && !is_ncp) {
		/* no data conversion needed; hand off to the native path */
		if(type_page)
			free_page(type_page);
		return sys_mount(dev_name, dir_name, type, new_flags,
				 (void *)AA(data));
	} else {
		unsigned long dev_page, dir_page, data_page;

		err = copy_mount_stuff_to_kernel((const void *)dev_name,
						 &dev_page);
		if(err)
			goto out;
		err = copy_mount_stuff_to_kernel((const void *)dir_name,
						 &dir_page);
		if(err)
			goto dev_out;
		err = copy_mount_stuff_to_kernel((const void *)AA(data),
						 &data_page);
		if(err)
			goto dir_out;
		if(is_ncp)
			do_ncp_super_data_conv((void *)data_page);
		else if(is_smb)
			do_smb_super_data_conv((void *)data_page);
		else
			panic("The problem is here...");	/* unreachable: is_smb || is_ncp holds */
		err = do_mount((char *)dev_page, (char *)dir_page,
			       (char *)type_page, new_flags,
			       (void *)data_page);
		if(data_page)
			free_page(data_page);
	dir_out:
		if(dir_page)
			free_page(dir_page);
	dev_out:
		if(dev_page)
			free_page(dev_page);
	out:
		if(type_page)
			free_page(type_page);
		return err;
	}
}

extern asmlinkage long sys_setreuid(uid_t ruid, uid_t euid);

/*
 * ia32 setreuid(2): widen the 32-bit uids to the native type, keeping the
 * -1 "leave unchanged" sentinel intact across the width change.
 */
asmlinkage long sys32_setreuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid)
{
	uid_t sruid, seuid;

	sruid = (ruid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)ruid);
	seuid = (euid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)euid);
	return sys_setreuid(sruid, seuid);
}

extern asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid);

/* ia32 setresuid(2): same -1-preserving widening for all three uids. */
asmlinkage long
sys32_setresuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid,
		__kernel_uid_t32 suid)
{
	uid_t sruid, seuid, ssuid;

	sruid = (ruid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)ruid);
	seuid = (euid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)euid);
	ssuid = (suid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)suid);
	return sys_setresuid(sruid, seuid, ssuid);
}

extern asmlinkage long sys_setregid(gid_t rgid, gid_t egid);

/* ia32 setregid(2): -1-preserving widening of both gids. */
asmlinkage long
sys32_setregid(__kernel_gid_t32 rgid, __kernel_gid_t32 egid)
{
	gid_t srgid, segid;

	srgid = (rgid == (__kernel_gid_t32)-1) ? ((gid_t)-1) : ((gid_t)rgid);
	segid = (egid == (__kernel_gid_t32)-1) ? ((gid_t)-1) : ((gid_t)egid);
	return sys_setregid(srgid, segid);
}

extern asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid);

/* ia32 setresgid(2): -1-preserving widening of all three gids. */
asmlinkage long
sys32_setresgid(__kernel_gid_t32 rgid, __kernel_gid_t32 egid,
		__kernel_gid_t32 sgid)
{
	gid_t srgid, segid, ssgid;

	srgid = (rgid == (__kernel_gid_t32)-1) ? ((gid_t)-1) : ((gid_t)rgid);
	segid = (egid == (__kernel_gid_t32)-1) ? ((gid_t)-1) : ((gid_t)egid);
	ssgid = (sgid == (__kernel_gid_t32)-1) ? ((gid_t)-1) : ((gid_t)sgid);
	return sys_setresgid(srgid, segid, ssgid);
}

/* Stuff for NFS server syscalls...
*/ 4021struct nfsctl_svc32 { 4022 u16 svc32_port; 4023 s32 svc32_nthreads; 4024}; 4025 4026struct nfsctl_client32 { 4027 s8 cl32_ident[NFSCLNT_IDMAX+1]; 4028 s32 cl32_naddr; 4029 struct in_addr cl32_addrlist[NFSCLNT_ADDRMAX]; 4030 s32 cl32_fhkeytype; 4031 s32 cl32_fhkeylen; 4032 u8 cl32_fhkey[NFSCLNT_KEYMAX]; 4033}; 4034 4035struct nfsctl_export32 { 4036 s8 ex32_client[NFSCLNT_IDMAX+1]; 4037 s8 ex32_path[NFS_MAXPATHLEN+1]; 4038 __kernel_dev_t32 ex32_dev; 4039 __kernel_ino_t32 ex32_ino; 4040 s32 ex32_flags; 4041 __kernel_uid_t32 ex32_anon_uid; 4042 __kernel_gid_t32 ex32_anon_gid; 4043}; 4044 4045struct nfsctl_uidmap32 { 4046 u32 ug32_ident; /* char * */ 4047 __kernel_uid_t32 ug32_uidbase; 4048 s32 ug32_uidlen; 4049 u32 ug32_udimap; /* uid_t * */ 4050 __kernel_uid_t32 ug32_gidbase; 4051 s32 ug32_gidlen; 4052 u32 ug32_gdimap; /* gid_t * */ 4053}; 4054 4055struct nfsctl_fhparm32 { 4056 struct sockaddr gf32_addr; 4057 __kernel_dev_t32 gf32_dev; 4058 __kernel_ino_t32 gf32_ino; 4059 s32 gf32_version; 4060}; 4061 4062struct nfsctl_arg32 { 4063 s32 ca32_version; /* safeguard */ 4064 union { 4065 struct nfsctl_svc32 u32_svc; 4066 struct nfsctl_client32 u32_client; 4067 struct nfsctl_export32 u32_export; 4068 struct nfsctl_uidmap32 u32_umap; 4069 struct nfsctl_fhparm32 u32_getfh; 4070 u32 u32_debug; 4071 } u; 4072#define ca32_svc u.u32_svc 4073#define ca32_client u.u32_client 4074#define ca32_export u.u32_export 4075#define ca32_umap u.u32_umap 4076#define ca32_getfh u.u32_getfh 4077#define ca32_authd u.u32_authd 4078#define ca32_debug u.u32_debug 4079}; 4080 4081union nfsctl_res32 { 4082 struct knfs_fh cr32_getfh; 4083 u32 cr32_debug; 4084}; 4085 4086static int 4087nfs_svc32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) 4088{ 4089 int err; 4090 4091 err = __get_user(karg->ca_version, &arg32->ca32_version); 4092 err |= __get_user(karg->ca_svc.svc_port, &arg32->ca32_svc.svc32_port); 4093 err |= __get_user(karg->ca_svc.svc_nthreads, 4094 
&arg32->ca32_svc.svc32_nthreads); 4095 return err; 4096} 4097 4098static int 4099nfs_clnt32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) 4100{ 4101 int err; 4102 4103 err = __get_user(karg->ca_version, &arg32->ca32_version); 4104 err |= copy_from_user(&karg->ca_client.cl_ident[0], 4105 &arg32->ca32_client.cl32_ident[0], 4106 NFSCLNT_IDMAX); 4107 err |= __get_user(karg->ca_client.cl_naddr, 4108 &arg32->ca32_client.cl32_naddr); 4109 err |= copy_from_user(&karg->ca_client.cl_addrlist[0], 4110 &arg32->ca32_client.cl32_addrlist[0], 4111 (sizeof(struct in_addr) * NFSCLNT_ADDRMAX)); 4112 err |= __get_user(karg->ca_client.cl_fhkeytype, 4113 &arg32->ca32_client.cl32_fhkeytype); 4114 err |= __get_user(karg->ca_client.cl_fhkeylen, 4115 &arg32->ca32_client.cl32_fhkeylen); 4116 err |= copy_from_user(&karg->ca_client.cl_fhkey[0], 4117 &arg32->ca32_client.cl32_fhkey[0], 4118 NFSCLNT_KEYMAX); 4119 return err; 4120} 4121 4122static int 4123nfs_exp32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) 4124{ 4125 int err; 4126 4127 err = __get_user(karg->ca_version, &arg32->ca32_version); 4128 err |= copy_from_user(&karg->ca_export.ex_client[0], 4129 &arg32->ca32_export.ex32_client[0], 4130 NFSCLNT_IDMAX); 4131 err |= copy_from_user(&karg->ca_export.ex_path[0], 4132 &arg32->ca32_export.ex32_path[0], 4133 NFS_MAXPATHLEN); 4134 err |= __get_user(karg->ca_export.ex_dev, 4135 &arg32->ca32_export.ex32_dev); 4136 err |= __get_user(karg->ca_export.ex_ino, 4137 &arg32->ca32_export.ex32_ino); 4138 err |= __get_user(karg->ca_export.ex_flags, 4139 &arg32->ca32_export.ex32_flags); 4140 err |= __get_user(karg->ca_export.ex_anon_uid, 4141 &arg32->ca32_export.ex32_anon_uid); 4142 err |= __get_user(karg->ca_export.ex_anon_gid, 4143 &arg32->ca32_export.ex32_anon_gid); 4144 return err; 4145} 4146 4147static int 4148nfs_uud32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) 4149{ 4150 u32 uaddr; 4151 int i; 4152 int err; 4153 4154 memset(karg, 0, sizeof(*karg)); 4155 
if(__get_user(karg->ca_version, &arg32->ca32_version)) 4156 return -EFAULT; 4157 karg->ca_umap.ug_ident = (char *)get_free_page(GFP_USER); 4158 if(!karg->ca_umap.ug_ident) 4159 return -ENOMEM; 4160 err = __get_user(uaddr, &arg32->ca32_umap.ug32_ident); 4161 if(strncpy_from_user(karg->ca_umap.ug_ident, 4162 (char *)A(uaddr), PAGE_SIZE) <= 0) 4163 return -EFAULT; 4164 err |= __get_user(karg->ca_umap.ug_uidbase, 4165 &arg32->ca32_umap.ug32_uidbase); 4166 err |= __get_user(karg->ca_umap.ug_uidlen, 4167 &arg32->ca32_umap.ug32_uidlen); 4168 err |= __get_user(uaddr, &arg32->ca32_umap.ug32_udimap); 4169 if (err) 4170 return -EFAULT; 4171 karg->ca_umap.ug_udimap = kmalloc((sizeof(uid_t) * 4172 karg->ca_umap.ug_uidlen), 4173 GFP_USER); 4174 if(!karg->ca_umap.ug_udimap) 4175 return -ENOMEM; 4176 for(i = 0; i < karg->ca_umap.ug_uidlen; i++) 4177 err |= __get_user(karg->ca_umap.ug_udimap[i], 4178 &(((__kernel_uid_t32 *)A(uaddr))[i])); 4179 err |= __get_user(karg->ca_umap.ug_gidbase, 4180 &arg32->ca32_umap.ug32_gidbase); 4181 err |= __get_user(karg->ca_umap.ug_uidlen, 4182 &arg32->ca32_umap.ug32_gidlen); 4183 err |= __get_user(uaddr, &arg32->ca32_umap.ug32_gdimap); 4184 if (err) 4185 return -EFAULT; 4186 karg->ca_umap.ug_gdimap = kmalloc((sizeof(gid_t) * 4187 karg->ca_umap.ug_uidlen), 4188 GFP_USER); 4189 if(!karg->ca_umap.ug_gdimap) 4190 return -ENOMEM; 4191 for(i = 0; i < karg->ca_umap.ug_gidlen; i++) 4192 err |= __get_user(karg->ca_umap.ug_gdimap[i], 4193 &(((__kernel_gid_t32 *)A(uaddr))[i])); 4194 4195 return err; 4196} 4197 4198static int 4199nfs_getfh32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32) 4200{ 4201 int err; 4202 4203 err = __get_user(karg->ca_version, &arg32->ca32_version); 4204 err |= copy_from_user(&karg->ca_getfh.gf_addr, 4205 &arg32->ca32_getfh.gf32_addr, 4206 (sizeof(struct sockaddr))); 4207 err |= __get_user(karg->ca_getfh.gf_dev, 4208 &arg32->ca32_getfh.gf32_dev); 4209 err |= __get_user(karg->ca_getfh.gf_ino, 4210 
&arg32->ca32_getfh.gf32_ino); 4211 err |= __get_user(karg->ca_getfh.gf_version, 4212 &arg32->ca32_getfh.gf32_version); 4213 return err; 4214} 4215 4216static int 4217nfs_getfh32_res_trans(union nfsctl_res *kres, union nfsctl_res32 *res32) 4218{ 4219 int err; 4220 4221 err = copy_to_user(&res32->cr32_getfh, 4222 &kres->cr_getfh, 4223 sizeof(res32->cr32_getfh)); 4224 err |= __put_user(kres->cr_debug, &res32->cr32_debug); 4225 return err; 4226} 4227 4228extern asmlinkage long sys_nfsservctl(int cmd, void *arg, void *resp); 4229 4230int asmlinkage 4231sys32_nfsservctl(int cmd, struct nfsctl_arg32 *arg32, union nfsctl_res32 *res32) 4232{ 4233 struct nfsctl_arg *karg = NULL; 4234 union nfsctl_res *kres = NULL; 4235 mm_segment_t oldfs; 4236 int err; 4237 4238 karg = kmalloc(sizeof(*karg), GFP_USER); 4239 if(!karg) 4240 return -ENOMEM; 4241 if(res32) { 4242 kres = kmalloc(sizeof(*kres), GFP_USER); 4243 if(!kres) { 4244 kfree(karg); 4245 return -ENOMEM; 4246 } 4247 } 4248 switch(cmd) { 4249 case NFSCTL_SVC: 4250 err = nfs_svc32_trans(karg, arg32); 4251 break; 4252 case NFSCTL_ADDCLIENT: 4253 err = nfs_clnt32_trans(karg, arg32); 4254 break; 4255 case NFSCTL_DELCLIENT: 4256 err = nfs_clnt32_trans(karg, arg32); 4257 break; 4258 case NFSCTL_EXPORT: 4259 err = nfs_exp32_trans(karg, arg32); 4260 break; 4261 /* This one is unimplemented, be we're ready for it. 
*/ 4262 case NFSCTL_UGIDUPDATE: 4263 err = nfs_uud32_trans(karg, arg32); 4264 break; 4265 case NFSCTL_GETFH: 4266 err = nfs_getfh32_trans(karg, arg32); 4267 break; 4268 default: 4269 err = -EINVAL; 4270 break; 4271 } 4272 if(err) 4273 goto done; 4274 oldfs = get_fs(); 4275 set_fs(KERNEL_DS); 4276 err = sys_nfsservctl(cmd, karg, kres); 4277 set_fs(oldfs); 4278 4279 if(!err && cmd == NFSCTL_GETFH) 4280 err = nfs_getfh32_res_trans(kres, res32); 4281 4282done: 4283 if(karg) { 4284 if(cmd == NFSCTL_UGIDUPDATE) { 4285 if(karg->ca_umap.ug_ident) 4286 kfree(karg->ca_umap.ug_ident); 4287 if(karg->ca_umap.ug_udimap) 4288 kfree(karg->ca_umap.ug_udimap); 4289 if(karg->ca_umap.ug_gdimap) 4290 kfree(karg->ca_umap.ug_gdimap); 4291 } 4292 kfree(karg); 4293 } 4294 if(kres) 4295 kfree(kres); 4296 return err; 4297} 4298 4299/* Handle adjtimex compatability. */ 4300 4301struct timex32 { 4302 u32 modes; 4303 s32 offset, freq, maxerror, esterror; 4304 s32 status, constant, precision, tolerance; 4305 struct timeval32 time; 4306 s32 tick; 4307 s32 ppsfreq, jitter, shift, stabil; 4308 s32 jitcnt, calcnt, errcnt, stbcnt; 4309 s32 :32; s32 :32; s32 :32; s32 :32; 4310 s32 :32; s32 :32; s32 :32; s32 :32; 4311 s32 :32; s32 :32; s32 :32; s32 :32; 4312}; 4313 4314extern int do_adjtimex(struct timex *); 4315 4316asmlinkage long 4317sys32_adjtimex(struct timex32 *utp) 4318{ 4319 struct timex txc; 4320 int ret; 4321 4322 memset(&txc, 0, sizeof(struct timex)); 4323 4324 if(get_user(txc.modes, &utp->modes) || 4325 __get_user(txc.offset, &utp->offset) || 4326 __get_user(txc.freq, &utp->freq) || 4327 __get_user(txc.maxerror, &utp->maxerror) || 4328 __get_user(txc.esterror, &utp->esterror) || 4329 __get_user(txc.status, &utp->status) || 4330 __get_user(txc.constant, &utp->constant) || 4331 __get_user(txc.precision, &utp->precision) || 4332 __get_user(txc.tolerance, &utp->tolerance) || 4333 __get_user(txc.time.tv_sec, &utp->time.tv_sec) || 4334 __get_user(txc.time.tv_usec, &utp->time.tv_usec) || 4335 
__get_user(txc.tick, &utp->tick) || 4336 __get_user(txc.ppsfreq, &utp->ppsfreq) || 4337 __get_user(txc.jitter, &utp->jitter) || 4338 __get_user(txc.shift, &utp->shift) || 4339 __get_user(txc.stabil, &utp->stabil) || 4340 __get_user(txc.jitcnt, &utp->jitcnt) || 4341 __get_user(txc.calcnt, &utp->calcnt) || 4342 __get_user(txc.errcnt, &utp->errcnt) || 4343 __get_user(txc.stbcnt, &utp->stbcnt)) 4344 return -EFAULT; 4345 4346 ret = do_adjtimex(&txc); 4347 4348 if(put_user(txc.modes, &utp->modes) || 4349 __put_user(txc.offset, &utp->offset) || 4350 __put_user(txc.freq, &utp->freq) || 4351 __put_user(txc.maxerror, &utp->maxerror) || 4352 __put_user(txc.esterror, &utp->esterror) || 4353 __put_user(txc.status, &utp->status) || 4354 __put_user(txc.constant, &utp->constant) || 4355 __put_user(txc.precision, &utp->precision) || 4356 __put_user(txc.tolerance, &utp->tolerance) || 4357 __put_user(txc.time.tv_sec, &utp->time.tv_sec) || 4358 __put_user(txc.time.tv_usec, &utp->time.tv_usec) || 4359 __put_user(txc.tick, &utp->tick) || 4360 __put_user(txc.ppsfreq, &utp->ppsfreq) || 4361 __put_user(txc.jitter, &utp->jitter) || 4362 __put_user(txc.shift, &utp->shift) || 4363 __put_user(txc.stabil, &utp->stabil) || 4364 __put_user(txc.jitcnt, &utp->jitcnt) || 4365 __put_user(txc.calcnt, &utp->calcnt) || 4366 __put_user(txc.errcnt, &utp->errcnt) || 4367 __put_user(txc.stbcnt, &utp->stbcnt)) 4368 ret = -EFAULT; 4369 4370 return ret; 4371} 4372#endif /* NOTYET */ 4373