	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 *
	 * We use dvp as a temporary variable here, it is no longer related
	 * to the dvp above.  However, we have to ensure that both *pdvp and
	 * tdvp are locked on return.
	 */

	dvp = tdvp;
	while (
	    dvp != udvp &&
	    (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)
	) {
		int relock_pdvp = 0;

		/* If the covering mount is busy, retry the loop test. */
		if (vfs_busy(mp, 0, 0, td))
			continue;

		if (dvp == *pdvp)
			relock_pdvp = 1;
		vput(dvp);
		dvp = NULL;
		error = VFS_ROOT(mp, LK_EXCLUSIVE, &dvp, td);

		vfs_unbusy(mp, td);

		/* Reacquire the parent lock dropped by the vput() above. */
		if (relock_pdvp)
			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY, td);

		if (error) {
			*vpp = NULL;
			return (error);
		}
	}
	*vpp = dvp;
	return (0);
}

/*
 * union_lookup:
 *
 *	Look up a pathname component in the upper and/or lower layer of
 *	the union directory a_dvp and return a union vnode in *a_vpp.
 *	The parent is always returned locked (LOCKPARENT is forced below).
 */
static int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;		/* starting dir */
	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

	*ap->a_vpp = NULLVP;

	/*
	 * Disallow write attempts to the filesystem mounted read-only.
	 */
	if ((cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		return (EROFS);
	}

	/*
	 * For any lookups we do, always return with the parent locked.
	 */
	cnp->cn_flags |= LOCKPARENT;

	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	/* Assume ENOENT in each layer until a lookup says otherwise. */
	uerror = ENOENT;
	lerror = ENOENT;

	/*
	 * Get a private lock on uppervp and a reference, effectively
	 * taking it out of the union_node's control.
	 *
	 * We must lock upperdvp while holding our lock on dvp
	 * to avoid a deadlock.
	 */
	upperdvp = union_lock_upper(dun, td);

	/*
	 * Do the lookup in the upper level.
	 * If that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		/*
		 * We do not have to worry about the DOTDOT case, we've
		 * already unlocked dvp.
		 */
		UDEBUG(("A %p\n", upperdvp));

		/*
		 * Do the lookup.   We must supply a locked and referenced
		 * upperdvp to the function and will get a new locked and
		 * referenced upperdvp back, with the old having been
		 * dereferenced.
		 *
		 * If an error is returned, uppervp will be NULLVP.  If no
		 * error occurs, uppervp will be the locked and referenced
		 * return vnode, or possibly NULL, depending on what is being
		 * requested.  It is possible that the returned uppervp
		 * will be the same as upperdvp.
		 */
		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
		UDEBUG((
		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
		    uerror,
		    upperdvp,
		    vrefcnt(upperdvp),
		    VOP_ISLOCKED(upperdvp, NULL),
		    uppervp,
		    (uppervp ? vrefcnt(uppervp) : -99),
		    (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
		));

		/*
		 * Disallow write attempts to the filesystem mounted read-only.
		 */
		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
			error = EROFS;
			goto out;
		}

		/*
		 * Special case: If cn_consume != 0 then skip out.  The result
		 * of the lookup is transfered to our return variable.  If
		 * an error occured we have to throw away the results.
		 */
		if (cnp->cn_consume != 0) {
			if ((error = uerror) == 0) {
				*ap->a_vpp = uppervp;
				uppervp = NULL;
			}
			goto out;
		}

		/*
		 * Calculate whiteout, fall through.
		 */
		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				int terror;

				/* An OPAQUE upper dir hides the lower layer. */
				terror = VOP_GETATTR(upperdvp, &va,
				    cnp->cn_cred, cnp->cn_thread);
				if (terror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	}

	/*
	 * In a similar way to the upper layer, do the lookup
	 * in the lower layer.   This time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		UDEBUG(("B %p\n", lowerdvp));

		/*
		 * Force only LOOKUPs on the lower node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * We shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 *
		 * union_lookup1() requires lowerdvp to be locked on entry,
		 * and it will be unlocked on return.  The ref count will
		 * not change.  On return lowerdvp doesn't represent anything
		 * to us so we NULL it out.
		 */
		VREF(lowerdvp);
		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, td);
		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
		if (lowerdvp == lowervp)
			vrele(lowerdvp);
		else
			vput(lowerdvp);
		lowerdvp = NULL;	/* lowerdvp invalid after vput */

		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (cnp->cn_consume != 0 || lerror == EACCES) {
			if ((error = lerror) == 0) {
				*ap->a_vpp = lowervp;
				lowervp = NULL;
			}
			goto out;
		}
	} else {
		UDEBUG(("C %p\n", lowerdvp));
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
				VREF(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, td);
				lerror = 0;
			}
		}
	}

	/*
	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
	 *
	 * 1. If both layers returned an error, select the upper layer.
	 *
	 * 2. If the upper layer failed and the bottom layer succeeded,
	 *    two subcases occur:
	 *
	 *	a.  The bottom vnode is not a directory, in which case
	 *	    just return a new union vnode referencing an
	 *	    empty top layer and the existing bottom layer.
	 *
	 *	b.  The bottom vnode is a directory, in which case
	 *	    create a new directory in the top layer and
	 *	    and fall through to case 3.
	 *
	 * 3. If the top layer succeeded, then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		error = uerror;
		goto out;
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
			/*
			 * Oops, uppervp has a problem, we may have to shadow.
			 */
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
			if (uerror) {
				error = uerror;
				goto out;
			}
		}
	}

	/*
	 * Must call union_allocvp() with both the upper and lower vnodes
	 * referenced and the upper vnode locked.   ap->a_vpp is returned
	 * referenced and locked.  lowervp, uppervp, and upperdvp are
	 * absorbed by union_allocvp() whether it succeeds or fails.
	 *
	 * upperdvp is the parent directory of uppervp which may be
	 * different, depending on the path, from dvp->un_uppervp.  That's
	 * why it is a separate argument.  Note that it must be unlocked.
	 *
	 * dvp must be locked on entry to the call and will be locked on
	 * return.
	 */
	if (uppervp && uppervp != upperdvp)
		VOP_UNLOCK(uppervp, 0, td);
	if (lowervp)
		VOP_UNLOCK(lowervp, 0, td);
	if (upperdvp)
		VOP_UNLOCK(upperdvp, 0, td);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
	    uppervp, lowervp, 1);

	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? vrefcnt(*ap->a_vpp) : -99));

	/* Ownership transferred to union_allocvp(); don't release below. */
	uppervp = NULL;
	upperdvp = NULL;
	lowervp = NULL;

	/*
	 * Termination Code
	 *
	 *	- put away any extra junk laying around.  Note that lowervp
	 *	  (if not NULL) will never be the same as *ap->a_vp and
	 *	  neither will uppervp, because when we set that state we
	 *	  NULL-out lowervp or uppervp.  On the otherhand, upperdvp
	 *	  may match uppervp or *ap->a_vpp.
	 *
	 *	- relock/unlock dvp if appropriate.
	 */

out:
	if (upperdvp) {
		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
			vrele(upperdvp);
		else
			vput(upperdvp);
	}

	if (uppervp)
		vput(uppervp);

	if (lowervp)
		vput(lowervp);

	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
	    ((*ap->a_vpp) ?
	    vrefcnt(*ap->a_vpp) : -99),
	    lowervp, uppervp));

	if (error == 0 || error == EJUSTRETURN) {
		/* Sanity check: "." must resolve back to the starting dir. */
		if (cnp->cn_namelen == 1 &&
		    cnp->cn_nameptr[0] == '.' &&
		    *ap->a_vpp != dvp) {
#ifdef DIAGNOSTIC
			vprint("union_lookup: vp", *ap->a_vpp);
			vprint("union_lookup: dvp", dvp);
#endif
			panic("union_lookup returning . (%p) != startdir (%p)",
			    *ap->a_vpp, dvp);
		}
	}

	return (error);
}

/*
 * union_create:
 *
 *	a_dvp is locked on entry and remains locked on return.  a_vpp is
 *	returned locked if no error occurs, otherwise it is garbage.
 */
static int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *dvp;
	int error = EROFS;

	/* Files can only be created in the upper layer. */
	if ((dvp = union_lock_upper(dun, td)) != NULL) {
		struct vnode *vp;
		struct mount *mp;

		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error == 0) {
			mp = ap->a_dvp->v_mount;
			VOP_UNLOCK(vp, 0, td);
			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vrefcnt(vp)));
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
			    cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vrefcnt(vp)));
		}
		union_unlock_upper(dvp, td);
	}
	return (error);
}

/*
 * union_whiteout:
 *
 *	Create or delete a whiteout entry in the upper layer directory.
 */
static int
union_whiteout(ap)
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *uppervp;
	int error;

	switch (ap->a_flags) {
	case CREATE:
	case DELETE:
		uppervp = union_lock_upper(un, cnp->cn_thread);
		if (uppervp != NULLVP) {
			error = VOP_WHITEOUT(un->un_uppervp, cnp,
			    ap->a_flags);
			union_unlock_upper(uppervp, cnp->cn_thread);
		} else
			error = EOPNOTSUPP;
		break;
	case LOOKUP:
		error = EOPNOTSUPP;
		break;
	default:
		panic("union_whiteout: unknown op");
	}
	return (error);
}

/*
 * union_mknod:
 *
 *	a_dvp is locked on entry and should remain locked on return.
 *	a_vpp is garbage whether an error occurs or not.
 */
static int
union_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp;
	int error = EROFS;

	/* Device nodes can only be created in the upper layer. */
	if ((dvp = union_lock_upper(dun, cnp->cn_thread)) != NULL) {
		error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
		union_unlock_upper(dvp, cnp->cn_thread);
	}
	return (error);
}

/*
 * union_open:
 *
 *	run open VOP.  When opening the underlying vnode we have to mimic
 *	vn_open().  What we *really* need to do to avoid screwups if the
 *	open semantics change is to call vn_open().  For example, ufs blows
 *	up if you open a file but do not vmio it prior to writing.
 */
static int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	int error = 0;
	int tvpisupper = 1;

	/*
	 * If there is an existing upper vp then simply open that.
	 * The upper vp takes precedence over the lower vp.  When opening
	 * a lower vp for writing copy it to the uppervp and then open the
	 * uppervp.
	 *
	 * At the end of this section tvp will be left locked.
	 */
	if ((tvp = union_lock_upper(un, td)) == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* No need to copy the data if it will be truncated. */
			int docopy = !(mode & O_TRUNC);
			error = union_copyup(un, docopy, cred, td);
			tvp = union_lock_upper(un, td);
		} else {
			/* Track lower-layer opens for union_close(). */
			un->un_openl++;
			VREF(tvp);
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, td);
			tvpisupper = 0;
		}
	}

	/*
	 * We are holding the correct vnode, open it.
	 */
	if (error == 0)
		error = VOP_OPEN(tvp, mode, cred, td, -1);

	/*
	 * Release any locks held.
	 */
	if (tvpisupper) {
		if (tvp)
			union_unlock_upper(tvp, td);
	} else {
		vput(tvp);
	}
	return (error);
}

/*
 * union_close:
 *
 *	It is unclear whether a_vp is passed locked or unlocked.  Whatever
 *	the case we do not change it.
 */
static int
union_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	if ((vp = un->un_uppervp) == NULLVP) {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		/* Balance the open count taken on the lower vnode. */
		--un->un_openl;
		vp = un->un_lowervp;
	}
	ap->a_vp = vp;
	return (VOP_CLOSE_AP(ap));
}

/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
static int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	int error = EACCES;
	struct vnode *vp;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_mode & VWRITE) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}

	/* An upper vnode exists: the upper layer's check is authoritative. */
	if ((vp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_vp = vp;
		error = VOP_ACCESS_AP(ap);
		union_unlock_upper(vp, td);
		return(error);
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		ap->a_vp = vp;

		/*
		 * Remove VWRITE from a_mode if our mount point is RW, because
		 * we want to allow writes and lowervp may be read-only.
		 */
		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ap->a_mode &= ~VWRITE;

		error = VOP_ACCESS_AP(ap);
		if (error == 0) {
			struct union_mount *um;

			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);

			if (um->um_op == UNMNT_BELOW) {
				/* Re-check with the mount's credentials. */
				ap->a_cred = um->um_cred;
				error = VOP_ACCESS_AP(ap);
			}
		}
		VOP_UNLOCK(vp, 0, td);
	}
	return(error);
}

/*
 * We handle getattr only to change the fsid and
 * track object sizes
 *
 * It's not clear whether VOP_GETATTR is to be
 * called with the vnode locked or not.  stat() calls
 * it with (vp) locked, and fstat() calls it with
 * (vp) unlocked.
 *
 * Because of this we cannot use our normal locking functions
 * if we do not intend to lock the main a_vp node.
 * At the moment
 * we are running without any specific locking at all, but beware
 * to any programmer that care must be taken if locking is added
 * to this function.
 */
static int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct union_mount *um = MOUNTTOUNIONMOUNT(ap->a_vp->v_mount);
	struct vnode *vp;
	struct vattr *vap;
	struct vattr va;

	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 */
	vap = ap->a_vap;

	if ((vp = un->un_uppervp) != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	if (vp == NULLVP) {
		/* No upper layer: take all attributes from the lower one. */
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
		/* Also query the lower directory, into the scratch vattr. */
		vp = un->un_lowervp;
		vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	if (ap->a_vap->va_fsid == um->um_upperdev)
		ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	/* Fold the lower directory's link count into the result. */
	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;
	return (0);
}

/*
 * union_setattr:
 *
 *	Set attributes on the upper layer, copying a lower-only regular
 *	file up first when necessary.
 */
static int
union_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	struct vattr *vap = ap->a_vap;
	struct vnode *uppervp;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	     vap->va_mtime.tv_sec != VNOVAL ||
	     vap->va_mode != (mode_t)VNOVAL)) {
		return (EROFS);
	}

	/*
	 * Handle case of truncating lower object to zero size
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
		error = union_copyup(un, (ap->a_vap->va_size != 0),
		    ap->a_cred, ap->a_td);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	error = EROFS;
	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
		    ap->a_cred, ap->a_td);
		/* Keep the cached upper size in sync after a size change. */
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
		union_unlock_upper(uppervp, td);
	}
	return (error);
}

/*
 * union_read:
 *
 *	Pass the read through to whichever layer currently backs the
 *	union node, then update the cached size from the uio offset.
 */
static int
union_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uvp;
	int error;

	uvp = union_lock_other(un, td);
	KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));

	error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	union_unlock_other(uvp, td);

	/*
	 * XXX
	 * Perhaps the size of the underlying object has changed under
	 * our feet.  Take advantage of the offset information present
	 * in the uio structure.
	 */
	if (error == 0) {
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;

		if (uvp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				union_newsize(ap->a_vp, cur, VNOVAL);
		} else {
			if (cur > un->un_lowersz)
				union_newsize(ap->a_vp, VNOVAL, cur);
		}
	}
	return (error);
}

/*
 * union_write:
 *
 *	Writes always go to the upper layer, which must exist here.
 */
static int
union_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uppervp;
	int error;

	if ((uppervp = union_lock_upper(un, td)) == NULLVP)
		panic("union: missing upper layer in write");

	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);

	/*
	 * The size of the underlying object may be changed by the
	 * write.
	 */
	if (error == 0) {
		off_t cur = ap->a_uio->uio_offset;

		if (cur > un->un_uppersz)
			union_newsize(ap->a_vp, cur, VNOVAL);
	}
	union_unlock_upper(uppervp, td);
	return (error);
}

/*
 * union_lease:
 *
 *	Pass the lease request through to the backing vnode.
 */
static int
union_lease(ap)
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap;
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VOP_LEASE_AP(ap));
}

/*
 * union_ioctl:
 *
 *	Pass the ioctl through to the backing vnode.
 */
static int
union_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VOP_IOCTL_AP(ap));
}

/*
 * union_poll:
 *
 *	Pass the poll through to the backing vnode.
 */
static int
union_poll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	}
*/ *ap; 1120{ 1121 struct vnode *ovp = OTHERVP(ap->a_vp); 1122 1123 ap->a_vp = ovp; 1124 return (VOP_POLL_AP(ap)); 1125} 1126 1127static int 1128union_fsync(ap) 1129 struct vop_fsync_args /* { 1130 struct vnode *a_vp; 1131 struct ucred *a_cred; 1132 int a_waitfor; 1133 struct thread *a_td; 1134 } */ *ap; 1135{ 1136 int error = 0; 1137 struct thread *td = ap->a_td; 1138 struct vnode *targetvp; 1139 struct union_node *un = VTOUNION(ap->a_vp); 1140 1141 if ((targetvp = union_lock_other(un, td)) != NULLVP) { 1142 error = VOP_FSYNC(targetvp, ap->a_waitfor, td); 1143 union_unlock_other(targetvp, td); 1144 } 1145 1146 return (error); 1147} 1148 1149/* 1150 * union_remove: 1151 * 1152 * Remove the specified cnp. The dvp and vp are passed to us locked 1153 * and must remain locked on return. 1154 */ 1155 1156static int 1157union_remove(ap) 1158 struct vop_remove_args /* { 1159 struct vnode *a_dvp; 1160 struct vnode *a_vp; 1161 struct componentname *a_cnp; 1162 } */ *ap; 1163{ 1164 struct union_node *dun = VTOUNION(ap->a_dvp); 1165 struct union_node *un = VTOUNION(ap->a_vp); 1166 struct componentname *cnp = ap->a_cnp; 1167 struct thread *td = cnp->cn_thread; 1168 struct vnode *uppervp; 1169 struct vnode *upperdvp; 1170 int error; 1171 1172 if ((upperdvp = union_lock_upper(dun, td)) == NULLVP) 1173 panic("union remove: null upper vnode"); 1174 1175 if ((uppervp = union_lock_upper(un, td)) != NULLVP) { 1176 if (union_dowhiteout(un, cnp->cn_cred, td)) 1177 cnp->cn_flags |= DOWHITEOUT; 1178 if (cnp->cn_flags & DOWHITEOUT) /* XXX fs corruption */ 1179 error = EOPNOTSUPP; 1180 else 1181 error = VOP_REMOVE(upperdvp, uppervp, cnp); 1182 if (!error) 1183 union_removed_upper(un); 1184 union_unlock_upper(uppervp, td); 1185 } else { 1186 error = union_mkwhiteout( 1187 MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount), 1188 upperdvp, ap->a_cnp, un->un_path); 1189 } 1190 union_unlock_upper(upperdvp, td); 1191 return (error); 1192} 1193 1194/* 1195 * union_link: 1196 * 1197 * tdvp and vp will be 
 *	locked on entry.
 *	tdvp and vp should remain locked on return.
 */
static int
union_link(ap)
	struct vop_link_args /* {
		struct vnode *a_tdvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct union_node *dun = VTOUNION(ap->a_tdvp);
	struct vnode *vp;
	struct vnode *tdvp;
	int error = 0;

	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
		/* The source is not a union vnode; link it as-is. */
		vp = ap->a_vp;
	} else {
		struct union_node *tun = VTOUNION(ap->a_vp);

		if (tun->un_uppervp == NULLVP) {
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				if (dun->un_flags & UN_ULOCK) {
					dun->un_flags &= ~UN_ULOCK;
					VOP_UNLOCK(dun->un_uppervp, 0, td);
				}
			}
#endif
			/* The link source must reside in the upper layer. */
			error = union_copyup(tun, 1, cnp->cn_cred, td);
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				vn_lock(dun->un_uppervp,
				    LK_EXCLUSIVE | LK_RETRY, td);
				dun->un_flags |= UN_ULOCK;
			}
#endif
			if (error)
				return (error);
		}
		vp = tun->un_uppervp;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/*
	 * Make sure upper is locked, then unlock the union directory we were
	 * called with to avoid a deadlock while we are calling VOP_LINK() on
	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
	 * is expected to be locked on return.
	 */
	if ((tdvp = union_lock_upper(dun, td)) == NULLVP)
		return (EROFS);

	VOP_UNLOCK(ap->a_tdvp, 0, td);		/* unlock calling node */
	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */

	/*
	 * Unlock tun->un_uppervp if we locked it above.
	 */
	if (ap->a_tdvp->v_op == ap->a_vp->v_op)
		VOP_UNLOCK(vp, 0, td);
	/*
	 * We have to unlock tdvp prior to relocking our calling node in
	 * order to avoid a deadlock.  We also have to unlock ap->a_vp
	 * before relocking the directory, but then we have to relock
	 * ap->a_vp as our caller expects.
	 */
	VOP_UNLOCK(ap->a_vp, 0, td);
	union_unlock_upper(tdvp, td);
	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY, td);
	vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}

/*
 * union_rename:
 *
 *	Map each union vnode argument to its upper-layer vnode and hand
 *	the rename to the upper filesystem's VOP_RENAME().
 */
static int
union_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	/*
	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
	 * replace the fdvp, release the original one and ref the new one.
	 */
	if (fdvp->v_op == &union_vnodeops) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}
		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	/*
	 * Figure out what fvp to pass to our upper or lower vnode.  If we
	 * replace the fvp, release the original one and ref the new one.
	 */
	if (fvp->v_op == &union_vnodeops) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
#if 0
		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
#endif

		if (un->un_uppervp == NULLVP) {
			switch(fvp->v_type) {
			case VREG:
				/* Copy the file up so it can be renamed. */
				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_thread);
				VOP_UNLOCK(un->un_vnode, 0, ap->a_fcnp->cn_thread);
				if (error)
					goto bad;
				break;
			case VDIR:
				/*
				 * XXX not yet.
				 *
				 * There is only one way to rename a directory
				 * based in the lowervp, and that is to copy
				 * the entire directory hierarchy.  Otherwise
				 * it would not last across a reboot.
				 */
#if 0
				vrele(fvp);
				fvp = NULL;
				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
				error = union_mkshadow(um, fdvp,
				    ap->a_fcnp, &un->un_uppervp);
				VOP_UNLOCK(fdvp, 0, ap->a_fcnp->cn_thread);
				if (un->un_uppervp)
					VOP_UNLOCK(un->un_uppervp, 0, ap->a_fcnp->cn_thread);
				if (error)
					goto bad;
				break;
#endif
			default:
				error = EXDEV;
				goto bad;
			}
		}

		/* A lower copy remains; hide it with a whiteout. */
		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;
		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	/*
	 * Figure out what tdvp (destination directory) to pass to the
	 * lower level.  If we replace it with uppervp, we need to vput the
	 * old one.  The exclusive lock is transfered to what we will pass
	 * down in the VOP_RENAME() and we replace uppervp with a simple
	 * reference.
	 */
	if (tdvp->v_op == &union_vnodeops) {
		struct union_node *un = VTOUNION(tdvp);

		if (un->un_uppervp == NULLVP) {
			/*
			 * This should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		/*
		 * New tdvp is a lock and reference on uppervp.
		 * Put away the old tdvp.
		 */
		tdvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
		vput(ap->a_tdvp);
	}

	/*
	 * Figure out what tvp (destination file) to pass to the
	 * lower level.
	 *
	 * If the uppervp file does not exist, put away the (wrong)
	 * file and change tvp to NULL.
	 */
	if (tvp != NULLVP && tvp->v_op == &union_vnodeops) {
		struct union_node *un = VTOUNION(tvp);

		tvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
		vput(ap->a_tvp);
		/* note: tvp may be NULL */
	}

	/*
	 * VOP_RENAME() releases/vputs prior to returning, so we have no
	 * cleanup to do.
	 */
	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

	/*
	 * Error.  We still have to release / vput the various elements.
	 */
bad:
	vrele(fdvp);
	if (fvp)
		vrele(fvp);
	vput(tdvp);
	if (tvp != NULLVP) {
		if (tvp != tdvp)
			vput(tvp);
		else
			vrele(tvp);
	}
	return (error);
}

/*
 * union_mkdir:
 *
 *	Create the directory in the upper layer and wrap it in a new
 *	union vnode returned in a_vpp.
 */
static int
union_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *upperdvp;
	int error = EROFS;

	if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
		struct vnode *vp;

		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
		union_unlock_upper(upperdvp, td);

		if (error == 0) {
			VOP_UNLOCK(vp, 0, td);
			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vrefcnt(vp)));
			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
			    ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vrefcnt(vp)));
		}
	}
	return (error);
}

/*
 * union_rmdir:
 *
 *	Remove the directory from the upper layer, or create a whiteout
 *	when it exists only in the lower layer.
 */
static int
union_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *upperdvp;
	struct vnode *uppervp;
	int error;

	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union rmdir: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= DOWHITEOUT;
		if (cnp->cn_flags & DOWHITEOUT)		/* XXX fs corruption */
			error = EOPNOTSUPP;
		else
			error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
		if (!error)
			union_removed_upper(un);
		union_unlock_upper(uppervp, td);
	} else {
		error = union_mkwhiteout(
		    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
		    dun->un_uppervp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}

/*
 * union_symlink:
 *
 *	dvp is locked on entry and remains locked on return.  a_vpp is garbage
 *	(unused).
 */

static int
union_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *dvp;
	int error = EROFS;	/* no upper layer -> read-only filesystem */

	/* Symlinks can only be created in the upper layer. */
	if ((dvp = union_lock_upper(dun, td)) != NULLVP) {
		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
			    ap->a_target);
		union_unlock_upper(dvp, td);
	}
	return (error);
}

/*
 * union_readdir() works in concert with getdirentries() and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries() is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 */
static int
union_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		u_long *a_cookies;
		int a_ncookies;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uvp;
	int error = 0;

	/*
	 * Only the upper layer is read here; the lower layer is reached
	 * by getdirentries() walking down the stack (see comment above).
	 */
	if ((uvp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_vp = uvp;
		error = VOP_READDIR_AP(ap);
		union_unlock_upper(uvp, td);
	}
	return(error);
}

/*
 * union_readlink:
 *
 *	Pass the readlink through to whichever layer backs the union node
 *	(upper if present, otherwise lower), locked for the duration.
 */
static int
union_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp;

	vp = union_lock_other(un, td);
	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));

	ap->a_vp = vp;
	error = VOP_READLINK_AP(ap);
	union_unlock_other(vp, td);

	return (error);
}

/*
 * union_getwritemount:
 *
 *	Writes always go to the upper layer, so report the upper vnode's
 *	write mount.  With no upper vnode: a vnode on the free list is in
 *	transition (EOPNOTSUPP), otherwise the union is not writable
 *	through this node (EACCES).
 */
static int
union_getwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *uvp = UPPERVP(vp);

	if (uvp == NULL) {
		VI_LOCK(vp);
		if (vp->v_iflag & VI_FREE) {
			VI_UNLOCK(vp);
			return (EOPNOTSUPP);
		}
		VI_UNLOCK(vp);
		return (EACCES);
	}
	return(VOP_GETWRITEMOUNT(uvp, ap->a_mpp));
}

/*
 * union_inactive:
 *
 *	Called with the vnode locked.  We are expected to unlock the vnode.
 */

static int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 */

	if (un->un_dircache != NULL)
		union_dircache_free(un);

#if 0
	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
		un->un_flags &= ~UN_ULOCK;
		VOP_UNLOCK(un->un_uppervp, 0, td);
	}
#endif

	/* If the node was never cached, it cannot be reused; destroy it. */
	if ((un->un_flags & UN_CACHED) == 0)
		vgone(vp);

	return (0);
}

/*
 * union_reclaim:
 *
 *	Final teardown of a union node; union_freevp() releases the
 *	upper/lower references held by the node.
 */
static int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	union_freevp(ap->a_vp);

	return (0);
}

/*
 * union_print:
 *
 *	ddb/vprint support: show both layers backing this union vnode.
 */
static int
union_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, uppervp=%p, lowervp=%p\n",
	    vp, UPPERVP(vp), LOWERVP(vp));
	if (UPPERVP(vp) != NULLVP)
		vprint("union: upper", UPPERVP(vp));
	if (LOWERVP(vp) != NULLVP)
		vprint("union: lower", LOWERVP(vp));

	return (0);
}

/*
 * union_pathconf:
 *
 *	Pass through to the backing layer (upper if present, else lower),
 *	locked for the duration of the call.
 */
static int
union_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{
	int error;
	struct thread *td = curthread;		/* XXX */
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, td);
	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));

	ap->a_vp = vp;
	error = VOP_PATHCONF_AP(ap);
	union_unlock_other(vp, td);

	return (error);
}

/*
 * union_advlock:
 *
 *	Advisory locks are applied to the backing vnode directly,
 *	without taking the union-layer lock.
 */
static int
union_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{
	register struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VOP_ADVLOCK_AP(ap));
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * YYY - and it is not coherent with anything
 *
 * This goes away with a merged VM/buffer cache.
 */
static int
union_strategy(ap)
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	struct vnode *othervp = OTHERVP(ap->a_vp);

#ifdef DIAGNOSTIC
	if (othervp == NULLVP)
		panic("union_strategy: nil vp");
	/* Writes must have been redirected to the upper layer by copyup. */
	if ((bp->b_iocmd == BIO_WRITE) &&
	    (othervp == LOWERVP(ap->a_vp)))
		panic("union_strategy: writing to lowervp");
#endif
	return (VOP_STRATEGY(othervp, bp));
}

/*
 * ACL and extended-attribute operations below all follow the same
 * passthrough pattern: lock whichever layer backs the node, substitute
 * it for a_vp, re-dispatch the operation, then unlock.
 */
static int
union_getacl(ap)
	struct vop_getacl_args /* {
		struct vnode *a_vp;
		acl_type_t a_type;
		struct acl *a_aclp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_GETACL_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

static int
union_setacl(ap)
	struct vop_setacl_args /* {
		struct vnode *a_vp;
		acl_type_t a_type;
		struct acl *a_aclp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_SETACL_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

static int
union_aclcheck(ap)
	struct vop_aclcheck_args /* {
		struct vnode *a_vp;
		acl_type_t a_type;
		struct acl *a_aclp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/* aclcheck does not modify state; no union-layer lock is taken. */
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VOP_ACLCHECK_AP(ap));
}

static int
union_closeextattr(ap)
	struct vop_closeextattr_args /* {
		struct vnode *a_vp;
		int a_commit;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_CLOSEEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

static int
union_getextattr(ap)
	struct vop_getextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		const char *a_name;
		struct uio *a_uio;
		size_t *a_size;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_GETEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

static int
union_listextattr(ap)
	struct vop_listextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		struct uio *a_uio;
		size_t *a_size;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_LISTEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

static int
union_openextattr(ap)
	struct vop_openextattr_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_OPENEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

static int
union_deleteextattr(ap)
	struct vop_deleteextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		const char *a_name;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_DELETEEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

static int
union_setextattr(ap)
	struct vop_setextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		const char *a_name;
		struct uio *a_uio;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_SETEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

static int
union_setlabel(ap)
	struct vop_setlabel_args /* {
		struct vnode *a_vp;
		struct label *a_label;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_SETLABEL_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

/*
 * Global vfs data structures
 */
struct vop_vector union_vnodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		union_access,
	.vop_aclcheck =		union_aclcheck,
	.vop_advlock =		union_advlock,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_close =		union_close,
	.vop_closeextattr =	union_closeextattr,
	.vop_create =		union_create,
	.vop_deleteextattr =	union_deleteextattr,
	.vop_fsync =		union_fsync,
	.vop_getacl =		union_getacl,
	.vop_getattr =		union_getattr,
	.vop_getextattr =	union_getextattr,
	.vop_getwritemount =	union_getwritemount,
	.vop_inactive =		union_inactive,
	.vop_ioctl =		union_ioctl,
	.vop_lease =		union_lease,
	.vop_link =		union_link,
	.vop_listextattr =	union_listextattr,
	.vop_lookup =		union_lookup,
	.vop_mkdir =		union_mkdir,
	.vop_mknod =		union_mknod,
	.vop_open =		union_open,
	.vop_openextattr =	union_openextattr,
	.vop_pathconf =		union_pathconf,
	.vop_poll =		union_poll,
	.vop_print =		union_print,
	.vop_read =		union_read,
	.vop_readdir =		union_readdir,
	.vop_readlink =		union_readlink,
	.vop_reclaim =		union_reclaim,
	.vop_remove =		union_remove,
	.vop_rename =		union_rename,
	.vop_rmdir =		union_rmdir,
	.vop_setacl =		union_setacl,
	.vop_setattr =		union_setattr,
	.vop_setextattr =	union_setextattr,
	.vop_setlabel =		union_setlabel,
	.vop_strategy =		union_strategy,
	.vop_symlink =		union_symlink,
	.vop_whiteout =		union_whiteout,
	.vop_write =		union_write,
};
	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 *
	 * We use dvp as a temporary variable here, it is no longer related
	 * to the dvp above.  However, we have to ensure that both *pdvp and
	 * tdvp are locked on return.
	 */

	dvp = tdvp;
	while (
	    dvp != udvp &&
	    (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)
	) {
		int relock_pdvp = 0;

		/*
		 * NOTE(review): if vfs_busy() keeps failing this loop spins
		 * without releasing dvp or sleeping -- confirm vfs_busy()
		 * blocks (flags are 0) rather than returning immediately.
		 */
		if (vfs_busy(mp, 0, 0, td))
			continue;

		if (dvp == *pdvp)
			relock_pdvp = 1;
		vput(dvp);
		dvp = NULL;
		error = VFS_ROOT(mp, LK_EXCLUSIVE, &dvp, td);

		vfs_unbusy(mp, td);

		if (relock_pdvp)
			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY, td);

		if (error) {
			*vpp = NULL;
			return (error);
		}
	}
	*vpp = dvp;
	return (0);
}

/*
 * union_lookup:
 *
 *	Look the name up in the upper layer, then in the lower layer, and
 *	combine the results into a union vnode via union_allocvp().  The
 *	parent (a_dvp) is locked on entry; LOCKPARENT is forced so both
 *	sub-lookups return with their parent locked.
 */
static int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;		/* starting dir */
	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

	*ap->a_vpp = NULLVP;

	/*
	 * Disallow write attempts to the filesystem mounted read-only.
	 */
	if ((cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		return (EROFS);
	}

	/*
	 * For any lookups we do, always return with the parent locked.
	 */
	cnp->cn_flags |= LOCKPARENT;

	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	uerror = ENOENT;
	lerror = ENOENT;

	/*
	 * Get a private lock on uppervp and a reference, effectively
	 * taking it out of the union_node's control.
	 *
	 * We must lock upperdvp while holding our lock on dvp
	 * to avoid a deadlock.
	 */
	upperdvp = union_lock_upper(dun, td);

	/*
	 * Do the lookup in the upper level.
	 * If that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		/*
		 * We do not have to worry about the DOTDOT case, we've
		 * already unlocked dvp.
		 */
		UDEBUG(("A %p\n", upperdvp));

		/*
		 * Do the lookup.  We must supply a locked and referenced
		 * upperdvp to the function and will get a new locked and
		 * referenced upperdvp back, with the old having been
		 * dereferenced.
		 *
		 * If an error is returned, uppervp will be NULLVP.  If no
		 * error occurs, uppervp will be the locked and referenced
		 * return vnode, or possibly NULL, depending on what is being
		 * requested.  It is possible that the returned uppervp
		 * will be the same as upperdvp.
		 */
		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
		UDEBUG((
		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
		    uerror,
		    upperdvp,
		    vrefcnt(upperdvp),
		    VOP_ISLOCKED(upperdvp, NULL),
		    uppervp,
		    (uppervp ? vrefcnt(uppervp) : -99),
		    (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
		));

		/*
		 * Disallow write attempts to the filesystem mounted read-only.
		 */
		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
			error = EROFS;
			goto out;
		}

		/*
		 * Special case: If cn_consume != 0 then skip out.  The result
		 * of the lookup is transferred to our return variable.  If
		 * an error occurred we have to throw away the results.
		 */

		if (cnp->cn_consume != 0) {
			if ((error = uerror) == 0) {
				*ap->a_vpp = uppervp;
				uppervp = NULL;
			}
			goto out;
		}

		/*
		 * Calculate whiteout, fall through.  The OPAQUE flag on the
		 * upper directory means the lower layer is hidden entirely.
		 */

		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				int terror;

				terror = VOP_GETATTR(upperdvp, &va,
					cnp->cn_cred, cnp->cn_thread);
				if (terror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	}

	/*
	 * In a similar way to the upper layer, do the lookup
	 * in the lower layer.  This time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */

	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		UDEBUG(("B %p\n", lowerdvp));

		/*
		 * Force only LOOKUPs on the lower node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * We shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 *
		 * union_lookup1() requires lowervp to be locked on entry,
		 * and it will be unlocked on return.  The ref count will
		 * not change.  On return lowervp doesn't represent anything
		 * to us so we NULL it out.
		 */
		VREF(lowerdvp);
		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, td);
		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
		if (lowerdvp == lowervp)
			vrele(lowerdvp);
		else
			vput(lowerdvp);
		lowerdvp = NULL;	/* lowerdvp invalid after vput */

		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (cnp->cn_consume != 0 || lerror == EACCES) {
			if ((error = lerror) == 0) {
				*ap->a_vpp = lowervp;
				lowervp = NULL;
			}
			goto out;
		}
	} else {
		UDEBUG(("C %p\n", lowerdvp));
		/* `..' from a node with no lower dir: use the parent's. */
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
				VREF(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, td);
				lerror = 0;
			}
		}
	}

	/*
	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
	 *
	 *	1. If both layers returned an error, select the upper layer.
	 *
	 *	2. If the upper layer failed and the bottom layer succeeded,
	 *	   two subcases occur:
	 *
	 *	   a.  The bottom vnode is not a directory, in which case
	 *	       just return a new union vnode referencing an
	 *	       empty top layer and the existing bottom layer.
	 *
	 *	   b.  The bottom vnode is a directory, in which case
	 *	       create a new directory in the top layer and
	 *	       and fall through to case 3.
	 *
	 *	3. If the top layer succeeded, then return a new union
	 *	   vnode referencing whatever the new top layer and
	 *	   whatever the bottom layer returned.
	 */

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		error = uerror;
		goto out;
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
			/*
			 * Oops, uppervp has a problem, we may have to shadow.
			 */
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
			if (uerror) {
				error = uerror;
				goto out;
			}
		}
	}

	/*
	 * Must call union_allocvp() with both the upper and lower vnodes
	 * referenced and the upper vnode locked.  ap->a_vpp is returned
	 * referenced and locked.  lowervp, uppervp, and upperdvp are
	 * absorbed by union_allocvp() whether it succeeds or fails.
	 *
	 * upperdvp is the parent directory of uppervp which may be
	 * different, depending on the path, from dvp->un_uppervp.  That's
	 * why it is a separate argument.  Note that it must be unlocked.
	 *
	 * dvp must be locked on entry to the call and will be locked on
	 * return.
	 */

	if (uppervp && uppervp != upperdvp)
		VOP_UNLOCK(uppervp, 0, td);
	if (lowervp)
		VOP_UNLOCK(lowervp, 0, td);
	if (upperdvp)
		VOP_UNLOCK(upperdvp, 0, td);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
			      uppervp, lowervp, 1);

	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? vrefcnt(*ap->a_vpp) : -99));

	/* Ownership passed to union_allocvp(); don't release them below. */
	uppervp = NULL;
	upperdvp = NULL;
	lowervp = NULL;

	/*
	 * Termination Code
	 *
	 *	- put away any extra junk laying around.  Note that lowervp
	 *	  (if not NULL) will never be the same as *ap->a_vp and
	 *	  neither will uppervp, because when we set that state we
	 *	  NULL-out lowervp or uppervp.  On the other hand, upperdvp
	 *	  may match uppervp or *ap->a_vpp.
	 *
	 *	- relock/unlock dvp if appropriate.
	 */

out:
	if (upperdvp) {
		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
			vrele(upperdvp);
		else
			vput(upperdvp);
	}

	if (uppervp)
		vput(uppervp);

	if (lowervp)
		vput(lowervp);

	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
	    ((*ap->a_vpp) ? vrefcnt(*ap->a_vpp) : -99),
	    lowervp, uppervp));

	if (error == 0 || error == EJUSTRETURN) {
		/* Sanity: a lookup of "." must return the starting dir. */
		if (cnp->cn_namelen == 1 &&
		    cnp->cn_nameptr[0] == '.' &&
		    *ap->a_vpp != dvp) {
#ifdef DIAGNOSTIC
			vprint("union_lookup: vp", *ap->a_vpp);
			vprint("union_lookup: dvp", dvp);
#endif
			panic("union_lookup returning . (%p) != startdir (%p)",
			    *ap->a_vpp, dvp);
		}
	}

	return (error);
}

/*
 * union_create:
 *
 *	a_dvp is locked on entry and remains locked on return.  a_vpp is returned
 *	locked if no error occurs, otherwise it is garbage.
 */

static int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *dvp;
	int error = EROFS;	/* no upper layer -> read-only filesystem */

	if ((dvp = union_lock_upper(dun, td)) != NULL) {
		struct vnode *vp;
		struct mount *mp;

		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error == 0) {
			mp = ap->a_dvp->v_mount;
			/* union_allocvp() wants vp referenced, unlocked. */
			VOP_UNLOCK(vp, 0, td);
			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vrefcnt(vp)));
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
				cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vrefcnt(vp)));
		}
		union_unlock_upper(dvp, td);
	}
	return (error);
}

/*
 * union_whiteout:
 *
 *	Whiteouts can only exist in the upper layer; without one the
 *	operation is unsupported.
 */
static int
union_whiteout(ap)
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *uppervp;
	int error;

	switch (ap->a_flags) {
	case CREATE:
	case DELETE:
		uppervp = union_lock_upper(un, cnp->cn_thread);
		if (uppervp != NULLVP) {
			error = VOP_WHITEOUT(un->un_uppervp, cnp,
			    ap->a_flags);
			union_unlock_upper(uppervp, cnp->cn_thread);
		} else
			error = EOPNOTSUPP;
		break;
	case LOOKUP:
		error = EOPNOTSUPP;
		break;
	default:
		panic("union_whiteout: unknown op");
	}
	return (error);
}

/*
 * union_mknod:
 *
 *	a_dvp is locked on entry and should remain locked on return.
 *	a_vpp is garbage whether an error occurs or not.
 */

static int
union_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp;
	int error = EROFS;	/* no upper layer -> read-only filesystem */

	if ((dvp = union_lock_upper(dun, cnp->cn_thread)) != NULL) {
		error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
		union_unlock_upper(dvp, cnp->cn_thread);
	}
	return (error);
}

/*
 * union_open:
 *
 *	run open VOP.  When opening the underlying vnode we have to mimic
 *	vn_open().  What we *really* need to do to avoid screwups if the
 *	open semantics change is to call vn_open().  For example, ufs blows
 *	up if you open a file but do not vmio it prior to writing.
 */

static int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	int error = 0;
	int tvpisupper = 1;

	/*
	 * If there is an existing upper vp then simply open that.
	 * The upper vp takes precedence over the lower vp.  When opening
	 * a lower vp for writing copy it to the uppervp and then open the
	 * uppervp.
	 *
	 * At the end of this section tvp will be left locked.
	 */
	if ((tvp = union_lock_upper(un, td)) == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* O_TRUNC means the old contents need not be copied. */
			int docopy = !(mode & O_TRUNC);
			error = union_copyup(un, docopy, cred, td);
			tvp = union_lock_upper(un, td);
		} else {
			un->un_openl++;
			VREF(tvp);
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, td);
			tvpisupper = 0;
		}
	}

	/*
	 * We are holding the correct vnode, open it.
	 */

	if (error == 0)
		error = VOP_OPEN(tvp, mode, cred, td, -1);

	/*
	 * Release any locks held.
	 */
	if (tvpisupper) {
		if (tvp)
			union_unlock_upper(tvp, td);
	} else {
		vput(tvp);
	}
	return (error);
}

/*
 * union_close:
 *
 *	It is unclear whether a_vp is passed locked or unlocked.  Whatever
 *	the case we do not change it.
 */

static int
union_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	if ((vp = un->un_uppervp) == NULLVP) {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		/* Balances the un_openl++ done in union_open(). */
		--un->un_openl;
		vp = un->un_lowervp;
	}
	ap->a_vp = vp;
	return (VOP_CLOSE_AP(ap));
}

/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
static int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	int error = EACCES;
	struct vnode *vp;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_mode & VWRITE) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}

	/* An upper layer, when present, is authoritative. */
	if ((vp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_vp = vp;
		error = VOP_ACCESS_AP(ap);
		union_unlock_upper(vp, td);
		return(error);
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		ap->a_vp = vp;

		/*
		 * Remove VWRITE from a_mode if our mount point is RW, because
		 * we want to allow writes and lowervp may be read-only.
		 */
		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ap->a_mode &= ~VWRITE;

		error = VOP_ACCESS_AP(ap);
		if (error == 0) {
			struct union_mount *um;

			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);

			/* UNMNT_BELOW: re-check with the mount's own cred. */
			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = VOP_ACCESS_AP(ap);
			}
		}
		VOP_UNLOCK(vp, 0, td);
	}
	return(error);
}

/*
 * We handle getattr only to change the fsid and
 * track object sizes
 *
 * It's not clear whether VOP_GETATTR is to be
 * called with the vnode locked or not. stat() calls
 * it with (vp) locked, and fstat() calls it with
 * (vp) unlocked.
 *
 * Because of this we cannot use our normal locking functions
 * if we do not intend to lock the main a_vp node.
At the moment 877 * we are running without any specific locking at all, but beware 878 * to any programmer that care must be taken if locking is added 879 * to this function. 880 */ 881 882static int 883union_getattr(ap) 884 struct vop_getattr_args /* { 885 struct vnode *a_vp; 886 struct vattr *a_vap; 887 struct ucred *a_cred; 888 struct thread *a_td; 889 } */ *ap; 890{ 891 int error; 892 struct union_node *un = VTOUNION(ap->a_vp); 893 struct union_mount *um = MOUNTTOUNIONMOUNT(ap->a_vp->v_mount); 894 struct vnode *vp; 895 struct vattr *vap; 896 struct vattr va; 897 898 /* 899 * Some programs walk the filesystem hierarchy by counting 900 * links to directories to avoid stat'ing all the time. 901 * This means the link count on directories needs to be "correct". 902 * The only way to do that is to call getattr on both layers 903 * and fix up the link count. The link count will not necessarily 904 * be accurate but will be large enough to defeat the tree walkers. 905 */ 906 907 vap = ap->a_vap; 908 909 if ((vp = un->un_uppervp) != NULLVP) { 910 error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td); 911 if (error) 912 return (error); 913 /* XXX isn't this dangerous without a lock? */ 914 union_newsize(ap->a_vp, vap->va_size, VNOVAL); 915 } 916 917 if (vp == NULLVP) { 918 vp = un->un_lowervp; 919 } else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) { 920 vp = un->un_lowervp; 921 vap = &va; 922 } else { 923 vp = NULLVP; 924 } 925 926 if (vp != NULLVP) { 927 error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td); 928 if (error) 929 return (error); 930 /* XXX isn't this dangerous without a lock? 
*/ 931 union_newsize(ap->a_vp, VNOVAL, vap->va_size); 932 } 933 934 if (ap->a_vap->va_fsid == um->um_upperdev) 935 ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; 936 937 if ((vap != ap->a_vap) && (vap->va_type == VDIR)) 938 ap->a_vap->va_nlink += vap->va_nlink; 939 return (0); 940} 941 942static int 943union_setattr(ap) 944 struct vop_setattr_args /* { 945 struct vnode *a_vp; 946 struct vattr *a_vap; 947 struct ucred *a_cred; 948 struct thread *a_td; 949 } */ *ap; 950{ 951 struct union_node *un = VTOUNION(ap->a_vp); 952 struct thread *td = ap->a_td; 953 struct vattr *vap = ap->a_vap; 954 struct vnode *uppervp; 955 int error; 956 957 /* 958 * Disallow write attempts on filesystems mounted read-only. 959 */ 960 if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) && 961 (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || 962 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || 963 vap->va_mtime.tv_sec != VNOVAL || 964 vap->va_mode != (mode_t)VNOVAL)) { 965 return (EROFS); 966 } 967 968 /* 969 * Handle case of truncating lower object to zero size 970 * by creating a zero length upper object. This is to 971 * handle the case of open with O_TRUNC and O_CREAT. 972 */ 973 if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) { 974 error = union_copyup(un, (ap->a_vap->va_size != 0), 975 ap->a_cred, ap->a_td); 976 if (error) 977 return (error); 978 } 979 980 /* 981 * Try to set attributes in upper layer, 982 * otherwise return read-only filesystem error. 
983 */ 984 error = EROFS; 985 if ((uppervp = union_lock_upper(un, td)) != NULLVP) { 986 error = VOP_SETATTR(un->un_uppervp, ap->a_vap, 987 ap->a_cred, ap->a_td); 988 if ((error == 0) && (ap->a_vap->va_size != VNOVAL)) 989 union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL); 990 union_unlock_upper(uppervp, td); 991 } 992 return (error); 993} 994 995static int 996union_read(ap) 997 struct vop_read_args /* { 998 struct vnode *a_vp; 999 struct uio *a_uio; 1000 int a_ioflag; 1001 struct ucred *a_cred; 1002 } */ *ap; 1003{ 1004 struct union_node *un = VTOUNION(ap->a_vp); 1005 struct thread *td = ap->a_uio->uio_td; 1006 struct vnode *uvp; 1007 int error; 1008 1009 uvp = union_lock_other(un, td); 1010 KASSERT(uvp != NULL, ("union_read: backing vnode missing!")); 1011 1012 error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred); 1013 union_unlock_other(uvp, td); 1014 1015 /* 1016 * XXX 1017 * Perhaps the size of the underlying object has changed under 1018 * our feet. Take advantage of the offset information present 1019 * in the uio structure. 
1020 */ 1021 if (error == 0) { 1022 struct union_node *un = VTOUNION(ap->a_vp); 1023 off_t cur = ap->a_uio->uio_offset; 1024 1025 if (uvp == un->un_uppervp) { 1026 if (cur > un->un_uppersz) 1027 union_newsize(ap->a_vp, cur, VNOVAL); 1028 } else { 1029 if (cur > un->un_lowersz) 1030 union_newsize(ap->a_vp, VNOVAL, cur); 1031 } 1032 } 1033 return (error); 1034} 1035 1036static int 1037union_write(ap) 1038 struct vop_write_args /* { 1039 struct vnode *a_vp; 1040 struct uio *a_uio; 1041 int a_ioflag; 1042 struct ucred *a_cred; 1043 } */ *ap; 1044{ 1045 struct union_node *un = VTOUNION(ap->a_vp); 1046 struct thread *td = ap->a_uio->uio_td; 1047 struct vnode *uppervp; 1048 int error; 1049 1050 if ((uppervp = union_lock_upper(un, td)) == NULLVP) 1051 panic("union: missing upper layer in write"); 1052 1053 error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred); 1054 1055 /* 1056 * The size of the underlying object may be changed by the 1057 * write. 1058 */ 1059 if (error == 0) { 1060 off_t cur = ap->a_uio->uio_offset; 1061 1062 if (cur > un->un_uppersz) 1063 union_newsize(ap->a_vp, cur, VNOVAL); 1064 } 1065 union_unlock_upper(uppervp, td); 1066 return (error); 1067} 1068 1069static int 1070union_lease(ap) 1071 struct vop_lease_args /* { 1072 struct vnode *a_vp; 1073 struct thread *a_td; 1074 struct ucred *a_cred; 1075 int a_flag; 1076 } */ *ap; 1077{ 1078 struct vnode *ovp = OTHERVP(ap->a_vp); 1079 1080 ap->a_vp = ovp; 1081 return (VOP_LEASE_AP(ap)); 1082} 1083 1084static int 1085union_ioctl(ap) 1086 struct vop_ioctl_args /* { 1087 struct vnode *a_vp; 1088 u_long a_command; 1089 caddr_t a_data; 1090 int a_fflag; 1091 struct ucred *a_cred; 1092 struct thread *a_td; 1093 } */ *ap; 1094{ 1095 struct vnode *ovp = OTHERVP(ap->a_vp); 1096 1097 ap->a_vp = ovp; 1098 return (VOP_IOCTL_AP(ap)); 1099} 1100 1101static int 1102union_poll(ap) 1103 struct vop_poll_args /* { 1104 struct vnode *a_vp; 1105 int a_events; 1106 struct ucred *a_cred; 1107 struct thread *a_td; 1108 } 
*/ *ap; 1109{ 1110 struct vnode *ovp = OTHERVP(ap->a_vp); 1111 1112 ap->a_vp = ovp; 1113 return (VOP_POLL_AP(ap)); 1114} 1115 1116static int 1117union_fsync(ap) 1118 struct vop_fsync_args /* { 1119 struct vnode *a_vp; 1120 struct ucred *a_cred; 1121 int a_waitfor; 1122 struct thread *a_td; 1123 } */ *ap; 1124{ 1125 int error = 0; 1126 struct thread *td = ap->a_td; 1127 struct vnode *targetvp; 1128 struct union_node *un = VTOUNION(ap->a_vp); 1129 1130 if ((targetvp = union_lock_other(un, td)) != NULLVP) { 1131 error = VOP_FSYNC(targetvp, ap->a_waitfor, td); 1132 union_unlock_other(targetvp, td); 1133 } 1134 1135 return (error); 1136} 1137 1138/* 1139 * union_remove: 1140 * 1141 * Remove the specified cnp. The dvp and vp are passed to us locked 1142 * and must remain locked on return. 1143 */ 1144 1145static int 1146union_remove(ap) 1147 struct vop_remove_args /* { 1148 struct vnode *a_dvp; 1149 struct vnode *a_vp; 1150 struct componentname *a_cnp; 1151 } */ *ap; 1152{ 1153 struct union_node *dun = VTOUNION(ap->a_dvp); 1154 struct union_node *un = VTOUNION(ap->a_vp); 1155 struct componentname *cnp = ap->a_cnp; 1156 struct thread *td = cnp->cn_thread; 1157 struct vnode *uppervp; 1158 struct vnode *upperdvp; 1159 int error; 1160 1161 if ((upperdvp = union_lock_upper(dun, td)) == NULLVP) 1162 panic("union remove: null upper vnode"); 1163 1164 if ((uppervp = union_lock_upper(un, td)) != NULLVP) { 1165 if (union_dowhiteout(un, cnp->cn_cred, td)) 1166 cnp->cn_flags |= DOWHITEOUT; 1167 if (cnp->cn_flags & DOWHITEOUT) /* XXX fs corruption */ 1168 error = EOPNOTSUPP; 1169 else 1170 error = VOP_REMOVE(upperdvp, uppervp, cnp); 1171 if (!error) 1172 union_removed_upper(un); 1173 union_unlock_upper(uppervp, td); 1174 } else { 1175 error = union_mkwhiteout( 1176 MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount), 1177 upperdvp, ap->a_cnp, un->un_path); 1178 } 1179 union_unlock_upper(upperdvp, td); 1180 return (error); 1181} 1182 1183/* 1184 * union_link: 1185 * 1186 * tdvp and vp will be 
locked on entry.
 * tdvp and vp should remain locked on return.
 */

static int
union_link(ap)
	struct vop_link_args /* {
		struct vnode *a_tdvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct union_node *dun = VTOUNION(ap->a_tdvp);
	struct vnode *vp;
	struct vnode *tdvp;
	int error = 0;

	/*
	 * If the source is not itself a union vnode, link it as-is.
	 * Otherwise link against its upper layer, copying it up first
	 * when it only exists in the lower layer.
	 */
	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
		vp = ap->a_vp;
	} else {
		struct union_node *tun = VTOUNION(ap->a_vp);

		if (tun->un_uppervp == NULLVP) {
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				if (dun->un_flags & UN_ULOCK) {
					dun->un_flags &= ~UN_ULOCK;
					VOP_UNLOCK(dun->un_uppervp, 0, td);
				}
			}
#endif
			error = union_copyup(tun, 1, cnp->cn_cred, td);
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				vn_lock(dun->un_uppervp,
					LK_EXCLUSIVE | LK_RETRY, td);
				dun->un_flags |= UN_ULOCK;
			}
#endif
			if (error)
				return (error);
		}
		vp = tun->un_uppervp;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/*
	 * Make sure upper is locked, then unlock the union directory we were
	 * called with to avoid a deadlock while we are calling VOP_LINK() on
	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
	 * is expected to be locked on return.
	 */

	if ((tdvp = union_lock_upper(dun, td)) == NULLVP)
		return (EROFS);

	VOP_UNLOCK(ap->a_tdvp, 0, td);		/* unlock calling node */
	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */

	/*
	 * Unlock tun->un_uppervp if we locked it above.
	 */
	if (ap->a_tdvp->v_op == ap->a_vp->v_op)
		VOP_UNLOCK(vp, 0, td);
	/*
	 * We have to unlock tdvp prior to relocking our calling node in
	 * order to avoid a deadlock.  We also have to unlock ap->a_vp
	 * before relocking the directory, but then we have to relock
	 * ap->a_vp as our caller expects.
	 */
	VOP_UNLOCK(ap->a_vp, 0, td);
	union_unlock_upper(tdvp, td);
	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY, td);
	vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}

/*
 * union_rename:
 *
 * Rename is always performed in the upper layer; sources that exist
 * only in the lower layer are copied up (regular files) or refused
 * (directories), and a whiteout is requested when a lower copy of the
 * source remains.  VOP_RENAME() consumes all four vnode references.
 */
static int
union_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	/*
	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
	 * replace the fdvp, release the original one and ref the new one.
	 */

	if (fdvp->v_op == &union_vnodeops) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}
		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	/*
	 * Figure out what fvp to pass to our upper or lower vnode.  If we
	 * replace the fvp, release the original one and ref the new one.
	 */

	if (fvp->v_op == &union_vnodeops) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
#if 0
		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
#endif

		if (un->un_uppervp == NULLVP) {
			switch(fvp->v_type) {
			case VREG:
				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_thread);
				VOP_UNLOCK(un->un_vnode, 0, ap->a_fcnp->cn_thread);
				if (error)
					goto bad;
				break;
			case VDIR:
				/*
				 * XXX not yet.
				 *
				 * There is only one way to rename a directory
				 * based in the lowervp, and that is to copy
				 * the entire directory hierarchy.  Otherwise
				 * it would not last across a reboot.
				 */
#if 0
				vrele(fvp);
				fvp = NULL;
				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
				error = union_mkshadow(um, fdvp,
					    ap->a_fcnp, &un->un_uppervp);
				VOP_UNLOCK(fdvp, 0, ap->a_fcnp->cn_thread);
				if (un->un_uppervp)
					VOP_UNLOCK(un->un_uppervp, 0, ap->a_fcnp->cn_thread);
				if (error)
					goto bad;
				break;
#endif
			default:
				error = EXDEV;
				goto bad;
			}
		}

		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;
		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	/*
	 * Figure out what tdvp (destination directory) to pass to the
	 * lower level.  If we replace it with uppervp, we need to vput the
	 * old one.  The exclusive lock is transfered to what we will pass
	 * down in the VOP_RENAME() and we replace uppervp with a simple
	 * reference.
	 */

	if (tdvp->v_op == &union_vnodeops) {
		struct union_node *un = VTOUNION(tdvp);

		if (un->un_uppervp == NULLVP) {
			/*
			 * This should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		/*
		 * New tdvp is a lock and reference on uppervp.
		 * Put away the old tdvp.
		 */
		tdvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
		vput(ap->a_tdvp);
	}

	/*
	 * Figure out what tvp (destination file) to pass to the
	 * lower level.
	 *
	 * If the uppervp file does not exist, put away the (wrong)
	 * file and change tvp to NULL.
	 */

	if (tvp != NULLVP && tvp->v_op == &union_vnodeops) {
		struct union_node *un = VTOUNION(tvp);

		tvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
		vput(ap->a_tvp);
		/* note: tvp may be NULL */
	}

	/*
	 * VOP_RENAME() releases/vputs prior to returning, so we have no
	 * cleanup to do.
	 */

	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

	/*
	 * Error.  We still have to release / vput the various elements.
	 */

bad:
	vrele(fdvp);
	if (fvp)
		vrele(fvp);
	vput(tdvp);
	if (tvp != NULLVP) {
		if (tvp != tdvp)
			vput(tvp);
		else
			vrele(tvp);
	}
	return (error);
}

/*
 * union_mkdir:
 *
 * Create the directory in the upper layer, then allocate a union
 * vnode covering it.  dvp is locked on entry and remains locked.
 */
static int
union_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *upperdvp;
	int error = EROFS;

	if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
		struct vnode *vp;

		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
		union_unlock_upper(upperdvp, td);

		if (error == 0) {
			VOP_UNLOCK(vp, 0, td);
			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vrefcnt(vp)));
			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
				ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vrefcnt(vp)));
		}
	}
	return (error);
}

/*
 * union_rmdir:
 *
 * Remove the directory in the upper layer, or create a whiteout if it
 * only exists in the lower layer.  Mirrors union_remove().
 */
static int
union_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *upperdvp;
	struct vnode *uppervp;
	int error;

	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union rmdir: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= DOWHITEOUT;
		if (cnp->cn_flags & DOWHITEOUT)		/* XXX fs corruption */
			error = EOPNOTSUPP;
		else
			error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
		if (!error)
			union_removed_upper(un);
		union_unlock_upper(uppervp, td);
	} else {
		/* Directory exists only in the lower layer: whiteout it. */
		error = union_mkwhiteout(
		    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
		    dun->un_uppervp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}

/*
 * union_symlink:
 *
 *	dvp is locked on entry and remains locked on return.  a_vpp is garbage
 *	(unused).
 */

static int
union_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *dvp;
	int error = EROFS;	/* symlinks can only be made in the upper layer */

	if ((dvp = union_lock_upper(dun, td)) != NULLVP) {
		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
			    ap->a_target);
		union_unlock_upper(dvp, td);
	}
	return (error);
}

/*
 * union_readdir ()works in concert with getdirentries() and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries()  is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 */
static int
union_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		u_long *a_cookies;
		int a_ncookies;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uvp;
	int error = 0;

	/* Only the upper layer is read here; see comment above. */
	if ((uvp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_vp = uvp;
		error = VOP_READDIR_AP(ap);
		union_unlock_upper(uvp, td);
	}
	return(error);
}

/* Read the link contents from the backing vnode. */
static int
union_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp;

	vp = union_lock_other(un, td);
	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));

	ap->a_vp = vp;
	error = VOP_READLINK_AP(ap);
	union_unlock_other(vp, td);

	return (error);
}

/*
 * union_getwritemount:
 *
 * Writes are only possible through the upper layer, so forward the
 * query there; report EOPNOTSUPP/EACCES when no upper vnode exists.
 */
static int
union_getwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *uvp = UPPERVP(vp);

	if (uvp == NULL) {
		VI_LOCK(vp);
		if (vp->v_iflag & VI_FREE) {
			VI_UNLOCK(vp);
			return (EOPNOTSUPP);
		}
		VI_UNLOCK(vp);
		return (EACCES);
	}
	return(VOP_GETWRITEMOUNT(uvp, ap->a_mpp));
}

/*
 * union_inactive:
 *
 *	Called with the vnode locked.  We are expected to unlock the vnode.
 */

static int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 */

	if (un->un_dircache != NULL)
		union_dircache_free(un);

#if 0
	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
		un->un_flags &= ~UN_ULOCK;
		VOP_UNLOCK(un->un_uppervp, 0, td);
	}
#endif

	if ((un->un_flags & UN_CACHED) == 0)
		vgone(vp);

	return (0);
}

/* Release all resources associated with the union node. */
static int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	union_freevp(ap->a_vp);

	return (0);
}

/* Print both layers of the union vnode (debugging aid). */
static int
union_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, uppervp=%p, lowervp=%p\n",
	       vp, UPPERVP(vp), LOWERVP(vp));
	if (UPPERVP(vp) != NULLVP)
		vprint("union: upper", UPPERVP(vp));
	if (LOWERVP(vp) != NULLVP)
		vprint("union: lower", LOWERVP(vp));

	return (0);
}

/* Forward pathconf to the backing vnode. */
static int
union_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{
	int error;
	struct thread *td = curthread;		/* XXX */
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, td);
	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));

	ap->a_vp = vp;
	error = VOP_PATHCONF_AP(ap);
	union_unlock_other(vp, td);

	return (error);
}

/* Forward advisory locking to the backing vnode. */
static int
union_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{
	register struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VOP_ADVLOCK_AP(ap));
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * YYY - and it is not coherent with anything
 *
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
static int
union_strategy(ap)
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	struct vnode *othervp = OTHERVP(ap->a_vp);

#ifdef DIAGNOSTIC
	if (othervp == NULLVP)
		panic("union_strategy: nil vp");
	/* Writes must never be directed at the read-only lower layer. */
	if ((bp->b_iocmd == BIO_WRITE) &&
	    (othervp == LOWERVP(ap->a_vp)))
		panic("union_strategy: writing to lowervp");
#endif
	return (VOP_STRATEGY(othervp, bp));
}

/* Retrieve an ACL from the backing vnode. */
static int
union_getacl(ap)
	struct vop_getacl_args /* {
		struct vnode *a_vp;
		acl_type_t a_type;
		struct acl *a_aclp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_GETACL_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

/* Set an ACL on the backing vnode. */
static int
union_setacl(ap)
	struct vop_setacl_args /* {
		struct vnode *a_vp;
		acl_type_t a_type;
		struct acl *a_aclp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_SETACL_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

/* Check an ACL against the backing vnode (no locking required). */
static int
union_aclcheck(ap)
	struct vop_aclcheck_args /* {
		struct vnode *a_vp;
		acl_type_t a_type;
		struct acl *a_aclp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VOP_ACLCHECK_AP(ap));
}

/* Close extended attributes on the backing vnode. */
static int
union_closeextattr(ap)
	struct vop_closeextattr_args /* {
		struct vnode *a_vp;
		int a_commit;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_CLOSEEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

/* Read an extended attribute from the backing vnode. */
static int
union_getextattr(ap)
	struct vop_getextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		const char *a_name;
		struct uio *a_uio;
		size_t *a_size;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_GETEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

/* List extended attributes of the backing vnode. */
static int
union_listextattr(ap)
	struct vop_listextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		struct uio *a_uio;
		size_t *a_size;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_LISTEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

/* Open extended attributes on the backing vnode. */
static int
union_openextattr(ap)
	struct vop_openextattr_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_OPENEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

/* Delete an extended attribute on the backing vnode. */
static int
union_deleteextattr(ap)
	struct vop_deleteextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		const char *a_name;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_DELETEEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

/* Write an extended attribute to the backing vnode. */
static int
union_setextattr(ap)
	struct vop_setextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		const char *a_name;
		struct uio *a_uio;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_SETEXTATTR_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

/* Forward MAC label updates to the backing vnode. */
static int
union_setlabel(ap)
	struct vop_setlabel_args /* {
		struct vnode *a_vp;
		struct label *a_label;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, ap->a_td);
	ap->a_vp = vp;
	error = VOP_SETLABEL_AP(ap);
	union_unlock_other(vp, ap->a_td);

	return (error);
}

/*
 * Global vfs data structures
 */
struct vop_vector union_vnodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		union_access,
	.vop_aclcheck =		union_aclcheck,
	.vop_advlock =		union_advlock,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_close =		union_close,
	.vop_closeextattr =	union_closeextattr,
	.vop_create =		union_create,
	.vop_deleteextattr =	union_deleteextattr,
	.vop_fsync =		union_fsync,
	.vop_getacl =		union_getacl,
	.vop_getattr =		union_getattr,
	.vop_getextattr =	union_getextattr,
	.vop_getwritemount =	union_getwritemount,
	.vop_inactive =		union_inactive,
	.vop_ioctl =		union_ioctl,
	.vop_lease =		union_lease,
	.vop_link =		union_link,
	.vop_listextattr =	union_listextattr,
	.vop_lookup =		union_lookup,
	.vop_mkdir =		union_mkdir,
	.vop_mknod =		union_mknod,
	.vop_open =		union_open,
	.vop_openextattr =	union_openextattr,
	.vop_pathconf =		union_pathconf,
	.vop_poll =		union_poll,
	.vop_print =		union_print,
	.vop_read =		union_read,
	.vop_readdir =		union_readdir,
	.vop_readlink =		union_readlink,
	.vop_reclaim =		union_reclaim,
	.vop_remove =		union_remove,
	.vop_rename =		union_rename,
	.vop_rmdir =		union_rmdir,
	.vop_setacl =		union_setacl,
	.vop_setattr =		union_setattr,
	.vop_setextattr =	union_setextattr,
	.vop_setlabel =		union_setlabel,
	.vop_strategy =		union_strategy,
	.vop_symlink =		union_symlink,
	.vop_whiteout =		union_whiteout,
	.vop_write =		union_write,
};
|