/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 138290 2004-12-01 23:16:38Z phk $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

struct vop_vector default_vnodeops = {
	.vop_default = NULL,
	.vop_advlock = VOP_EINVAL,
	.vop_bmap = vop_stdbmap,
	.vop_close = VOP_NULL,
	.vop_createvobject = vop_stdcreatevobject,
	.vop_destroyvobject = vop_stddestroyvobject,
	.vop_fsync = VOP_NULL,
	.vop_getpages = vop_stdgetpages,
	.vop_getvobject = vop_stdgetvobject,
	.vop_getwritemount = vop_stdgetwritemount,
	.vop_inactive = vop_stdinactive,
	.vop_ioctl = VOP_ENOTTY,
	.vop_islocked = vop_stdislocked,
	.vop_lease = VOP_NULL,
	.vop_lock = vop_stdlock,
	.vop_lookup = vop_nolookup,
	.vop_open = VOP_NULL,
	.vop_pathconf = VOP_EINVAL,
	.vop_poll = vop_nopoll,
	.vop_putpages = vop_stdputpages,
	.vop_readlink = VOP_EINVAL,
	.vop_revoke = VOP_PANIC,
	.vop_strategy = vop_nostrategy,
	.vop_unlock = vop_stdunlock,
};
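/*
 * Illustrative sketch, not part of the original file: a filesystem
 * normally supplies its own vop_vector and points .vop_default at
 * default_vnodeops, so any operation it leaves out falls back to the
 * defaults above.  The "myfs" names here are hypothetical.
 */
#if 0
static struct vop_vector myfs_vnodeops = {
	.vop_default = &default_vnodeops,	/* fall back for the rest */
	.vop_lookup = myfs_lookup,		/* hypothetical lookup */
	.vop_read = myfs_read,			/* hypothetical read */
	.vop_write = myfs_write,		/* hypothetical write */
};
#endif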
/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
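/*
 * Illustrative sketch, not part of the original file: a filesystem with a
 * smaller limit can override one name and defer every other query to
 * vop_stdpathconf().  The "myfs" names and MYFS_LINK_MAX are hypothetical.
 */
#if 0
static int
myfs_pathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = MYFS_LINK_MAX;	/* hypothetical fs limit */
		return (0);
	default:
		return (vop_stdpathconf(ap));	/* standard answers */
	}
}
#endif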
/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}
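/*
 * Illustrative sketch, not part of the original file: callers normally go
 * through vn_lock(9) and VOP_UNLOCK(9), which end up in the functions above
 * for filesystems that use the standard lock.  A typical lock/unlock
 * bracket of this era looks like the hypothetical helper below.
 */
#if 0
static void
myfs_locked_op(struct vnode *vp, struct thread *td)
{

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);	/* sleep until locked */
	/* ... operate on the exclusively locked vnode ... */
	VOP_UNLOCK(vp, 0, td);				/* drop the lock */
}
#endif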
/* Mark the vnode inactive */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;
	vm_ooffset_t size;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if (!(object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(object);
			break;
		}
		VOP_UNLOCK(vp, 0, td);
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	if (object == NULL) {
		if (vn_isdisk(vp, NULL)) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				return (error);
			size = vat.va_size;
		}

		object = vnode_pager_alloc(vp, size, 0, 0);
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		VM_OBJECT_LOCK(object);
		object->ref_count--;
		VM_OBJECT_UNLOCK(object);
		vrele(vp);
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (obj == NULL)
		return (0);
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}
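/*
 * Illustrative sketch, not part of the original file: a syncer-style caller
 * can use VOP_GETVOBJECT without the interlock for a cheap dirtiness test
 * before doing heavier work, as the comment above describes.  The helper
 * name is hypothetical.
 */
#if 0
static int
myfs_might_be_dirty(struct vnode *vp)
{
	vm_object_t obj;

	if (VOP_GETVOBJECT(vp, &obj) != 0)
		return (0);			/* no backing object */
	return ((obj->flags & OBJ_MIGHTBEDIRTY) != 0);
}
#endif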
/*
 * Default bmap: hand back the vnode's own bufobj and convert the logical
 * block number into DEV_BSIZE units by scaling it with the filesystem's
 * f_iosize, i.e. a 1:1 logical-to-physical mapping, reporting no
 * read-ahead or read-behind runs.
 *
 * XXX The VOP_BMAP(9) manpage still needs more detail.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
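/*
 * Illustrative sketch, not part of the original file: how a caller might
 * translate a logical block number through VOP_BMAP.  The helper name and
 * "lbn" are hypothetical; passing NULL run pointers declines the
 * read-ahead/read-behind information.
 */
#if 0
static int
myfs_lbn_to_blkno(struct vnode *vp, daddr_t lbn, daddr_t *blknop)
{
	struct bufobj *bo;

	return (VOP_BMAP(vp, lbn, &bo, blknop, NULL, NULL));
}
#endif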
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;	/* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, vpp, td)
	struct mount *mp;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{

	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *vp, *nvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, nvp) {

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, cred, waitfor, td);
		if (error)
			allerror = error;

		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */
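/*
 * Illustrative sketch, not part of the original file: a filesystem fills
 * the slots of its vfsops table with the vfs_std* defaults above for
 * whatever it does not implement itself.  The "myfs" names are
 * hypothetical, and the exact field set of struct vfsops varies between
 * FreeBSD versions, so treat this as a sketch only.
 */
#if 0
static struct vfsops myfs_vfsops = {
	.vfs_root = myfs_root,		/* hypothetical: fs provides its own */
	.vfs_statfs = myfs_statfs,	/* hypothetical: fs provides its own */
	.vfs_sync = vfs_stdsync,	/* default: flush dirty vnodes */
	.vfs_vget = vfs_stdvget,	/* default: EOPNOTSUPP */
	.vfs_fhtovp = vfs_stdfhtovp,	/* default: EOPNOTSUPP */
	.vfs_vptofh = vfs_stdvptofh,	/* default: EOPNOTSUPP */
	.vfs_init = vfs_stdinit,	/* default: no-op */
	.vfs_uninit = vfs_stduninit,	/* default: no-op */
};
VFS_SET(myfs_vfsops, myfs, 0);
#endif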