vfs_default.c revision 114216
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 114216 2003-04-29 13:36:06Z kan $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	vop_nospecstrategy(struct vop_specstrategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_stdlock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_specstrategy_desc,	(vop_t *) vop_nospecstrategy },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_stdunlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}
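
/*
 * Example (hypothetical, not part of this file): a filesystem picks up
 * these defaults either by leaving an operation out of its own table,
 * in which case its vop_default_desc entry decides what happens, or by
 * pointing individual entries at vop_defaultop.  "myfs" and the
 * handlers named below are illustrative only; the table layout mirrors
 * default_vnodeop_entries above.
 */
#if 0
vop_t **myfs_vnodeop_p;
static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_lookup_desc,		(vop_t *) myfs_lookup },
	{ &vop_getattr_desc,		(vop_t *) myfs_getattr },
	{ &vop_read_desc,		(vop_t *) myfs_read },
	{ NULL, NULL }
};
static struct vnodeopv_desc myfs_vnodeop_opv_desc =
	{ &myfs_vnodeop_p, myfs_vnodeop_entries };
VNODEOP_SET(myfs_vnodeop_opv_desc);
#endif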

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */
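
/*
 * Illustration of the convention described above (a sketch only, not
 * code compiled here): this is roughly what a read path such as
 * bread() does before handing a buffer to the strategy routine.  "vp"
 * and "bp" are assumed to be a vnode and a locked buffer owned by the
 * caller.
 */
#if 0
	bp->b_iocmd = BIO_READ;
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;
	vfs_busy_pages(bp, 0);
	VOP_STRATEGY(vp, bp);
	error = bufwait(bp);
#endif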

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	vprint("device vnode", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 *	vop_nospecstrategy:
 *
 *	This shouldn't happen.  VOP_SPECSTRATEGY should always have a VCHR
 *	argument vnode, and those have a method for specstrategy over in
 *	specfs, so we only ever get here if somebody botched it.
 *	Pass the call to VOP_STRATEGY() and get on with life.
 *	The first time through, we print some info useful for debugging.
 */

static int
vop_nospecstrategy (struct vop_specstrategy_args *ap)
{
	static int once;

	if (!once) {
		vprint("VOP_SPECSTRATEGY on non-VCHR", ap->a_vp);
		backtrace();
		once++;
	}
	return VOP_STRATEGY(ap->a_vp, ap->a_bp);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}

/* Mark the vnode inactive */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
	return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
}

/*
 * Return whether or not the node is in use.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (0);
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (obj == NULL)
		return (0);
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		VM_OBJECT_UNLOCK(obj);
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
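/*
 * vop_stdbmap: trivial logical-to-physical block mapping.  Logical
 * block a_bn of the vnode is reported to live on the vnode itself
 * (*a_vpp = a_vp) at physical block a_bn * btodb(f_iosize), i.e. the
 * logical block number rescaled from filesystem blocks to DEV_BSIZE
 * units, with no read-ahead (*a_runp) or read-behind (*a_runb).
 */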
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

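/*
 * vop_stdfsync: flush all dirty buffers hanging off the vnode.  A
 * MARK/SCAN pass (BV_SCANNED) keeps buffers that get re-dirtied while
 * we sleep from making the loop spin forever.  For MNT_WAIT callers we
 * wait for outstanding writes to drain and retry, up to maxretry
 * times, before giving up and returning EAGAIN.
 */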
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;     /* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		while (vp->v_numoutput) {
			vp->v_iflag |= VI_BWAIT;
			msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp),
			    PRIBIO + 1, "fsync", 0);
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
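/*
 * vop_stdgetpages: default page-in handler; simply delegates the work
 * to the generic vnode pager.
 */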
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
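/*
 * vop_stdputpages: default page-out handler; simply delegates the work
 * to the generic vnode pager.
 */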
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	     ap->a_sync, ap->a_rtvals);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *vp, *nvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;

		nvp = TAILQ_NEXT(vp, v_nmntvnodes);

		VI_LOCK(vp);
		if (TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			VI_UNLOCK(vp);
			continue;
		}
		mtx_unlock(&mntvnode_mtx);

		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, cred, waitfor, td);
		if (error)
			allerror = error;

		mtx_lock(&mntvnode_mtx);
		if (nvp != TAILQ_NEXT(vp, v_nmntvnodes)) {
			vput(vp);
			goto loop;
		}
		vput(vp);
	}
	mtx_unlock(&mntvnode_mtx);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{
	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

/* end of vfs default ops */

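/*
 * Example (hypothetical, not part of this file): a simple filesystem
 * can point most of its vfsops at the defaults above and only supply
 * the operations it really implements.  "myfs" and its functions are
 * illustrative names; designated initializers are used here for
 * brevity, whereas filesystems of this era typically used positional
 * initializers matching the layout of struct vfsops, and any fields
 * not shown are left out only to keep the sketch short.
 */
#if 0
static struct vfsops myfs_vfsops = {
	.vfs_mount =		myfs_mount,
	.vfs_start =		vfs_stdstart,
	.vfs_unmount =		myfs_unmount,
	.vfs_root =		myfs_root,
	.vfs_quotactl =		vfs_stdquotactl,
	.vfs_statfs =		myfs_statfs,
	.vfs_sync =		vfs_stdnosync,
	.vfs_vget =		vfs_stdvget,
	.vfs_fhtovp =		vfs_stdfhtovp,
	.vfs_vptofh =		vfs_stdvptofh,
	.vfs_init =		vfs_stdinit,
	.vfs_uninit =		vfs_stduninit,
	.vfs_extattrctl =	vfs_stdextattrctl,
};
VFS_SET(myfs_vfsops, myfs, 0);
#endif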