/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 114774 2003-05-06 02:45:28Z alc $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	vop_nospecstrategy(struct vop_specstrategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_stdlock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_specstrategy_desc,	(vop_t *) vop_nospecstrategy },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_stdunlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

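/*
 * Example (a sketch; "foofs" and foofs_lookup() are hypothetical): a
 * filesystem picks up every operation it does not implement by routing
 * its default entry through vop_defaultop.
 */
#if 0
static struct vnodeopv_entry_desc foofs_vnodeop_entries[] = {
	{ &vop_default_desc,	(vop_t *) vop_defaultop },	/* fall back */
	{ &vop_lookup_desc,	(vop_t *) foofs_lookup },	/* own lookup */
	{ NULL, NULL }
};
#endif
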
/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement, where it exists, can
 * be found in the corresponding VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	vprint("device vnode", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 *	vop_nospecstrategy:
 *
 *	This shouldn't happen.  VOP_SPECSTRATEGY should always have a VCHR
 *	argument vnode, and those have a method for specstrategy over in
 *	specfs, so we only ever get here if somebody botched it.
 *	Pass the call to VOP_STRATEGY() and get on with life.
 *	The first time this happens we print some information useful for
 *	debugging.
 */

static int
vop_nospecstrategy (struct vop_specstrategy_args *ap)
{
	static int once;

	if (!once) {
		vprint("VOP_SPECSTRATEGY on non-VCHR", ap->a_vp);
		backtrace();
		once++;
	}
	return (VOP_STRATEGY(ap->a_vp, ap->a_bp));
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

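/*
 * Example usage (a sketch; "vp" is a hypothetical vnode the caller holds,
 * and the retval type follows the args comment above): query one of the
 * limits served by the implementation above.
 */
#if 0
	int value, error;

	error = VOP_PATHCONF(vp, _PC_LINK_MAX, &value);
	if (error == 0)
		printf("LINK_MAX for this filesystem: %d\n", value);
#endif
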
/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}

/* Mark the vnode inactive */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

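/*
 * Example (a sketch; "vp" and "td" are hypothetical): a caller can probe
 * for extended poll support, since any event outside POLLSTANDARD comes
 * back as POLLNVAL from this routine.
 */
#if 0
	if (VOP_POLL(vp, POLLEXTEND, td->td_ucred, td) & POLLNVAL)
		; /* extended poll events not supported on this vnode */
#endif
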
/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}


/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
	return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
}

/*
 * Return whether or not the node is in use.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (0);
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		VM_OBJECT_LOCK(object);
		object->ref_count--;
		VM_OBJECT_UNLOCK(object);
		vrele(vp);
	} else {
		VM_OBJECT_LOCK(object);
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			msleep(object, VM_OBJECT_MTX(object), PDROP | PVM,
			    "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
		VM_OBJECT_UNLOCK(object);
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (obj == NULL)
		return (0);
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/*
 * Default bmap: an identity mapping.  The logical block number is mapped
 * onto the vnode itself, converted from filesystem blocks (f_iosize bytes
 * each) to DEV_BSIZE units via btodb(), and no run of contiguous blocks
 * before or after it is reported.  See VOP_BMAP(9).
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

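/*
 * Worked example (a sketch): the conversion above is plain arithmetic.
 */
#if 0
	/* With f_iosize = 8192: btodb(8192) == 8192 / DEV_BSIZE == 16. */
	daddr_t bn = 5;				/* logical block */
	daddr_t devbn = bn * btodb(8192);	/* == 80, in DEV_BSIZE units */
#endif
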
/*
 * Default fsync implementation: write out and wait for all dirty buffers
 * on the vnode, retrying a bounded number of times before giving up.
 */
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;     /* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		while (vp->v_numoutput) {
			vp->v_iflag |= VI_BWAIT;
			msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp),
			    PRIBIO + 1, "fsync", 0);
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}

/*
 * Default getpages: delegate to the generic vnode pager
 * (vnode_pager_generic_getpages()).  See VOP_GETPAGES(9).
 */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage));
}

/*
 * Default putpages: delegate to the generic vnode pager
 * (vnode_pager_generic_putpages()).  See VOP_PUTPAGES(9).
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals));
}

/*
 * vfs default ops
 * Used to fill the vfs function table to get reasonable default return
 * values; a sketch of how a filesystem wires these in follows the first
 * of them below.
 */
int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

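/*
 * Example (a sketch, using C99 designated initializers; "foofs" and
 * foofs_mount() are hypothetical): a filesystem fills the vfsops slots
 * it does not care about with these vfs_std* defaults.
 */
#if 0
static struct vfsops foofs_vfsops = {
	.vfs_mount =	foofs_mount,	/* filesystem-specific */
	.vfs_root =	vfs_stdroot,	/* inherit the defaults... */
	.vfs_statfs =	vfs_stdstatfs,
	.vfs_sync =	vfs_stdnosync,
	.vfs_vget =	vfs_stdvget,
	.vfs_init =	vfs_stdinit,
	.vfs_uninit =	vfs_stduninit,
};
#endif
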
int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *vp, *nvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;

		nvp = TAILQ_NEXT(vp, v_nmntvnodes);

		VI_LOCK(vp);
		if (TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			VI_UNLOCK(vp);
			continue;
		}
		mtx_unlock(&mntvnode_mtx);

		if ((error = vget(vp, lockreq, td)) != 0) {
			/* Reacquire before continuing the mutex-held scan. */
			mtx_lock(&mntvnode_mtx);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, cred, waitfor, td);
		if (error)
			allerror = error;

		mtx_lock(&mntvnode_mtx);
		if (nvp != TAILQ_NEXT(vp, v_nmntvnodes)) {
			vput(vp);
			goto loop;
		}
		vput(vp);
	}
	mtx_unlock(&mntvnode_mtx);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{
	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

/* end of vfs default ops */
1000