/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 66615 2000-10-04 01:29:17Z jasone $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <machine/limits.h>
#include <machine/mutex.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

static int vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
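
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * a filesystem typically lists only the operations it implements and
 * routes everything else here via vop_defaultop, e.g.:
 *
 *	static struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_lookup_desc,	(vop_t *) examplefs_lookup },
 *		{ &vop_read_desc,	(vop_t *) examplefs_read },
 *		{ NULL, NULL }
 *	};
 *
 * Any VOP absent from that table falls through to the entries in
 * default_vnodeop_entries above, and ultimately to vop_eopnotsupp.
 * "examplefs" and its handlers are hypothetical names.
 */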

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_eopnotsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	printf("vop_panic[%s]\n", ap->a_desc->vdesc_name);
	panic("Filesystem goof");
	return (0);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
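
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * per the comment above, a read issued through a strategy routine
 * should have its error/invalid state cleared first.  With a
 * hypothetical locked buffer "bp" attached to vnode "vp":
 *
 *	bp->b_iocmd = BIO_READ;
 *	bp->b_flags &= ~B_INVAL;
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	VOP_STRATEGY(vp, bp);
 *	error = bufwait(bp);
 *
 * On a filesystem with no strategy routine, VOP_STRATEGY() lands in
 * vop_nostrategy() above, which flags the buffer with BIO_ERROR,
 * completes it via bufdone(), and returns EOPNOTSUPP.
 */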

int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
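
/*
 * Illustrative note (an assumption, not part of the original file):
 * a filesystem with no special limits of its own can route
 * vop_pathconf to the function above in its operation table:
 *
 *	{ &vop_pathconf_desc,	(vop_t *) vop_stdpathconf },
 *
 * after which pathconf(2)/fpathconf(2) queries such as _PC_LINK_MAX
 * are answered with the system-wide defaults (LINK_MAX and friends).
 */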

/*
 * Standard lock, unlock and islocked functions.
 *
 * These operate on the lock embedded in the vnode itself
 * (vp->v_lock), so no filesystem-specific lock state is needed.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock,
	    ap->a_p, "vop_stdlock", vp->filename, vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	return (lockstatus(&ap->a_vp->v_lock, ap->a_p));
}
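
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * a filesystem that keeps no lock state of its own typically points
 * the three locking VOPs at the standard routines:
 *
 *	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },
 *	{ &vop_lock_desc,	(vop_t *) vop_stdlock },
 *	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },
 */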

int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
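
/*
 * Illustrative note (an assumption, not part of the original file):
 * the POLLNVAL convention above lets a caller probe for extended
 * poll support without knowing the filesystem type, e.g.:
 *
 *	if (VOP_POLL(vp, POLLEXTEND, cred, p) & POLLNVAL)
 *		... only POLLSTANDARD events are supported ...
 */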

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if ((ap->a_events & ~POLLSTANDARD) == 0)
		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}

int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		mtx_exit(&ap->a_vp->v_interlock, MTX_DEF);
	return (0);
}
/*
 * Pretend the node is never locked; there is no lock to query.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	return (0);
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, p);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vop_stdcreatevobject: NULL object"));
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}

int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 */
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
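
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * a filesystem fills any struct vfsops slots it does not implement
 * with the vfs_std* routines below, e.g. something like:
 *
 *	static struct vfsops examplefs_vfsops = {
 *		examplefs_mount,
 *		vfs_stdstart,
 *		examplefs_unmount,
 *		examplefs_root,
 *		vfs_stdquotactl,
 *		examplefs_statfs,
 *		vfs_stdsync,
 *		vfs_stdvget,
 *		vfs_stdfhtovp,
 *		vfs_stdcheckexp,
 *		vfs_stdvptofh,
 *		vfs_stdinit,
 *		vfs_stduninit,
 *		vfs_stdextattrctl,
 *	};
 *	VFS_SET(examplefs_vfsops, examplefs, 0);
 *
 * "examplefs" and its handlers are hypothetical; the slot order is
 * assumed to match struct vfsops in <sys/mount.h> at this revision.
 */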
int
vfs_stdmount (mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	return (0);
}

int
vfs_stdunmount (mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	return (0);
}

int
vfs_stdvget (mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp (mp, nam, extflagsp, credanonp)
	struct mount *mp;
	struct sockaddr *nam;
	int *extflagsp;
	struct ucred **credanonp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stdextattrctl(mp, cmd, attrname, arg, p)
	struct mount *mp;
	int cmd;
	const char *attrname;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

/* end of vfs default ops */