/* vfs_default.c revision 58345 */
1/*
2 * Copyright (c) 1989, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed
6 * to Berkeley by John Heidemann of the UCLA Ficus project.
7 *
8 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *	This product includes software developed by the University of
21 *	California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 *
39 * $FreeBSD: head/sys/kern/vfs_default.c 58345 2000-03-20 10:44:49Z phk $
40 */
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/buf.h>
45#include <sys/kernel.h>
46#include <sys/lock.h>
47#include <sys/malloc.h>
48#include <sys/mount.h>
49#include <sys/unistd.h>
50#include <sys/vnode.h>
51#include <sys/poll.h>
52
53static int vop_nostrategy __P((struct vop_strategy_args *));
54
55/*
56 * This vnode table stores what we want to do if the filesystem doesn't
57 * implement a particular VOP.
58 *
59 * If there is no specific entry here, we will return EOPNOTSUPP.
60 *
61 */
62
/*
 * Default operations vector: the entry used for any VOP a filesystem
 * does not implement itself.  vop_default_desc makes EOPNOTSUPP the
 * ultimate fallback; the other entries substitute a more appropriate
 * error code or a harmless no-op for specific operations.
 */
vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_reallocblks_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ &vop_getacl_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_setacl_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_aclcheck_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_getextattr_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_setextattr_desc,		(vop_t *) vop_eopnotsupp },
	{ NULL, NULL }			/* list terminator */
};
90
/* Glue record registering the default vector with the VFS at boot. */
static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
95
/*
 * Generic catch-all VOP: report that the filesystem does not support
 * the requested operation.  Installed as the table-wide default entry.
 */
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/* Debugging aid, normally disabled:
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/
	return (EOPNOTSUPP);
}
105
/* Stub VOP that always fails with "bad file descriptor". */
int
vop_ebadf(struct vop_generic_args *ap)
{
	return (EBADF);
}
112
/* Stub VOP that always fails with "inappropriate ioctl" — the default
 * answer for VOP_IOCTL on vnodes that are not devices. */
int
vop_enotty(struct vop_generic_args *ap)
{
	return (ENOTTY);
}
119
/* Stub VOP that always fails with "invalid argument". */
int
vop_einval(struct vop_generic_args *ap)
{
	return (EINVAL);
}
126
/* No-op VOP: succeed without doing anything (close, fsync, etc.). */
int
vop_null(struct vop_generic_args *ap)
{
	return (0);
}
133
/*
 * Forward an operation to the default vector above.  Filesystems point
 * individual entries of their own vectors here to inherit the standard
 * behaviour for that operation.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}
140
/*
 * Catch-all for operations the filesystem was required to provide:
 * log the offending operation's name and panic.  The trailing return
 * only satisfies the compiler; it is never reached.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	printf("vop_panic[%s]\n", ap->a_desc->vdesc_name);
	panic("Filesystem goof");
	return (0);
}
149
150/*
151 *	vop_nostrategy:
152 *
153 *	Strategy routine for VFS devices that have none.
154 *
155 *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
156 *	routine.  Typically this is done for a BIO_READ strategy call.
157 *	Typically B_INVAL is assumed to already be clear prior to a write
158 *	and should not be cleared manually unless you just made the buffer
159 *	invalid.  B_ERROR should be cleared either way.
160 */
161
162static int
163vop_nostrategy (struct vop_strategy_args *ap)
164{
165	printf("No strategy for buffer at %p\n", ap->a_bp);
166	vprint("", ap->a_vp);
167	vprint("", ap->a_bp->b_vp);
168	ap->a_bp->b_flags |= B_ERROR;
169	ap->a_bp->b_error = EOPNOTSUPP;
170	biodone(ap->a_bp);
171	return (EOPNOTSUPP);
172}
173
174int
175vop_stdpathconf(ap)
176	struct vop_pathconf_args /* {
177	struct vnode *a_vp;
178	int a_name;
179	int *a_retval;
180	} */ *ap;
181{
182
183	switch (ap->a_name) {
184		case _PC_LINK_MAX:
185			*ap->a_retval = LINK_MAX;
186			return (0);
187		case _PC_MAX_CANON:
188			*ap->a_retval = MAX_CANON;
189			return (0);
190		case _PC_MAX_INPUT:
191			*ap->a_retval = MAX_INPUT;
192			return (0);
193		case _PC_PIPE_BUF:
194			*ap->a_retval = PIPE_BUF;
195			return (0);
196		case _PC_CHOWN_RESTRICTED:
197			*ap->a_retval = 1;
198			return (0);
199		case _PC_VDISABLE:
200			*ap->a_retval = _POSIX_VDISABLE;
201			return (0);
202		default:
203			return (EINVAL);
204	}
205	/* NOTREACHED */
206}
207
208/*
209 * Standard lock, unlock and islocked functions.
210 *
211 * These depend on the lock structure being the first element in the
212 * inode, ie: vp->v_data points to the the lock!
213 */
/*
 * Standard lock: acquire the lockmgr lock that v_data is assumed to
 * point at (see the comment above).  Vnodes without private data are
 * treated as unlockable: just honour LK_INTERLOCK and succeed.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	/* No underlying lock structure; drop the interlock if asked to. */
	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

#ifndef	DEBUG_LOCKS
	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
#else
	/* Debug variant records the vnode's allocation site. */
	return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}
237
/*
 * Standard unlock: release the lockmgr lock pointed at by v_data, or
 * just drop the interlock when the vnode has no private data (the
 * counterpart of vop_stdlock()'s no-lock case).
 */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	/* No underlying lock; honour LK_INTERLOCK and succeed. */
	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
	    ap->a_p));
}
257
/*
 * Standard islocked: query the lockmgr lock at v_data.  Vnodes with no
 * private data are reported as never locked.
 */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
		return 0;

	return (lockstatus(l, ap->a_p));
}
272
273/*
274 * Return true for select/poll.
275 */
276int
277vop_nopoll(ap)
278	struct vop_poll_args /* {
279		struct vnode *a_vp;
280		int  a_events;
281		struct ucred *a_cred;
282		struct proc *a_p;
283	} */ *ap;
284{
285	/*
286	 * Return true for read/write.  If the user asked for something
287	 * special, return POLLNVAL, so that clients have a way of
288	 * determining reliably whether or not the extended
289	 * functionality is present without hard-coding knowledge
290	 * of specific filesystem implementations.
291	 */
292	if (ap->a_events & ~POLLSTANDARD)
293		return (POLLNVAL);
294
295	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
296}
297
298/*
299 * Implement poll for local filesystems that support it.
300 */
301int
302vop_stdpoll(ap)
303	struct vop_poll_args /* {
304		struct vnode *a_vp;
305		int  a_events;
306		struct ucred *a_cred;
307		struct proc *a_p;
308	} */ *ap;
309{
310	if ((ap->a_events & ~POLLSTANDARD) == 0)
311		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
312	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
313}
314
315int
316vop_stdbwrite(ap)
317	struct vop_bwrite_args *ap;
318{
319	return (bwrite(ap->a_bp));
320}
321
322/*
323 * Stubs to use when there is no locking to be done on the underlying object.
324 * A minimal shared lock is necessary to ensure that the underlying object
325 * is not revoked while an operation is in progress. So, an active shared
326 * count is maintained in an auxillary vnode lock structure.
327 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	/* Allocate the auxiliary lock structure on first use. */
	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
		/* Without DEBUG_VFS_LOCKS, exclusive requests fall through
		 * and are granted as shared — see the block comment above. */
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		/* Only shared locks are held, so these are no-ops here. */
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else
	/* Debug variant records the vnode's allocation site. */
	return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}
393
394/*
395 * Stubs to use when there is no locking to be done on the underlying object.
396 * A minimal shared lock is necessary to ensure that the underlying object
397 * is not revoked while an operation is in progress. So, an active shared
398 * count is maintained in an auxillary vnode lock structure.
399 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	/* Allocate the auxiliary lock structure on first use. */
	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		/* Exclusive requests are deliberately downgraded to shared. */
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		/* Only shared locks are held, so these are no-ops here. */
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}
462
463/*
464 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
465 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	/*
	 * Without an auxiliary lock there is nothing to release beyond
	 * the interlock, matching vop_nolock()'s current behaviour.
	 */
	if (vp->v_vnlock == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}
	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
		&ap->a_vp->v_interlock, ap->a_p));
}
484
485/*
486 * Return whether or not the node is in use.
487 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	/* No auxiliary lock means the vnode is never reported locked. */
	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock, ap->a_p));
}
501
502/*
503 * vfs default ops
504 * used to fill the vfs fucntion table to get reasonable default return values.
505 */
506int
507vfs_stdmount (mp, path, data, ndp, p)
508	struct mount *mp;
509	char *path;
510	caddr_t data;
511	struct nameidata *ndp;
512	struct proc *p;
513{
514	return (0);
515}
516
/*
 * Default VFS_UNMOUNT(): nothing to tear down, always succeeds.
 */
int
vfs_stdunmount(struct mount *mp, int mntflags, struct proc *p)
{

	return (0);
}
525
/*
 * Default VFS_ROOT(): a real filesystem must provide its own root
 * vnode lookup, so this always fails.
 */
int
vfs_stdroot(struct mount *mp, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}
533
/*
 * Default VFS_STATFS(): no statistics available.
 */
int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp, struct proc *p)
{

	return (EOPNOTSUPP);
}
542
/*
 * Default VFS_VPTOFH(): file handles are not supported.
 */
int
vfs_stdvptofh(struct vnode *vp, struct fid *fhp)
{

	return (EOPNOTSUPP);
}
550
/*
 * Default VFS_START(): no per-mount startup needed, always succeeds.
 */
int
vfs_stdstart(struct mount *mp, int flags, struct proc *p)
{

	return (0);
}
559
560int
561vfs_stdquotactl (mp, cmds, uid, arg, p)
562	struct mount *mp;
563	int cmds;
564	uid_t uid;
565	caddr_t arg;
566	struct proc *p;
567{
568	return (EOPNOTSUPP);
569}
570
/*
 * Default VFS_SYNC(): nothing cached, so there is nothing to flush.
 */
int
vfs_stdsync(struct mount *mp, int waitfor, struct ucred *cred, struct proc *p)
{

	return (0);
}
580
/*
 * Default VFS_VGET(): inode-number lookup is not supported.
 */
int
vfs_stdvget(struct mount *mp, ino_t ino, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}
589
/*
 * Default VFS_FHTOVP(): file handles are not supported.
 */
int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}
598
/*
 * Default VFS_CHECKEXP(): the filesystem is never exported, so no
 * client address can be validated.
 */
int
vfs_stdcheckexp(struct mount *mp, struct sockaddr *nam, int *extflagsp,
    struct ucred **credanonp)
{

	return (EOPNOTSUPP);
}
608
/*
 * Default VFS_INIT(): no global initialization required.
 */
int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}
615
/*
 * Default VFS_UNINIT(): no global teardown required.
 */
int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}
622
623int
624vfs_stdextattrctl(mp, cmd, attrname, arg, p)
625	struct mount *mp;
626	int cmd;
627	const char *attrname;
628	caddr_t arg;
629	struct proc *p;
630{
631	return(EOPNOTSUPP);
632}
633
634/* end of vfs default ops */
635