vfs_default.c revision 54655
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 54655 1999-12-15 23:02:35Z eivind $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

static int vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_reallocblks_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
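
/*
 * Example (an illustrative, uncompiled sketch): a hypothetical
 * filesystem "foofs" would fall back on the table above by routing
 * vop_default_desc through vop_defaultop (defined below), supplying
 * explicit entries only for the operations it implements.  foofs and
 * foofs_read are hypothetical names.
 */
#if 0
vop_t **foofs_vnodeop_p;
static struct vnodeopv_entry_desc foofs_vnodeop_entries[] = {
	{ &vop_default_desc,	(vop_t *) vop_defaultop },
	{ &vop_read_desc,	(vop_t *) foofs_read },
	{ NULL, NULL }
};
static struct vnodeopv_desc foofs_vnodeop_opv_desc =
	{ &foofs_vnodeop_p, foofs_vnodeop_entries };
VNODEOP_SET(foofs_vnodeop_opv_desc);
#endif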

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	printf("vop_panic[%s]\n", ap->a_desc->vdesc_name);
	panic("Filesystem goof");
	return (0);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for filesystems that do not supply one.
 *
 *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  This is typically done for a B_READ strategy call.
 *	B_INVAL is assumed to already be clear prior to a write and should
 *	not be cleared manually unless you just made the buffer invalid.
 *	B_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_flags |= B_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	biodone(ap->a_bp);
	return (EOPNOTSUPP);
}
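
/*
 * Illustrative sketch of the protocol described above: a read path
 * would clear B_ERROR and B_INVAL before handing the buffer to the
 * strategy routine, e.g.:
 *
 *	bp->b_flags &= ~(B_ERROR | B_INVAL);
 *	bp->b_flags |= B_READ;
 *	VOP_STRATEGY(vp, bp);
 *	error = biowait(bp);
 */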

int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, i.e.: vp->v_data points to the lock!
 */
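
/*
 * Illustrative sketch (hypothetical "foofs" node): the cast of v_data
 * below is only valid if the lock is the first member, e.g.:
 *
 *	struct foofs_node {
 *		struct lock	fn_lock;	<- must be first
 *		...				<- filesystem-specific data
 *	};
 *
 * with vp->v_data pointing at the foofs_node.
 */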
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

#ifndef	DEBUG_LOCKS
	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
		return 0;

	return (lockstatus(l, ap->a_p));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
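
/*
 * Illustrative sketch (hypothetical userland probe): a client can
 * detect the absence of extended poll support by requesting a
 * non-standard event and checking for POLLNVAL, e.g.:
 *
 *	pfd.fd = fd;
 *	pfd.events = POLLATTRIB;
 *	if (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLNVAL))
 *		extended_support = 0;
 */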

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if ((ap->a_events & ~POLLSTANDARD) == 0)
		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}
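
/*
 * Illustrative sketch: a filesystem using vop_stdpoll is expected to
 * call vn_pollevent() when the vnode's state changes, so that pollers
 * recorded by vn_pollrecord() above are woken, e.g.:
 *
 *	vn_pollevent(vp, POLLIN | POLLRDNORM);
 */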

int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
		/* FALLTHROUGH: without DEBUG_VFS_LOCKS, LK_EXCLUSIVE maps to a shared lock */
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}
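
/*
 * Illustrative sketch: a filesystem relying on these stubs would route
 * the lock-related slots of its vnodeopv_entry_desc table here, e.g.:
 *
 *	{ &vop_lock_desc,	(vop_t *) vop_sharedlock },
 *	{ &vop_unlock_desc,	(vop_t *) vop_nounlock },
 *	{ &vop_islocked_desc,	(vop_t *) vop_noislocked },
 */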

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}
	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
		&ap->a_vp->v_interlock, ap->a_p));
}

/*
 * Return the lock status of the node (see vop_nolock).
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock, ap->a_p));
}

/*
 * VFS default ops: used to fill the VFS function table
 * with reasonable default return values.
 */
int
vfs_stdmount (mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	return (0);
}

int
vfs_stdunmount (mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	return (0);
}

int
vfs_stdvget (mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp (mp, nam, extflagsp, credanonp)
	struct mount *mp;
	struct sockaddr *nam;
	int *extflagsp;
	struct ucred **credanonp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}
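
/*
 * Example (an illustrative, uncompiled sketch): a hypothetical
 * filesystem "foofs" implementing only mount, unmount, root and statfs
 * would fill the remaining slots with the defaults above.  The slot
 * order shown is meant to follow struct vfsops and is illustrative;
 * the foofs_* functions are hypothetical.
 */
#if 0
static struct vfsops foofs_vfsops = {
	foofs_mount,
	vfs_stdstart,
	foofs_unmount,
	foofs_root,
	vfs_stdquotactl,
	foofs_statfs,
	vfs_stdsync,
	vfs_stdvget,
	vfs_stdfhtovp,
	vfs_stdcheckexp,
	vfs_stdvptofh,
	vfs_stdinit,
	vfs_stduninit
};
VFS_SET(foofs_vfsops, foofs, 0);
#endif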

/* end of vfs default ops */