vfs_default.c revision 64819
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 64819 2000-08-18 10:01:02Z phk $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

static int	vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

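/*
 * Usage sketch (hypothetical filesystem; the myfs_* names are
 * illustrative and not part of this file): a filesystem fills in only
 * the operations it implements and routes everything else through
 * vop_defaultop, which dispatches into the default table above.
 */
#ifdef notdef
static vop_t **myfs_vnodeop_p;
static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_lookup_desc,		(vop_t *) myfs_lookup },
	{ &vop_read_desc,		(vop_t *) myfs_read },
	{ &vop_write_desc,		(vop_t *) myfs_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc myfs_vnodeop_opv_desc =
	{ &myfs_vnodeop_p, myfs_vnodeop_entries };
VNODEOP_SET(myfs_vnodeop_opv_desc);
#endif
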
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	printf("vop_panic[%s]\n", ap->a_desc->vdesc_name);
	panic("Filesystem goof");
	return (0);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

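/*
 * Caller-side sketch of the flag discipline described above (an
 * assumed helper, not part of this file): clear BIO_ERROR and B_INVAL
 * before handing a buffer to a BIO_READ strategy call.
 */
#ifdef notdef
static int
example_bread_strategy(struct vnode *vp, struct buf *bp)
{

	bp->b_ioflags &= ~BIO_ERROR;	/* always clear stale error state */
	bp->b_flags &= ~B_INVAL;	/* the buffer contents are wanted */
	bp->b_iocmd = BIO_READ;
	VOP_STRATEGY(vp, bp);
	return (biowait(bp));
}
#endif
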
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

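/*
 * Usage sketch (assumed caller, not part of this file): querying a
 * pathconf variable through the VOP layer; a filesystem that points
 * vop_pathconf at vop_stdpathconf answers with the POSIX defaults
 * above.
 */
#ifdef notdef
static int
example_pathconf(struct vnode *vp)
{
	int error, linkmax;

	error = VOP_PATHCONF(vp, _PC_LINK_MAX, &linkmax);
	if (error == 0)
		printf("link max: %d\n", linkmax);
	return (error);
}
#endif
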
/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, i.e., vp->v_data points to the lock!
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

#ifndef	DEBUG_LOCKS
	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
		return 0;

	return (lockstatus(l, ap->a_p));
}

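/*
 * Layout sketch (hypothetical filesystem, not part of this file): the
 * three functions above cast v_data to a struct lock, so a filesystem
 * using them must place the lock first in its per-vnode node.
 */
#ifdef notdef
struct myfs_node {
	struct lock	n_lock;	/* must be first; v_data points here */
	/* filesystem-private fields follow */
};
#endif
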
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if ((ap->a_events & ~POLLSTANDARD) == 0)
		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}

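/*
 * Caller-side sketch (assumed, not part of this file): probing for
 * extended poll support.  vop_nopoll answers POLLNVAL for any event
 * outside POLLSTANDARD, so a POLLNVAL result tells the caller the
 * extension is absent without filesystem-specific knowledge.
 */
#ifdef notdef
static int
example_poll_probe(struct vnode *vp, struct ucred *cred, struct proc *p)
{

	return (VOP_POLL(vp, POLLEXTEND, cred, p) & POLLNVAL);
}
#endif
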
int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
		/* FALLTHROUGH: exclusive requests get a shared lock */
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}
	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
		&ap->a_vp->v_interlock, ap->a_p));
}

/*
 * Return whether or not the node is locked.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock, ap->a_p));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/*
 * vfs default ops
 * Used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdmount (mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	return (0);
}

int
vfs_stdunmount (mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	return (0);
}

int
vfs_stdvget (mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp (mp, nam, extflagsp, credanonp)
	struct mount *mp;
	struct sockaddr *nam;
	int *extflagsp;
	struct ucred **credanonp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stdextattrctl(mp, cmd, attrname, arg, p)
	struct mount *mp;
	int cmd;
	const char *attrname;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

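/*
 * Usage sketch (hypothetical filesystem; the myfs_* names are
 * illustrative, and the field order is assumed to follow this era's
 * struct vfsops): a filesystem overrides only what it implements and
 * plugs the defaults above into the remaining slots.
 */
#ifdef notdef
static struct vfsops myfs_vfsops = {
	myfs_mount,
	vfs_stdstart,
	myfs_unmount,
	myfs_root,
	vfs_stdquotactl,
	myfs_statfs,
	vfs_stdsync,
	vfs_stdvget,
	vfs_stdfhtovp,
	vfs_stdcheckexp,
	vfs_stdvptofh,
	vfs_stdinit,
	vfs_stduninit,
	vfs_stdextattrctl,
};
VFS_SET(myfs_vfsops, myfs, 0);
#endif
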
/* end of vfs default ops */