vfs_default.c: diff between FreeBSD revisions 59249 (2000-04-15, phk) and 60041 (2000-05-05, phk).

The only changes between the two revisions are the updated $FreeBSD$ keyword line and one new include:

  deleted:   * $FreeBSD: head/sys/kern/vfs_default.c 59249 2000-04-15 05:54:02Z phk $
  added:     * $FreeBSD: head/sys/kern/vfs_default.c 60041 2000-05-05 09:59:14Z phk $
  added:    #include <sys/bio.h>

The full text of revision 60041 follows.
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 60041 2000-05-05 09:59:14Z phk $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

static int	vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_reallocblks_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ &vop_getacl_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_setacl_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_aclcheck_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_getextattr_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_setextattr_desc,		(vop_t *) vop_eopnotsupp },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

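/*
 * Illustrative sketch (not part of this file): a filesystem registers its
 * own table the same way, overriding the operations it implements and
 * routing everything else to the defaults above via vop_defaultop().
 * The myfs_* names are hypothetical.
 */
#if 0
vop_t **myfs_vnodeop_p;
static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_read_desc,		(vop_t *) myfs_read },
	{ &vop_write_desc,		(vop_t *) myfs_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc myfs_vnodeop_opv_desc =
	{ &myfs_vnodeop_p, myfs_vnodeop_entries };
VNODEOP_SET(myfs_vnodeop_opv_desc);
#endif
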
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	printf("vop_panic[%s]\n", ap->a_desc->vdesc_name);
	panic("Filesystem goof");
	return (0);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

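/*
 * Illustrative sketch (not part of this file): per the comment above, a
 * caller is expected to clear BIO_ERROR (and, if it just invalidated the
 * buffer, B_INVAL) before handing the buffer to a strategy routine, then
 * wait for the bufdone() completion.
 */
#if 0
	bp->b_ioflags &= ~BIO_ERROR;	/* required before any strategy call */
	bp->b_flags &= ~B_INVAL;	/* only if the buffer was just invalidated */
	VOP_STRATEGY(vp, bp);
	error = bufwait(bp);		/* wait for bufdone() from the driver */
#endif
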
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

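/*
 * Illustrative sketch (not part of this file): how one of the limits served
 * by vop_stdpathconf() is queried through the VOP interface; argument types
 * follow the vop_pathconf_args declaration above.
 */
#if 0
	int linkmax, error;

	error = VOP_PATHCONF(vp, _PC_LINK_MAX, &linkmax);
	/* error is EINVAL if the filesystem does not know the name */
#endif
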
/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, ie: vp->v_data points to the lock!
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}

#ifndef	DEBUG_LOCKS
	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}

	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
		return (0);

	return (lockstatus(l, ap->a_p));
}

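/*
 * Illustrative sketch (not part of this file): a filesystem relying on the
 * standard lock operations above must lay out its per-vnode data with the
 * struct lock first, because vop_stdlock() simply casts v_data to a lock
 * pointer.  The myfs_* names are hypothetical.
 */
#if 0
struct myfs_node {
	struct lock	n_lock;		/* must be first; v_data points here */
	/* ... filesystem-specific fields ... */
};

static void
myfs_setup_node(struct vnode *vp, struct myfs_node *np)
{
	lockinit(&np->n_lock, PVFS, "myfsnd", 0, LK_NOPAUSE);
	vp->v_data = np;		/* vop_stdlock() now finds &np->n_lock */
}
#endif
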
/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if ((ap->a_events & ~POLLSTANDARD) == 0)
		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}

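/*
 * Illustrative sketch (not part of this file): a filesystem using
 * vop_stdpoll() lets vn_pollrecord() queue waiters for the extended
 * (non-POLLSTANDARD) events; it is then expected to wake them when the
 * condition occurs.  vn_pollevent() below is the assumed counterpart
 * declared in <sys/vnode.h>.
 */
#if 0
	/* in the code path where the watched change happens: */
	vn_pollevent(vp, POLLWRITE);
#endif
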
int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}
	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
	    &ap->a_vp->v_interlock, ap->a_p));
}

/*
 * Return whether or not the node is in use.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock, ap->a_p));
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdmount(mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	return (0);
}

int
vfs_stdunmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdroot(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart(mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdquotactl(mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	return (0);
}

int
vfs_stdvget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp(mp, nam, extflagsp, credanonp)
	struct mount *mp;
	struct sockaddr *nam;
	int *extflagsp;
	struct ucred **credanonp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit(vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit(vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stdextattrctl(mp, cmd, attrname, arg, p)
	struct mount *mp;
	int cmd;
	const char *attrname;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

/* end of vfs default ops */
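
/*
 * Illustrative sketch (not part of this file): a minimal filesystem can fill
 * its struct vfsops almost entirely with the defaults above.  The member
 * order is assumed to follow struct vfsops in <sys/mount.h> of this era,
 * and myfs_mount/myfs_statfs are hypothetical.
 */
#if 0
static struct vfsops myfs_vfsops = {
	myfs_mount,		/* vfs_mount */
	vfs_stdstart,		/* vfs_start */
	vfs_stdunmount,		/* vfs_unmount */
	vfs_stdroot,		/* vfs_root */
	vfs_stdquotactl,	/* vfs_quotactl */
	myfs_statfs,		/* vfs_statfs */
	vfs_stdsync,		/* vfs_sync */
	vfs_stdvget,		/* vfs_vget */
	vfs_stdfhtovp,		/* vfs_fhtovp */
	vfs_stdcheckexp,	/* vfs_checkexp */
	vfs_stdvptofh,		/* vfs_vptofh */
	vfs_stdinit,		/* vfs_init */
	vfs_stduninit,		/* vfs_uninit */
	vfs_stdextattrctl,	/* vfs_extattrctl */
};
VFS_SET(myfs_vfsops, myfs, 0);
#endif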