vfs_default.c (r164248 → r166774)
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 166774 2007-02-15 22:08:35Z pjd $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_advlock =		VOP_EINVAL,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount = 	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lease =		VOP_NULL,
	._vop_lock =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptofh =		vop_stdvptofh,
};
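
/*
 * Illustrative sketch (not from this file): a filesystem normally points
 * .vop_default at default_vnodeops so that any VOP it does not implement
 * falls through to the defaults above.  The myfs_* names below are
 * hypothetical.
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_read =	myfs_read,
 *	};
 */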

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
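
/*
 * Example (a hedged sketch; myfs_pathconf and MYFS_NAME_MAX are
 * hypothetical): a filesystem with a smaller name limit would handle that
 * one case itself and fall back to vop_stdpathconf() for everything else:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		if (ap->a_name == _PC_NAME_MAX) {
 *			*ap->a_retval = MYFS_NAME_MAX;
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */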

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct _vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
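
/*
 * Usage sketch (hypothetical myfs): vop_nopoll is the default; a local
 * filesystem that actually supports polling on its vnodes would install
 * the standard implementation instead, so that non-standard events are
 * recorded with vn_pollrecord() rather than rejected with POLLNVAL:
 *
 *	.vop_poll =	vop_stdpoll,
 */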

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
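
/*
 * Worked example for the identity mapping above (a sketch, not from this
 * file): btodb() converts bytes to DEV_BSIZE (512-byte) disk blocks, so
 * with f_iosize = 16384 a logical block bn maps to disk block bn * 32:
 *
 *	daddr_t bn = 5;
 *	daddr_t dbn = bn * btodb(16384);	(5 * 32 = 160)
 *
 * The zero run lengths tell the caller that this default implementation
 * provides no read-ahead/read-behind clustering information.
 */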

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;	/* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		KASSERT(bp->b_bufobj == &vp->v_bufobj,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, &vp->v_bufobj));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous, the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}
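
/*
 * A sketch of the reasoning behind the MARK/SCAN pattern above: BV_SCANNED
 * is cleared on every dirty buffer first, then set as each buffer is
 * visited.  Because flushing a buffer drops the vnode interlock and may
 * reorder the dirty list, the scan restarts from the head after every
 * write; the flag guarantees each buffer is written at most once per pass,
 * so the restart cannot loop forever.
 */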

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return VFS_VPTOFH(ap->a_vp, ap->a_fhp);
}

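/*
 * Illustrative note (a sketch, based on the #error below): VOP_VPTOFH
 * replaces the older per-mount VFS_VPTOFH interface, and vop_stdvptofh()
 * bridges the two by delegating to the mount's vfs_vptofh implementation.
 * A filesystem implementing the vnode-level interface directly would hook
 * it like this (myfs_vptofh is hypothetical):
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_vptofh =	myfs_vptofh,
 *	};
 */
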
/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
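/*
 * Example (a hedged sketch; the myfs_* names are hypothetical): a
 * filesystem's vfsops table typically provides only the operations it
 * really implements, and unset (NULL) slots are filled in with these
 * vfs_std* defaults when the filesystem registers:
 *
 *	static struct vfsops myfs_vfsops = {
 *		.vfs_mount =	myfs_mount,
 *		.vfs_unmount =	myfs_unmount,
 *		.vfs_root =	myfs_root,
 *		.vfs_statfs =	myfs_statfs,
 *	};
 *	VFS_SET(myfs_vfsops, myfs, 0);
 */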
int
vfs_stdroot (mp, flags, vpp, td)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

#if __FreeBSD_version < 800000
int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{

	return (EOPNOTSUPP);
}
#else
#error Remove this code, vfs_vptofh was replaced with vop_vptofh.
#endif

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{
	struct vnode *vp, *mvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;

		/* Do not turn this into vput.  td is not always curthread. */
		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}
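
/*
 * A note on the vput() warning above (a sketch of the reasoning): vput()
 * unlocks the vnode on behalf of curthread, while the lock here was
 * acquired for td, which may be a different thread; the explicit
 * VOP_UNLOCK(..., td) followed by vrele() keeps the lock owner consistent.
 */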

int
vfs_stdnosync (mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */