--- vfs_default.c	(revision 30739)
+++ vfs_default.c	(revision 30743)
 /*
  * Copyright (c) 1989, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed
  * to Berkeley by John Heidemann of the UCLA Ficus project.
  *
  * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project

--- 26 unchanged lines hidden ---

  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */

 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
+#include <sys/malloc.h>
 #include <sys/mount.h>
 #include <sys/unistd.h>
 #include <sys/vnode.h>
+#include <sys/poll.h>
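The two new headers match the code added further down: <sys/malloc.h> supplies the MALLOC() macro and the M_VNODE type used by the lock stubs, and <sys/poll.h> supplies the POLLIN/POLLOUT/POLLRDNORM/POLLWRNORM event bits used by vop_nopoll().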
 
 static int vop_nostrategy __P((struct vop_strategy_args *));
 
 /*
  * This vnode table stores what we want to do if the filesystem doesn't
  * implement a particular VOP.
  *
  * If there is no specific entry here, we will return EOPNOTSUPP.
  *
  */
 
 vop_t **default_vnodeop_p;
 static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
 	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
-	{ &vop_abortop_desc,	(vop_t *) nullop },
+	{ &vop_abortop_desc,	(vop_t *) vop_null },
 	{ &vop_advlock_desc,	(vop_t *) vop_einval },
-	{ &vop_bwrite_desc,	(vop_t *) vn_bwrite },
+	{ &vop_bwrite_desc,	(vop_t *) vop_stdbwrite },
 	{ &vop_close_desc,	(vop_t *) vop_null },
 	{ &vop_fsync_desc,	(vop_t *) vop_null },
 	{ &vop_ioctl_desc,	(vop_t *) vop_enotty },
 	{ &vop_islocked_desc,	(vop_t *) vop_noislocked },
 	{ &vop_lease_desc,	(vop_t *) vop_null },
 	{ &vop_lock_desc,	(vop_t *) vop_nolock },
 	{ &vop_mmap_desc,	(vop_t *) vop_einval },
 	{ &vop_open_desc,	(vop_t *) vop_null },

--- 141 unchanged lines hidden ---

 	struct vnode *a_vp;
 } */ *ap;
 {
 	struct lock *l = (struct lock *)ap->a_vp->v_data;
 
 	return (lockstatus(l));
 }
 
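For context on the table above: a filesystem publishes its own vnodeop vector the same way this file does, and any VOP it leaves out resolves to the entry bound to vop_default_desc. A minimal sketch, assuming the VNODEOP_SET registration macro from <sys/vnode.h>; all myfs_* names are hypothetical:

	static int myfs_open __P((struct vop_open_args *));	/* hypothetical */

	vop_t **myfs_vnodeop_p;
	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
		/* Any VOP not listed below falls back to this entry. */
		{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
		{ &vop_open_desc,	(vop_t *) myfs_open },
		{ NULL, NULL }				/* table terminator */
	};
	static struct vnodeopv_desc myfs_vnodeop_opv_desc =
		{ &myfs_vnodeop_p, myfs_vnodeop_entries };
	VNODEOP_SET(myfs_vnodeop_opv_desc);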
+/*
+ * Return true for select/poll.
+ */
+int
+vop_nopoll(ap)
+	struct vop_poll_args /* {
+		struct vnode *a_vp;
+		int a_events;
+		struct ucred *a_cred;
+		struct proc *a_p;
+	} */ *ap;
+{
+
+	/*
+	 * Just return what we were asked for.
+	 */
+	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
+}
+
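vop_nopoll() claims readiness for the normal read/write events unconditionally, which is correct for objects that never block. A vnode that can block would instead report only the events that are actually ready and otherwise record the poller for a later wakeup. A rough sketch under that assumption (struct myfs_node with its n_bytes count and n_sel selinfo are hypothetical):

	static int
	myfs_poll(ap)
		struct vop_poll_args /* {
			struct vnode *a_vp;
			int a_events;
			struct ucred *a_cred;
			struct proc *a_p;
		} */ *ap;
	{
		struct myfs_node *np = (struct myfs_node *)ap->a_vp->v_data;
		int revents = 0;

		/* Report read-side readiness only when data is queued. */
		if ((ap->a_events & (POLLIN | POLLRDNORM)) && np->n_bytes > 0)
			revents |= ap->a_events & (POLLIN | POLLRDNORM);
		/* Nothing ready: remember the poller so it can be woken later. */
		if (revents == 0)
			selrecord(ap->a_p, &np->n_sel);
		return (revents);
	}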
+int
+vop_stdbwrite(ap)
+	struct vop_bwrite_args *ap;
+{
+	return (bwrite(ap->a_bp));
+}
+
+/*
+ * Stubs to use when there is no locking to be done on the underlying object.
+ * A minimal shared lock is necessary to ensure that the underlying object
+ * is not revoked while an operation is in progress. So, an active shared
+ * count is maintained in an auxiliary vnode lock structure.
+ */
+int
+vop_sharedlock(ap)
+	struct vop_lock_args /* {
+		struct vnode *a_vp;
+		int a_flags;
+		struct proc *a_p;
+	} */ *ap;
+{
+	/*
+	 * This code cannot be used until all the non-locking filesystems
+	 * (notably NFS) are converted to properly lock and release nodes.
+	 * Also, certain vnode operations change the locking state within
+	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
+	 * and symlink). Ideally these operations should not change the
+	 * lock state, but should be changed to let the caller of the
+	 * function unlock them. Otherwise all intermediate vnode layers
+	 * (such as union, umapfs, etc) must catch these functions to do
+	 * the necessary locking at their layer. Note that the inactive
+	 * and lookup operations also change their lock state, but this
+	 * cannot be avoided, so these two operations will always need
+	 * to be handled in intermediate layers.
+	 */
+	struct vnode *vp = ap->a_vp;
+	int vnflags, flags = ap->a_flags;
+
+	if (vp->v_vnlock == NULL) {
+		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
+			return (0);
+		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
+		    M_VNODE, M_WAITOK);
+		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
+	}
+	switch (flags & LK_TYPE_MASK) {
+	case LK_DRAIN:
+		vnflags = LK_DRAIN;
+		break;
+	case LK_EXCLUSIVE:
+#ifdef DEBUG_VFS_LOCKS
+		/*
+		 * Normally, we use shared locks here, but that confuses
+		 * the locking assertions.
+		 */
+		vnflags = LK_EXCLUSIVE;
+		break;
+#endif
+	case LK_SHARED:
+		vnflags = LK_SHARED;
+		break;
+	case LK_UPGRADE:
+	case LK_EXCLUPGRADE:
+	case LK_DOWNGRADE:
+		return (0);
+	case LK_RELEASE:
+	default:
+		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
+	}
+	if (flags & LK_INTERLOCK)
+		vnflags |= LK_INTERLOCK;
+	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
+}
+
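These lock stubs sit underneath the usual caller-side pattern, in which a vnode operation is bracketed by vn_lock()/VOP_UNLOCK(); the shared count taken above is what keeps the vnode from being revoked mid-operation. An illustrative caller (declarations elided; VOP_FSYNC stands in for any vnode operation):

	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p)) != 0)
		return (error);
	error = VOP_FSYNC(vp, cred, MNT_WAIT, p);
	VOP_UNLOCK(vp, 0, p);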
+/*
+ * Stubs to use when there is no locking to be done on the underlying object.
+ * A minimal shared lock is necessary to ensure that the underlying object
+ * is not revoked while an operation is in progress. So, an active shared
+ * count is maintained in an auxiliary vnode lock structure.
+ */
+int
+vop_nolock(ap)
+	struct vop_lock_args /* {
+		struct vnode *a_vp;
+		int a_flags;
+		struct proc *a_p;
+	} */ *ap;
+{
+#ifdef notyet
+	/*
+	 * This code cannot be used until all the non-locking filesystems
+	 * (notably NFS) are converted to properly lock and release nodes.
+	 * Also, certain vnode operations change the locking state within
+	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
+	 * and symlink). Ideally these operations should not change the
+	 * lock state, but should be changed to let the caller of the
+	 * function unlock them. Otherwise all intermediate vnode layers
+	 * (such as union, umapfs, etc) must catch these functions to do
+	 * the necessary locking at their layer. Note that the inactive
+	 * and lookup operations also change their lock state, but this
+	 * cannot be avoided, so these two operations will always need
+	 * to be handled in intermediate layers.
+	 */
+	struct vnode *vp = ap->a_vp;
+	int vnflags, flags = ap->a_flags;
+
+	if (vp->v_vnlock == NULL) {
+		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
+			return (0);
+		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
+		    M_VNODE, M_WAITOK);
+		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
+	}
+	switch (flags & LK_TYPE_MASK) {
+	case LK_DRAIN:
+		vnflags = LK_DRAIN;
+		break;
+	case LK_EXCLUSIVE:
+	case LK_SHARED:
+		vnflags = LK_SHARED;
+		break;
+	case LK_UPGRADE:
+	case LK_EXCLUPGRADE:
+	case LK_DOWNGRADE:
+		return (0);
+	case LK_RELEASE:
+	default:
+		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
+	}
+	if (flags & LK_INTERLOCK)
+		vnflags |= LK_INTERLOCK;
+	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
+#else /* for now */
+	/*
+	 * Since we are not using the lock manager, we must clear
+	 * the interlock here.
+	 */
+	if (ap->a_flags & LK_INTERLOCK) {
+		simple_unlock(&ap->a_vp->v_interlock);
+	}
+	return (0);
+#endif
+}
+
+/*
+ * Do the inverse of vop_nolock, handling the interlock in a compatible way.
+ */
+int
+vop_nounlock(ap)
+	struct vop_unlock_args /* {
+		struct vnode *a_vp;
+		int a_flags;
+		struct proc *a_p;
+	} */ *ap;
+{
+	struct vnode *vp = ap->a_vp;
+
+	if (vp->v_vnlock == NULL) {
+		if (ap->a_flags & LK_INTERLOCK)
+			simple_unlock(&ap->a_vp->v_interlock);
+		return (0);
+	}
+	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
+	    &ap->a_vp->v_interlock, ap->a_p));
+}
+
+/*
+ * Return whether or not the node is locked.
+ */
+int
+vop_noislocked(ap)
+	struct vop_islocked_args /* {
+		struct vnode *a_vp;
+	} */ *ap;
+{
+	struct vnode *vp = ap->a_vp;
+
+	if (vp->v_vnlock == NULL)
+		return (0);
+	return (lockstatus(vp->v_vnlock));
+}
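The islocked result is mostly consumed by sanity checks (compare the DEBUG_VFS_LOCKS special case in vop_sharedlock() above). A typical assertion in a filesystem entry point, sketched with a hypothetical myfs_write:

	if (VOP_ISLOCKED(vp) == 0)
		panic("myfs_write: vnode not locked");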