/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_subr.c	8.7 (Berkeley) 5/14/95
 *
 * $FreeBSD: head/sys/fs/nullfs/null_subr.c 143642 2005-03-15 13:49:33Z jeff $
 */
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/malloc.h>
43#include <sys/mount.h>
44#include <sys/proc.h>
45#include <sys/vnode.h>
46
47#include <fs/nullfs/null.h>
48
#define LOG2_SIZEVNODE 8		/* log2(sizeof struct vnode) */
#define	NNULLNODECACHE 16		/* size hint passed to hashinit() */

/*
 * Null layer cache:
 * Each cache entry holds a reference to the lower vnode
 * along with a pointer to the alias vnode.  When an
 * entry is added the lower vnode is VREF'd.  When the
 * alias is removed the lower vnode is vrele'd.
 */

/*
 * Hash on the lower vnode's address, discarding the low bits that are
 * equal for all vnodes because of allocation size/alignment.
 */
#define	NULL_NHASH(vp) \
	(&null_node_hashtbl[(((uintptr_t)vp)>>LOG2_SIZEVNODE) & null_node_hash])

/* Hash table mapping lower vnodes to their nullfs alias nodes. */
static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
static u_long null_node_hash;		/* hash mask from hashinit() */
struct mtx null_hashmtx;		/* protects null_node_hashtbl */

static MALLOC_DEFINE(M_NULLFSHASH, "NULLFS hash", "NULLFS hash table");
MALLOC_DEFINE(M_NULLFSNODE, "NULLFS node", "NULLFS vnode private part");

static struct vnode * null_hashget(struct mount *, struct vnode *);
static struct vnode * null_hashins(struct mount *, struct null_node *);
72
73/*
74 * Initialise cache headers
75 */
76int
77nullfs_init(vfsp)
78	struct vfsconf *vfsp;
79{
80
81	NULLFSDEBUG("nullfs_init\n");		/* printed during system boot */
82	null_node_hashtbl = hashinit(NNULLNODECACHE, M_NULLFSHASH, &null_node_hash);
83	mtx_init(&null_hashmtx, "nullhs", NULL, MTX_DEF);
84	return (0);
85}
86
87int
88nullfs_uninit(vfsp)
89	struct vfsconf *vfsp;
90{
91
92	mtx_destroy(&null_hashmtx);
93	free(null_node_hashtbl, M_NULLFSHASH);
94	return (0);
95}
96
/*
 * Return a VREF'ed alias for lower vnode if one already exists, else NULLVP.
 * Lower vnode should be locked on entry and will be left locked on exit.
 */
static struct vnode *
null_hashget(mp, lowervp)
	struct mount *mp;
	struct vnode *lowervp;
{
	struct thread *td = curthread;	/* XXX */
	struct null_node_hashhead *hd;
	struct null_node *a;
	struct vnode *vp;

	/*
	 * Find hash base, and then search the (two-way) linked
	 * list looking for a null_node structure which is referencing
	 * the lower vnode.  If found, then increment the null_node
	 * reference count (but NOT the lower vnode's VREF counter).
	 * Matching on mp as well allows multiple nullfs mounts over
	 * the same lower vnode.
	 */
	hd = NULL_NHASH(lowervp);
loop:
	mtx_lock(&null_hashmtx);
	LIST_FOREACH(a, hd, null_hash) {
		if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
			vp = NULLTOV(a);
			/* Take the interlock before dropping the hash lock. */
			VI_LOCK(vp);
			/*
			 * If the nullfs node is being recycled we have
			 * to wait until it finishes prior to scanning
			 * again.
			 */
			mtx_unlock(&null_hashmtx);
			if ((vp->v_iflag & VI_DOOMED) != 0) {
				/* Wait for recycling to finish. */
				VOP_LOCK(vp, LK_EXCLUSIVE|LK_INTERLOCK, td);
				VOP_UNLOCK(vp, 0, td);
				goto loop;
			}
			/*
			 * NOTE(review): vget() return value is ignored here;
			 * presumably it cannot fail after the VI_DOOMED check
			 * above, but this should be confirmed.
			 */
			vget(vp, LK_INTERLOCK, td);
			return (vp);
		}
	}
	mtx_unlock(&null_hashmtx);
	return (NULLVP);
}
143
/*
 * Act like null_hashget, but add passed null_node to hash if no existing
 * node found.  Returns NULLVP after inserting xp, or a vget'ed reference
 * to the already-hashed alias if another thread won the race.  The hash
 * mutex makes the lookup-or-insert atomic.
 */
static struct vnode *
null_hashins(mp, xp)
	struct mount *mp;
	struct null_node *xp;
{
	struct thread *td = curthread;	/* XXX */
	struct null_node_hashhead *hd;
	struct null_node *oxp;
	struct vnode *ovp;

	hd = NULL_NHASH(xp->null_lowervp);
loop:
	mtx_lock(&null_hashmtx);
	LIST_FOREACH(oxp, hd, null_hash) {
		if (oxp->null_lowervp == xp->null_lowervp &&
		    NULLTOV(oxp)->v_mount == mp) {
			ovp = NULLTOV(oxp);
			/* Take the interlock before dropping the hash lock. */
			VI_LOCK(ovp);
			/*
			 * If the nullfs node is being recycled we have
			 * to wait until it finishes prior to scanning
			 * again.
			 */
			mtx_unlock(&null_hashmtx);
			if ((ovp->v_iflag & VI_DOOMED) != 0) {
				VOP_LOCK(ovp, LK_EXCLUSIVE|LK_INTERLOCK, td);
				VOP_UNLOCK(ovp, 0, td);
				goto loop;
			}
			/*
			 * NOTE(review): vget() return value is ignored here;
			 * presumably it cannot fail after the VI_DOOMED check
			 * above, but this should be confirmed.
			 */
			vget(ovp, LK_INTERLOCK, td);
			return (ovp);
		}
	}
	/* No existing alias: publish xp while still holding the hash lock. */
	LIST_INSERT_HEAD(hd, xp, null_hash);
	mtx_unlock(&null_hashmtx);
	return (NULLVP);
}
185
/*
 * Make a new or get existing nullfs node.
 * Vp is the alias vnode, lowervp is the lower vnode.
 *
 * The lowervp assumed to be locked and having "spare" reference. This routine
 * vrele lowervp if nullfs node was taken from hash. Otherwise it "transfers"
 * the caller's "spare" reference to created nullfs vnode.
 */
int
null_nodeget(mp, lowervp, vpp)
	struct mount *mp;
	struct vnode *lowervp;
	struct vnode **vpp;
{
	struct null_node *xp;
	struct vnode *vp;
	int error;

	/* Lookup the hash firstly */
	*vpp = null_hashget(mp, lowervp);
	if (*vpp != NULL) {
		/* Existing alias found: drop the caller's spare reference. */
		vrele(lowervp);
		return (0);
	}

	/*
	 * We do not serialize vnode creation, instead we will check for
	 * duplicates later, when adding new vnode to hash.
	 *
	 * Note that duplicate can only appear in hash if the lowervp is
	 * locked LK_SHARED.
	 */

	/*
	 * Do the MALLOC before the getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if MALLOC should block.
	 */
	MALLOC(xp, struct null_node *, sizeof(struct null_node),
	    M_NULLFSNODE, M_WAITOK);

	error = getnewvnode("null", mp, &null_vnodeops, &vp);
	if (error) {
		FREE(xp, M_NULLFSNODE);
		return (error);
	}

	/* Cross-link the alias and the lower vnode. */
	xp->null_vnode = vp;
	xp->null_lowervp = lowervp;
	vp->v_type = lowervp->v_type;
	vp->v_data = xp;
	/* Share the lower vnode's lock so locking the alias locks both. */
	vp->v_vnlock = lowervp->v_vnlock;
	if (vp->v_vnlock == NULL)
		panic("null_nodeget: Passed a NULL vnlock.\n");
	/*
	 * Atomically insert our new node into the hash or vget existing
	 * if someone else has beaten us to it.
	 */
	*vpp = null_hashins(mp, xp);
	if (*vpp != NULL) {
		/*
		 * Lost the race: another thread hashed an alias first
		 * (null_hashins already vget'ed it for us).  Drop the
		 * caller's spare reference on lowervp, give our
		 * half-constructed vnode a private lock again so it can
		 * be released independently, detach it from the lower
		 * vnode, and let it go.
		 */
		vrele(lowervp);
		vp->v_vnlock = &vp->v_lock;
		xp->null_lowervp = NULL;
		vrele(vp);
		return (0);
	}
	*vpp = vp;

	return (0);
}
256
257/*
258 * Remove node from hash.
259 */
260void
261null_hashrem(xp)
262	struct null_node *xp;
263{
264
265	mtx_lock(&null_hashmtx);
266	LIST_REMOVE(xp, null_hash);
267	mtx_unlock(&null_hashmtx);
268}
269
#ifdef DIAGNOSTIC

/*
 * With a kernel debugger configured, spin before panicking so a
 * developer can attach and inspect state; otherwise fall straight
 * through to panic().
 */
#ifdef KDB
#define	null_checkvp_barrier	1
#else
#define	null_checkvp_barrier	0
#endif

/*
 * Sanity-check a nullfs vnode: its private data must point at a lower
 * vnode and that lower vnode must still be referenced.  Returns the
 * lower vnode on success; dumps state and panics otherwise.  'fil' and
 * 'lno' identify the calling site for the (disabled) trace printf.
 */
struct vnode *
null_checkvp(vp, fil, lno)
	struct vnode *vp;
	char *fil;
	int lno;
{
	struct null_node *a = VTONULL(vp);
#ifdef notyet
	/*
	 * Can't do this check because vop_reclaim runs
	 * with a funny vop vector.
	 */
	if (vp->v_op != null_vnodeop_p) {
		printf ("null_checkvp: on non-null-node\n");
		while (null_checkvp_barrier) /*WAIT*/ ;
		panic("null_checkvp");
	}
#endif
	if (a->null_lowervp == NULLVP) {
		/* Should never happen */
		int i; u_long *p;
		printf("vp = %p, ZERO ptr\n", (void *)vp);
		/* Dump the leading words of the null_node for post-mortem. */
		for (p = (u_long *) a, i = 0; i < 8; i++)
			printf(" %lx", p[i]);
		printf("\n");
		/* wait for debugger */
		while (null_checkvp_barrier) /*WAIT*/ ;
		panic("null_checkvp");
	}
	if (vrefcnt(a->null_lowervp) < 1) {
		int i; u_long *p;
		printf("vp = %p, unref'ed lowervp\n", (void *)vp);
		for (p = (u_long *) a, i = 0; i < 8; i++)
			printf(" %lx", p[i]);
		printf("\n");
		/* wait for debugger */
		while (null_checkvp_barrier) /*WAIT*/ ;
		panic ("null with unref'ed lowervp");
	}
#ifdef notyet
	/* Was "%x" for pointer arguments: undefined behavior; use %p. */
	printf("null %p/%d -> %p/%d [%s, %d]\n",
	    (void *)NULLTOV(a), vrefcnt(NULLTOV(a)),
	    (void *)a->null_lowervp, vrefcnt(a->null_lowervp),
	    fil, lno);
#endif
	return a->null_lowervp;
}
#endif
326