/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_subr.c	8.7 (Berkeley) 5/14/95
 *
 * $FreeBSD: head/sys/fs/nullfs/null_subr.c 243311 2012-11-19 22:43:45Z attilio $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

#define LOG2_SIZEVNODE 8		/* log2(sizeof struct vnode) */
#define	NNULLNODECACHE 16

/*
 * Null layer cache:
 * Each cache entry holds a reference to the lower vnode
 * along with a pointer to the alias vnode.  When an
 * entry is added the lower vnode is VREF'd.  When the
 * alias is removed the lower vnode is vrele'd.
 */

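/*
 * The bucket is derived from the lower vnode's address.  Shifting by
 * LOG2_SIZEVNODE discards the low-order bits, which carry little
 * information since vnodes are roughly sizeof(struct vnode) bytes apart;
 * the result is then masked with null_node_hash (the mask returned by
 * hashinit()).  As a sketch, with a 16-bucket table (mask 0xf) a lower
 * vnode at 0xc8e60400 would hash to bucket (0xc8e60400 >> 8) & 0xf == 4.
 */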
#define	NULL_NHASH(vp) \
	(&null_node_hashtbl[(((uintptr_t)vp)>>LOG2_SIZEVNODE) & null_node_hash])

static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
static u_long null_node_hash;
struct mtx null_hashmtx;

static MALLOC_DEFINE(M_NULLFSHASH, "nullfs_hash", "NULLFS hash table");
MALLOC_DEFINE(M_NULLFSNODE, "nullfs_node", "NULLFS vnode private part");

static struct vnode * null_hashins(struct mount *, struct null_node *);

/*
 * Initialise cache headers
 */
int
nullfs_init(vfsp)
	struct vfsconf *vfsp;
{

	NULLFSDEBUG("nullfs_init\n");		/* printed during system boot */
	null_node_hashtbl = hashinit(NNULLNODECACHE, M_NULLFSHASH, &null_node_hash);
	mtx_init(&null_hashmtx, "nullhs", NULL, MTX_DEF);
	return (0);
}

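/*
 * Tear down the hash mutex and table when nullfs is unloaded.
 */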
int
nullfs_uninit(vfsp)
	struct vfsconf *vfsp;
{

	mtx_destroy(&null_hashmtx);
	hashdestroy(null_node_hashtbl, M_NULLFSHASH, null_node_hash);
	return (0);
}

/*
 * Return a VREF'd alias for the lower vnode if one already exists, else NULLVP.
 * Lower vnode should be locked on entry and will be left locked on exit.
 */
struct vnode *
null_hashget(mp, lowervp)
	struct mount *mp;
	struct vnode *lowervp;
{
	struct null_node_hashhead *hd;
	struct null_node *a;
	struct vnode *vp;

	ASSERT_VOP_LOCKED(lowervp, "null_hashget");

	/*
	 * Find the hash bucket, then search its (two-way) linked
	 * list looking for a null_node structure which references
	 * the lower vnode.  If found, vref() the nullfs vnode
	 * (but NOT the lower vnode's VREF counter).
	 */
	hd = NULL_NHASH(lowervp);
	mtx_lock(&null_hashmtx);
	LIST_FOREACH(a, hd, null_hash) {
		if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
			/*
			 * Since we have the lower node locked the nullfs
			 * node cannot be in the process of recycling.  If
			 * it had been recycled before we grabbed the lower
			 * lock it would not have been found on the hash.
			 */
			vp = NULLTOV(a);
			vref(vp);
			mtx_unlock(&null_hashmtx);
			return (vp);
		}
	}
	mtx_unlock(&null_hashmtx);
	return (NULLVP);
}

/*
 * Act like null_hashget(), but add the passed null_node to the hash if no
 * existing node is found.
 */
static struct vnode *
null_hashins(mp, xp)
	struct mount *mp;
	struct null_node *xp;
{
	struct null_node_hashhead *hd;
	struct null_node *oxp;
	struct vnode *ovp;

	hd = NULL_NHASH(xp->null_lowervp);
	mtx_lock(&null_hashmtx);
	LIST_FOREACH(oxp, hd, null_hash) {
		if (oxp->null_lowervp == xp->null_lowervp &&
		    NULLTOV(oxp)->v_mount == mp) {
			/*
			 * See null_hashget for a description of this
			 * operation.
			 */
			ovp = NULLTOV(oxp);
			vref(ovp);
			mtx_unlock(&null_hashmtx);
			return (ovp);
		}
	}
	LIST_INSERT_HEAD(hd, xp, null_hash);
	mtx_unlock(&null_hashmtx);
	return (NULLVP);
}

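/*
 * Dismantle a partially constructed nullfs vnode: detach the private
 * data, give the vnode back its own lock, mark it dead and drop it,
 * then free the null_node.
 */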
static void
null_destroy_proto(struct vnode *vp, void *xp)
{

	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_vnlock = &vp->v_lock;
	vp->v_op = &dead_vnodeops;
	VI_UNLOCK(vp);
	vgone(vp);
	vput(vp);
	free(xp, M_NULLFSNODE);
}

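/*
 * Destructor passed to insmntque1(): release the lock and reference on
 * the lower vnode before dismantling the half-built nullfs vnode.
 */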
static void
null_insmntque_dtr(struct vnode *vp, void *xp)
{

	vput(((struct null_node *)xp)->null_lowervp);
	null_destroy_proto(vp, xp);
}

/*
 * Make a new nullfs node or get an existing one.
 * Vp is the alias vnode, lowervp is the lower vnode.
 *
 * The lowervp is assumed to be locked and to carry a "spare" reference.  This
 * routine vrele()s lowervp if the nullfs node was taken from the hash.
 * Otherwise it "transfers" the caller's "spare" reference to the created
 * nullfs vnode.
 */
int
null_nodeget(mp, lowervp, vpp)
	struct mount *mp;
	struct vnode *lowervp;
	struct vnode **vpp;
{
	struct null_node *xp;
	struct vnode *vp;
	int error;

	ASSERT_VOP_LOCKED(lowervp, "lowervp");
	KASSERT(lowervp->v_usecount >= 1, ("Unreferenced vnode %p", lowervp));

	/* Look up the hash first. */
	*vpp = null_hashget(mp, lowervp);
	if (*vpp != NULL) {
		vrele(lowervp);
		return (0);
	}

	/*
	 * The insmntque1() call below requires an exclusive lock on
	 * the nullfs vnode.  Upgrade the lock now if the hash failed
	 * to provide a ready-to-use vnode.
	 */
	if (VOP_ISLOCKED(lowervp) != LK_EXCLUSIVE) {
		vn_lock(lowervp, LK_UPGRADE | LK_RETRY);
		if ((lowervp->v_iflag & VI_DOOMED) != 0) {
			vput(lowervp);
			return (ENOENT);
		}
	}

	/*
	 * We do not serialize vnode creation; instead we check for
	 * duplicates later, when adding the new vnode to the hash.
	 * Note that a duplicate can only appear in the hash if the
	 * lowervp is locked LK_SHARED.
	 */
	xp = malloc(sizeof(struct null_node), M_NULLFSNODE, M_WAITOK);

	error = getnewvnode("null", mp, &null_vnodeops, &vp);
	if (error) {
		vput(lowervp);
		free(xp, M_NULLFSNODE);
		return (error);
	}

	xp->null_vnode = vp;
	xp->null_lowervp = lowervp;
	vp->v_type = lowervp->v_type;
	vp->v_data = xp;
	vp->v_vnlock = lowervp->v_vnlock;
	if (vp->v_vnlock == NULL)
		panic("null_nodeget: Passed a NULL vnlock.\n");
	error = insmntque1(vp, mp, null_insmntque_dtr, xp);
	if (error != 0)
		return (error);
	/*
	 * Atomically insert our new node into the hash, or vref the
	 * existing one if someone else has beaten us to it.
	 */
	*vpp = null_hashins(mp, xp);
	if (*vpp != NULL) {
		vrele(lowervp);
		null_destroy_proto(vp, xp);
		return (0);
	}
	*vpp = vp;

	return (0);
}

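/*
 * A rough usage sketch (names are illustrative): a caller that holds a
 * locked lowervp with a spare reference wraps it with
 *
 *	error = null_nodeget(mp, lowervp, &vp);
 *
 * and on success uses vp in place of lowervp; the spare reference has
 * been consumed either way.  On failure null_nodeget() has already
 * vput()'ed lowervp.
 */
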
/*
 * Remove node from hash.
 */
void
null_hashrem(xp)
	struct null_node *xp;
{

	mtx_lock(&null_hashmtx);
	LIST_REMOVE(xp, null_hash);
	mtx_unlock(&null_hashmtx);
}

#ifdef DIAGNOSTIC

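/*
 * Sanity check a nullfs vnode: it must reference a live, referenced
 * lower vnode.  Returns the lower vnode; fil and lno identify the
 * calling site.
 */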
struct vnode *
null_checkvp(vp, fil, lno)
	struct vnode *vp;
	char *fil;
	int lno;
{
	struct null_node *a = VTONULL(vp);

#ifdef notyet
	/*
	 * Can't do this check because vop_reclaim runs
	 * with a funny vop vector.
	 */
	if (vp->v_op != null_vnodeop_p) {
		printf ("null_checkvp: on non-null-node\n");
		panic("null_checkvp");
	}
#endif
	if (a->null_lowervp == NULLVP) {
		/* Should never happen */
		panic("null_checkvp %p", vp);
	}
	VI_LOCK_FLAGS(a->null_lowervp, MTX_DUPOK);
	if (a->null_lowervp->v_usecount < 1)
		panic ("null with unref'ed lowervp, vp %p lvp %p",
		    vp, a->null_lowervp);
	VI_UNLOCK(a->null_lowervp);
#ifdef notyet
	printf("null %x/%d -> %x/%d [%s, %d]\n",
	        NULLTOV(a), vrefcnt(NULLTOV(a)),
		a->null_lowervp, vrefcnt(a->null_lowervp),
		fil, lno);
#endif
	return (a->null_lowervp);
}
#endif
