/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001 Dag-Erling Coïdan Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_pseudofs.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/pseudofs/pseudofs.h>
#include <fs/pseudofs/pseudofs_internal.h>

static MALLOC_DEFINE(M_PFSVNCACHE, "pfs_vncache", "pseudofs vnode cache");

static struct mtx pfs_vncache_mutex;
static struct pfs_vdata *pfs_vncache;
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);
static void pfs_purge_locked(struct pfs_node *pn, bool force);

static SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW, 0,
    "pseudofs vnode cache");

static int pfs_vncache_entries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, entries, CTLFLAG_RD,
    &pfs_vncache_entries, 0,
    "number of entries in the vnode cache");

static int pfs_vncache_maxentries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, maxentries, CTLFLAG_RD,
    &pfs_vncache_maxentries, 0,
    "highest number of entries in the vnode cache");

static int pfs_vncache_hits;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, hits, CTLFLAG_RD,
    &pfs_vncache_hits, 0,
    "number of cache hits since initialization");

static int pfs_vncache_misses;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, misses, CTLFLAG_RD,
    &pfs_vncache_misses, 0,
    "number of cache misses since initialization");

extern struct vop_vector pfs_vnodeops;	/* XXX -> .h file */

/*
 * Initialize vnode cache
 */
void
pfs_vncache_load(void)
{

	mtx_init(&pfs_vncache_mutex, "pfs_vncache", NULL, MTX_DEF);
	pfs_exit_tag = EVENTHANDLER_REGISTER(process_exit, pfs_exit, NULL,
	    EVENTHANDLER_PRI_ANY);
}

/*
 * Tear down vnode cache
 */
void
pfs_vncache_unload(void)
{

	EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
	mtx_lock(&pfs_vncache_mutex);
	pfs_purge_locked(NULL, true);
	mtx_unlock(&pfs_vncache_mutex);
	KASSERT(pfs_vncache_entries == 0,
	    ("%d vncache entries remaining", pfs_vncache_entries));
	mtx_destroy(&pfs_vncache_mutex);
}

/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
		  struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata *pvd, *pvd2;
	struct vnode *vp;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * XXX linear search is not very efficient.
	 */
retry:
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			vp = pvd->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = vp;
				/*
				 * Some callers cache_enter(vp) later, so
				 * we have to make sure it's not in the
				 * VFS cache so it doesn't get entered
				 * twice.  A better solution would be to
				 * make pfs_vncache_alloc() responsible
				 * for entering the vnode in the VFS
				 * cache.
				 */
				cache_purge(vp);
				return (0);
			}
			goto retry;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);

	/* nope, get a new one */
	pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	pvd->pvd_next = pvd->pvd_prev = NULL;
	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
	if (error) {
		free(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate flag through to vnode so users know it can change
	 * if the process changes (i.e. execve)
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(*vpp);
	error = insmntque(*vpp, mp);
	if (error != 0) {
		free(pvd, M_PFSVNCACHE);
		*vpp = NULLVP;
		return (error);
	}
retry2:
	mtx_lock(&pfs_vncache_mutex);
	/*
	 * Another thread may have raced with us and created the entry
	 * we are about to insert into the cache.  Recheck after
	 * pfs_vncache_mutex is reacquired.
	 */
	for (pvd2 = pfs_vncache; pvd2; pvd2 = pvd2->pvd_next) {
		if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
		    pvd2->pvd_vnode->v_mount == mp) {
			vp = pvd2->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				vgone(*vpp);
				vput(*vpp);
				*vpp = vp;
				cache_purge(vp);
				return (0);
			}
			goto retry2;
		}
	}
	++pfs_vncache_misses;
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	pvd->pvd_prev = NULL;
	pvd->pvd_next = pfs_vncache;
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd;
	pfs_vncache = pvd;
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}

/*
 * Free a vnode
 */
int
pfs_vncache_free(struct vnode *vp)
{
	struct pfs_vdata *pvd;

	mtx_lock(&pfs_vncache_mutex);
	pvd = (struct pfs_vdata *)vp->v_data;
	KASSERT(pvd != NULL, ("pfs_vncache_free(): no vnode data\n"));
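	/*
	 * Unlink the entry from the cache list.  The entry count is
	 * only decremented if the entry was actually linked, i.e. it
	 * either has a predecessor or is the list head.
	 */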
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd->pvd_prev;
	if (pvd->pvd_prev) {
		pvd->pvd_prev->pvd_next = pvd->pvd_next;
		--pfs_vncache_entries;
	} else if (pfs_vncache == pvd) {
		pfs_vncache = pvd->pvd_next;
		--pfs_vncache_entries;
	}
	mtx_unlock(&pfs_vncache_mutex);

	free(pvd, M_PFSVNCACHE);
	vp->v_data = NULL;
	return (0);
}

/*
 * Purge the cache of dead entries
 *
 * This is extremely inefficient due to the fact that vgone() not only
 * indirectly modifies the vnode cache, but may also sleep.  We can
 * neither hold pfs_vncache_mutex across a vgone() call, nor make any
 * assumptions about the state of the cache after vgone() returns.  In
 * consequence, we must start over after every vgone() call, and keep
 * trying until we manage to traverse the entire cache.
 *
 * The only way to improve this situation is to change the data structure
 * used to implement the cache.
 */
static void
pfs_purge_locked(struct pfs_node *pn, bool force)
{
	struct pfs_vdata *pvd;
	struct vnode *vnp;

	mtx_assert(&pfs_vncache_mutex, MA_OWNED);
	pvd = pfs_vncache;
	while (pvd != NULL) {
		if (force || pvd->pvd_dead ||
		    (pn != NULL && pvd->pvd_pn == pn)) {
			vnp = pvd->pvd_vnode;
			vhold(vnp);
			mtx_unlock(&pfs_vncache_mutex);
			VOP_LOCK(vnp, LK_EXCLUSIVE);
			vgone(vnp);
			VOP_UNLOCK(vnp, 0);
			mtx_lock(&pfs_vncache_mutex);
			vdrop(vnp);
			pvd = pfs_vncache;
		} else {
			pvd = pvd->pvd_next;
		}
	}
}
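
/*
 * One possible direction, sketched here for illustration only and not
 * compiled in: replace the single linked list with a small hash table
 * keyed on (pn, pid), so that lookup, insertion and purge touch a
 * single bucket instead of the whole cache.  The bucket count, the
 * pfs_vncache_hash() helper and the pvd_hash_next linkage below are
 * hypothetical names, not part of pseudofs.
 */
#if 0
#define	PFS_VNCACHE_HASHSIZE	256	/* hypothetical bucket count */

static struct pfs_vdata *pfs_vncache_hashtbl[PFS_VNCACHE_HASHSIZE];

static unsigned int
pfs_vncache_hash(struct pfs_node *pn, pid_t pid)
{

	/* mix the node pointer and the pid into a bucket index */
	return (((unsigned int)((uintptr_t)pn / sizeof(*pn)) +
	    (unsigned int)pid) % PFS_VNCACHE_HASHSIZE);
}

/*
 * The lookup in pfs_vncache_alloc() would then scan one bucket, e.g.:
 *
 *	for (pvd = pfs_vncache_hashtbl[pfs_vncache_hash(pn, pid)];
 *	    pvd != NULL; pvd = pvd->pvd_hash_next)
 *		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
 *		    pvd->pvd_vnode->v_mount == mp)
 *			break;
 */
#endif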

void
pfs_purge(struct pfs_node *pn)
{

	mtx_lock(&pfs_vncache_mutex);
	pfs_purge_locked(pn, false);
	mtx_unlock(&pfs_vncache_mutex);
}

/*
 * Free all vnodes associated with a defunct process
 */
static void
pfs_exit(void *arg, struct proc *p)
{
	struct pfs_vdata *pvd;
	int dead;

	if (pfs_vncache == NULL)
		return;
	mtx_lock(&pfs_vncache_mutex);
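	/*
	 * Mark every entry belonging to the exiting process as dead,
	 * then let pfs_purge_locked() reclaim the matching vnodes.
	 */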
	for (pvd = pfs_vncache, dead = 0; pvd != NULL; pvd = pvd->pvd_next)
		if (pvd->pvd_pid == p->p_pid)
			dead = pvd->pvd_dead = 1;
	if (dead)
		pfs_purge_locked(NULL, false);
	mtx_unlock(&pfs_vncache_mutex);
}