1163048Sru/*-
2163048Sru * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
3163048Sru * All rights reserved.
4163048Sru *
5163048Sru * Redistribution and use in source and binary forms, with or without
6163048Sru * modification, are permitted provided that the following conditions
7163048Sru * are met:
8163048Sru * 1. Redistributions of source code must retain the above copyright
9163048Sru *    notice, this list of conditions and the following disclaimer.
10163048Sru * 2. Redistributions in binary form must reproduce the above copyright
11163048Sru *    notice, this list of conditions and the following disclaimer in the
12163048Sru *    documentation and/or other materials provided with the distribution.
13163048Sru *
14163048Sru * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15163048Sru * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16163048Sru * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17163048Sru * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18163048Sru * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19163048Sru * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20163048Sru * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21163048Sru * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22163048Sru * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23163048Sru * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24163048Sru * SUCH DAMAGE.
25163048Sru */
26163048Sru
27163048Sru#include <sys/cdefs.h>
28163048Sru__FBSDID("$FreeBSD$");
29163048Sru
30163048Sru#include <sys/param.h>
31163048Sru#include <sys/systm.h>
32163048Sru#include <sys/kernel.h>
33163048Sru#include <sys/module.h>
34163048Sru#include <sys/lock.h>
35163048Sru#include <sys/mutex.h>
36163048Sru#include <sys/bio.h>
37163048Sru#include <sys/sysctl.h>
38163048Sru#include <sys/malloc.h>
39163048Sru#include <sys/queue.h>
40223921Sae#include <sys/sbuf.h>
41163048Sru#include <sys/time.h>
42163048Sru#include <vm/uma.h>
43163048Sru#include <geom/geom.h>
44163048Sru#include <geom/cache/g_cache.h>
45163048Sru
/* Advertise the presence of this module to userland. */
FEATURE(geom_cache, "GEOM cache module");

static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW, 0,
    "GEOM_CACHE stuff");
/* Debug verbosity for G_CACHE_DEBUG()/G_CACHE_LOGREQ(). */
static u_int g_cache_debug = 0;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
    "Debug level");
/* Global switch: when zero, reads bypass the cache entirely. */
static u_int g_cache_enable = 1;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
    "");
/* Period (seconds) of the g_cache_go() housekeeping callout. */
static u_int g_cache_timeout = 10;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
    0, "");
/* Age (seconds) after which an idle ready entry is marked reusable. */
static u_int g_cache_idletime = 5;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
    0, "");
/* Low/high watermarks (percent of sc_maxent) for the used-entry list. */
static u_int g_cache_used_lo = 5;
static u_int g_cache_used_hi = 20;
67163048Srustatic int
68163048Srusysctl_handle_pct(SYSCTL_HANDLER_ARGS)
69163048Sru{
70163048Sru	u_int val = *(u_int *)arg1;
71163048Sru	int error;
72163048Sru
73170289Sdwmalone	error = sysctl_handle_int(oidp, &val, 0, req);
74163048Sru	if (error || !req->newptr)
75163048Sru		return (error);
76163048Sru	if (val < 0 || val > 100)
77163048Sru		return (EINVAL);
78163048Sru	if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
79163048Sru	    (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
80163048Sru		return (EINVAL);
81163048Sru	*(u_int *)arg1 = val;
82163048Sru	return (0);
83163048Sru}
/* Exposed via sysctl_handle_pct() so 0..100 and lo <= hi always hold. */
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo, CTLTYPE_UINT|CTLFLAG_RW,
	&g_cache_used_lo, 0, sysctl_handle_pct, "IU", "");
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi, CTLTYPE_UINT|CTLFLAG_RW,
	&g_cache_used_hi, 0, sysctl_handle_pct, "IU", "");
88163048Sru
89163048Sru
/* Forward declarations for the class method table below. */
static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
static g_ctl_destroy_geom_t g_cache_destroy_geom;

static g_taste_t g_cache_taste;
static g_ctl_req_t g_cache_config;
static g_dumpconf_t g_cache_dumpconf;

/* GEOM class definition registered by this module. */
struct g_class g_cache_class = {
	.name = G_CACHE_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_cache_config,
	.taste = g_cache_taste,
	.destroy_geom = g_cache_destroy_geom
};
104163048Sru
/* Convert between a byte offset and a cache block number (sc_bsize units). */
#define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
#define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)
107163048Sru
108163048Sru
109163048Srustatic struct g_cache_desc *
110163048Srug_cache_alloc(struct g_cache_softc *sc)
111163048Sru{
112163048Sru	struct g_cache_desc *dp;
113163048Sru
114163048Sru	mtx_assert(&sc->sc_mtx, MA_OWNED);
115163048Sru
116163048Sru	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
117163048Sru		dp = TAILQ_FIRST(&sc->sc_usedlist);
118163048Sru		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
119163048Sru		sc->sc_nused--;
120163048Sru		dp->d_flags = 0;
121163048Sru		LIST_REMOVE(dp, d_next);
122163048Sru		return (dp);
123163048Sru	}
124163048Sru	if (sc->sc_nent > sc->sc_maxent) {
125163048Sru		sc->sc_cachefull++;
126163048Sru		return (NULL);
127163048Sru	}
128163048Sru	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
129163048Sru	if (dp == NULL)
130163048Sru		return (NULL);
131163048Sru	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
132163048Sru	if (dp->d_data == NULL) {
133163048Sru		free(dp, M_GCACHE);
134163048Sru		return (NULL);
135163048Sru	}
136163048Sru	sc->sc_nent++;
137163048Sru	return (dp);
138163048Sru}
139163048Sru
/*
 * Release a descriptor's data buffer back to the zone and free the
 * descriptor itself.  Called with sc_mtx held.
 */
static void
g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
{

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	uma_zfree(sc->sc_zone, dp->d_data);
	free(dp, M_GCACHE);
	sc->sc_nent--;
}
150163048Sru
151163048Srustatic void
152163048Srug_cache_free_used(struct g_cache_softc *sc)
153163048Sru{
154163048Sru	struct g_cache_desc *dp;
155163048Sru	u_int n;
156163048Sru
157163048Sru	mtx_assert(&sc->sc_mtx, MA_OWNED);
158163048Sru
159163048Sru	n = g_cache_used_lo * sc->sc_maxent / 100;
160163048Sru	while (sc->sc_nused > n) {
161163048Sru		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
162163048Sru		dp = TAILQ_FIRST(&sc->sc_usedlist);
163163048Sru		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
164163048Sru		sc->sc_nused--;
165163048Sru		LIST_REMOVE(dp, d_next);
166163048Sru		g_cache_free(sc, dp);
167163048Sru	}
168163048Sru}
169163048Sru
/*
 * Copy the portion of cached block `dp' that overlaps request `bp'
 * into the request's buffer and account for the delivered bytes.
 * When the whole request has been satisfied the bio is completed.
 * The entry is (re)queued at the tail of the used/LRU list once the
 * request has read past the end of the block.  Called with sc_mtx held.
 */
static void
g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
    struct g_cache_desc *dp, int error)
{
	off_t off1, off, len;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
	KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
	    dp->d_bno, ("wrong entry"));

	/* Intersection of the request range with this cache block. */
	off1 = BNO2OFF(dp->d_bno, sc);
	off = MAX(bp->bio_offset, off1);
	len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;

	/* Latch the first error seen for this bio. */
	if (bp->bio_error == 0)
		bp->bio_error = error;
	if (bp->bio_error == 0) {
		bcopy(dp->d_data + (off - off1),
		    bp->bio_data + (off - bp->bio_offset), len);
	}
	bp->bio_completed += len;
	KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
	if (bp->bio_completed == bp->bio_length) {
		/* Whole request satisfied; report zero bytes on error. */
		if (bp->bio_error != 0)
			bp->bio_completed = 0;
		g_io_deliver(bp, bp->bio_error);
	}

	if (dp->d_flags & D_FLAG_USED) {
		/* Refresh LRU position: move to the tail. */
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
		/* Request read past this block; entry becomes reclaimable. */
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused++;
		dp->d_flags |= D_FLAG_USED;
	}
	dp->d_atime = time_uptime;
}
209163048Sru
/*
 * Completion handler for bios issued by g_cache_read() to fill a
 * cache block.  Delivers data to every bio waiting on the descriptor,
 * then either keeps the now-valid entry, or frees it when it was
 * invalidated while the read was in flight or the read failed.
 */
static void
g_cache_done(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_cache_desc *dp;
	struct bio *bp2, *tmpbp;

	sc = bp->bio_from->geom->softc;
	KASSERT(G_CACHE_DESC1(bp) == sc, ("corrupt bio_caller in g_cache_done()"));
	dp = G_CACHE_DESC2(bp);
	mtx_lock(&sc->sc_mtx);
	/* Deliver to every bio queued on this descriptor. */
	bp2 = dp->d_biolist;
	while (bp2 != NULL) {
		KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc, ("corrupt bio_driver in g_cache_done()"));
		/* g_cache_deliver() may complete bp2: fetch the link first. */
		tmpbp = G_CACHE_NEXT_BIO2(bp2);
		g_cache_deliver(sc, bp2, dp, bp->bio_error);
		bp2 = tmpbp;
	}
	dp->d_biolist = NULL;
	if (dp->d_flags & D_FLAG_INVALID) {
		/* A write invalidated the block while the fill was in flight. */
		sc->sc_invalid--;
		g_cache_free(sc, dp);
	} else if (bp->bio_error) {
		/* Failed fill: drop the entry from the cache entirely. */
		LIST_REMOVE(dp, d_next);
		if (dp->d_flags & D_FLAG_USED) {
			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused--;
		}
		g_cache_free(sc, dp);
	}
	mtx_unlock(&sc->sc_mtx);
	g_destroy_bio(bp);
}
243163048Sru
244163048Srustatic struct g_cache_desc *
245163048Srug_cache_lookup(struct g_cache_softc *sc, off_t bno)
246163048Sru{
247163048Sru	struct g_cache_desc *dp;
248163048Sru
249163048Sru	mtx_assert(&sc->sc_mtx, MA_OWNED);
250163048Sru
251163048Sru	LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
252163048Sru		if (dp->d_bno == bno)
253163048Sru			return (dp);
254163048Sru	return (NULL);
255163048Sru}
256163048Sru
/*
 * Serve (the remainder of) a read bio from the cache.  On a hit the
 * data is delivered immediately, or the bio is queued behind an
 * in-flight fill.  On a miss a descriptor is allocated and a
 * block-sized read is scheduled to fill it.  Returns 0 when the bio
 * was consumed, ENOMEM when the caller must fall back to pass-through.
 */
static int
g_cache_read(struct g_cache_softc *sc, struct bio *bp)
{
	struct bio *cbp;
	struct g_cache_desc *dp;

	mtx_lock(&sc->sc_mtx);
	dp = g_cache_lookup(sc,
	    OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
	if (dp != NULL) {
		/* Add to waiters list or deliver. */
		sc->sc_cachehits++;
		if (dp->d_biolist != NULL) {
			/* Block fill still in flight; queue behind it. */
			G_CACHE_NEXT_BIO1(bp) = sc;
			G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
			dp->d_biolist = bp;
		} else
			g_cache_deliver(sc, bp, dp, 0);
		mtx_unlock(&sc->sc_mtx);
		return (0);
	}

	/* Cache miss.  Allocate entry and schedule bio.  */
	sc->sc_cachemisses++;
	dp = g_cache_alloc(sc);
	if (dp == NULL) {
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_cache_free(sc, dp);
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}

	/* Publish the descriptor with bp as its only waiter so far. */
	dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
	G_CACHE_NEXT_BIO1(bp) = sc;
	G_CACHE_NEXT_BIO2(bp) = NULL;
	dp->d_biolist = bp;
	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
	    dp, d_next);
	mtx_unlock(&sc->sc_mtx);

	/* Issue a full-block read to fill the entry. */
	G_CACHE_DESC1(cbp) = sc;
	G_CACHE_DESC2(cbp) = dp;
	cbp->bio_done = g_cache_done;
	cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
	cbp->bio_data = dp->d_data;
	cbp->bio_length = sc->sc_bsize;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
	return (0);
}
310163048Sru
/*
 * Drop every cache block overlapping the range written by `bp'.
 * Entries with a fill read in flight cannot be freed yet; they are
 * marked D_FLAG_INVALID and reaped in g_cache_done().
 */
static void
g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
{
	struct g_cache_desc *dp;
	off_t bno, lim;

	mtx_lock(&sc->sc_mtx);
	bno = OFF2BNO(bp->bio_offset, sc);
	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
	do {
		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
			LIST_REMOVE(dp, d_next);
			if (dp->d_flags & D_FLAG_USED) {
				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
				sc->sc_nused--;
			}
			if (dp->d_biolist == NULL)
				g_cache_free(sc, dp);
			else {
				/* Fill read in flight; defer the free. */
				dp->d_flags = D_FLAG_INVALID;
				sc->sc_invalid++;
			}
		}
		bno++;
	} while (bno <= lim);
	mtx_unlock(&sc->sc_mtx);
}
338163048Sru
/*
 * Start method: route incoming requests.  Only reads lying entirely
 * below sc_tail and spanning at most two cache blocks are served from
 * the cache; writes invalidate overlapping blocks.  Anything not fully
 * handled above falls through to a pass-through clone.
 */
static void
g_cache_start(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_cache_desc *dp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	sc = gp->softc;
	G_CACHE_LOGREQ(bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		sc->sc_reads++;
		sc->sc_readbytes += bp->bio_length;
		if (!g_cache_enable)
			break;
		if (bp->bio_offset + bp->bio_length > sc->sc_tail)
			break;
		if (OFF2BNO(bp->bio_offset, sc) ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			/* Request fits within a single cache block. */
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			if (g_cache_read(sc, bp) == 0)
				return;
			/* Cache path failed: undo stats and pass through. */
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		} else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			/* Spans two blocks: first must be cached and ready. */
			mtx_lock(&sc->sc_mtx);
			dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
			if (dp == NULL || dp->d_biolist != NULL) {
				mtx_unlock(&sc->sc_mtx);
				break;
			}
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			/* Deliver the first block, then read the second. */
			g_cache_deliver(sc, bp, dp, 0);
			mtx_unlock(&sc->sc_mtx);
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		}
		break;
	case BIO_WRITE:
		sc->sc_writes++;
		sc->sc_wrotebytes += bp->bio_length;
		/* Keep the cache coherent with the newly-written data. */
		g_cache_invalidate(sc, bp);
		break;
	}
	/* Pass the request through to the underlying provider. */
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	G_CACHE_LOGREQ(cbp, "Sending request.");
	g_io_request(cbp, LIST_FIRST(&gp->consumer));
}
401163048Sru
402163048Srustatic void
403163048Srug_cache_go(void *arg)
404163048Sru{
405163048Sru	struct g_cache_softc *sc = arg;
406163048Sru	struct g_cache_desc *dp;
407163048Sru	int i;
408163048Sru
409163048Sru	mtx_assert(&sc->sc_mtx, MA_OWNED);
410163048Sru
411163048Sru	/* Forcibly mark idle ready entries as used. */
412163048Sru	for (i = 0; i < G_CACHE_BUCKETS; i++) {
413163048Sru		LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
414163048Sru			if (dp->d_flags & D_FLAG_USED ||
415163048Sru			    dp->d_biolist != NULL ||
416163048Sru			    time_uptime - dp->d_atime < g_cache_idletime)
417163048Sru				continue;
418163048Sru			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
419163048Sru			sc->sc_nused++;
420163048Sru			dp->d_flags |= D_FLAG_USED;
421163048Sru		}
422163048Sru	}
423163048Sru
424163048Sru	/* Keep the number of used entries low. */
425163048Sru	if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
426163048Sru		g_cache_free_used(sc);
427163048Sru
428163048Sru	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
429163048Sru}
430163048Sru
431163048Srustatic int
432163048Srug_cache_access(struct g_provider *pp, int dr, int dw, int de)
433163048Sru{
434163048Sru	struct g_geom *gp;
435163048Sru	struct g_consumer *cp;
436163048Sru	int error;
437163048Sru
438163048Sru	gp = pp->geom;
439163048Sru	cp = LIST_FIRST(&gp->consumer);
440163048Sru	error = g_access(cp, dr, dw, de);
441163048Sru
442163048Sru	return (error);
443163048Sru}
444163048Sru
/*
 * Orphan method: the underlying provider went away, so force-destroy
 * the geom regardless of open counts.
 */
static void
g_cache_orphan(struct g_consumer *cp)
{

	g_topology_assert();
	g_cache_destroy(cp->geom->softc, 1);
}
452163048Sru
453163048Srustatic struct g_cache_softc *
454163048Srug_cache_find_device(struct g_class *mp, const char *name)
455163048Sru{
456163048Sru	struct g_geom *gp;
457163048Sru
458163048Sru	LIST_FOREACH(gp, &mp->geom, geom) {
459163048Sru		if (strcmp(gp->name, name) == 0)
460163048Sru			return (gp->softc);
461163048Sru	}
462163048Sru	return (NULL);
463163048Sru}
464163048Sru
465163048Srustatic struct g_geom *
466163048Srug_cache_create(struct g_class *mp, struct g_provider *pp,
467163048Sru    const struct g_cache_metadata *md, u_int type)
468163048Sru{
469163048Sru	struct g_cache_softc *sc;
470163048Sru	struct g_geom *gp;
471163048Sru	struct g_provider *newpp;
472163048Sru	struct g_consumer *cp;
473163048Sru	u_int bshift;
474163048Sru	int i;
475163048Sru
476163048Sru	g_topology_assert();
477163048Sru
478163048Sru	gp = NULL;
479163048Sru	newpp = NULL;
480163048Sru	cp = NULL;
481163048Sru
482163048Sru	G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);
483163048Sru
484163048Sru	/* Cache size is minimum 100. */
485163048Sru	if (md->md_size < 100) {
486163048Sru		G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
487163048Sru		return (NULL);
488163048Sru	}
489163048Sru
490163048Sru	/* Block size restrictions. */
491163048Sru	bshift = ffs(md->md_bsize) - 1;
492163048Sru	if (md->md_bsize == 0 || md->md_bsize > MAXPHYS ||
493163048Sru	    md->md_bsize != 1 << bshift ||
494163048Sru	    (md->md_bsize % pp->sectorsize) != 0) {
495163048Sru		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
496163048Sru		return (NULL);
497163048Sru	}
498163048Sru
499163048Sru	/* Check for duplicate unit. */
500163048Sru	if (g_cache_find_device(mp, (const char *)&md->md_name) != NULL) {
501163048Sru		G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
502163048Sru		return (NULL);
503163048Sru	}
504163048Sru
505243333Sjh	gp = g_new_geomf(mp, "%s", md->md_name);
506163048Sru	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
507163048Sru	sc->sc_type = type;
508163048Sru	sc->sc_bshift = bshift;
509163048Sru	sc->sc_bsize = 1 << bshift;
510163048Sru	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
511163048Sru	    UMA_ALIGN_PTR, 0);
512163048Sru	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
513163048Sru	for (i = 0; i < G_CACHE_BUCKETS; i++)
514163048Sru		LIST_INIT(&sc->sc_desclist[i]);
515163048Sru	TAILQ_INIT(&sc->sc_usedlist);
516163048Sru	sc->sc_maxent = md->md_size;
517163048Sru	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
518163048Sru	gp->softc = sc;
519163048Sru	sc->sc_geom = gp;
520163048Sru	gp->start = g_cache_start;
521163048Sru	gp->orphan = g_cache_orphan;
522163048Sru	gp->access = g_cache_access;
523163048Sru	gp->dumpconf = g_cache_dumpconf;
524163048Sru
525163048Sru	newpp = g_new_providerf(gp, "cache/%s", gp->name);
526163048Sru	newpp->sectorsize = pp->sectorsize;
527163048Sru	newpp->mediasize = pp->mediasize;
528163048Sru	if (type == G_CACHE_TYPE_AUTOMATIC)
529163048Sru		newpp->mediasize -= pp->sectorsize;
530163048Sru	sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);
531163048Sru
532163048Sru	cp = g_new_consumer(gp);
533163048Sru	if (g_attach(cp, pp) != 0) {
534163048Sru		G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
535221446Sae		g_destroy_consumer(cp);
536221446Sae		g_destroy_provider(newpp);
537221446Sae		mtx_destroy(&sc->sc_mtx);
538221446Sae		g_free(sc);
539221446Sae		g_destroy_geom(gp);
540221446Sae		return (NULL);
541163048Sru	}
542163048Sru
543163048Sru	g_error_provider(newpp, 0);
544163048Sru	G_CACHE_DEBUG(0, "Device %s created.", gp->name);
545163048Sru	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
546163048Sru	return (gp);
547163048Sru}
548163048Sru
/*
 * Tear down a cache geom: stop the callout, free all cache entries,
 * release the zone and softc, and wither the geom.  When the provider
 * is still open, refuses with EBUSY unless `force' is set.
 */
static int
g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_cache_desc *dp, *dp2;
	int i;

	g_topology_assert();
	if (sc == NULL)
		return (ENXIO);
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_CACHE_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	} else {
		G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
	}
	/* Stop housekeeping before freeing the entries it would touch. */
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		dp = LIST_FIRST(&sc->sc_desclist[i]);
		while (dp != NULL) {
			/* Fetch the link before freeing the node. */
			dp2 = LIST_NEXT(dp, d_next);
			g_cache_free(sc, dp);
			dp = dp2;
		}
	}
	mtx_unlock(&sc->sc_mtx);
	mtx_destroy(&sc->sc_mtx);
	uma_zdestroy(sc->sc_zone);
	g_free(sc);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}
593163048Sru
/*
 * Class destroy_geom method: non-forced destroy (fails with EBUSY
 * when the provider is still open).
 */
static int
g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	return (g_cache_destroy(gp->softc, 0));
}
600163048Sru
/*
 * Read and decode the cache metadata from the last sector of the
 * consumer's provider.  Temporarily acquires read access and drops the
 * topology lock around the actual I/O.  Returns 0 on success.
 */
static int
g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	/* I/O must not be done with the topology lock held. */
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	cache_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}
628163048Sru
/*
 * Encode and write the cache metadata into the last sector of the
 * consumer's provider.  Temporarily acquires write access and drops
 * the topology lock around the actual I/O.  Returns 0 on success.
 */
static int
g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 0, 1, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
	cache_metadata_encode(md, buf);
	/* I/O must not be done with the topology lock held. */
	g_topology_unlock();
	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize);
	g_topology_lock();
	g_access(cp, 0, -1, 0);
	free(buf, M_GCACHE);

	return (error);
}
652163048Sru
/*
 * Taste method: probe provider `pp' for valid cache metadata in its
 * last sector and, if found, auto-create the cache device.  A
 * throwaway geom/consumer pair is used just to read the metadata.
 */
static struct g_geom *
g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_cache_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	G_CACHE_DEBUG(3, "Tasting %s.", pp->name);

	/* Temporary geom/consumer solely for reading the metadata. */
	gp = g_new_geomf(mp, "cache:taste");
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_cache_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);

	/* Validate magic, version and recorded provider size. */
	if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_CACHE_VERSION) {
		printf("geom_cache.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
	if (gp == NULL) {
		G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
		return (NULL);
	}
	return (gp);
}
696163048Sru
/*
 * gcache(8) "create" verb: build metadata from the request parameters
 * (arg0 = device name, arg1 = provider, size, blocksize) and create a
 * manual (non-persistent) cache device on top of the provider.
 */
static void
g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_provider *pp;
	struct g_geom *gp;
	intmax_t *bsize, *size;
	const char *name;
	int *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 2) {
		gctl_error(req, "Invalid number of arguments.");
		return;
	}

	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	/* arg0 is the name of the new cache device. */
	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	strlcpy(md.md_name, name, sizeof(md.md_name));

	/* Cache size: number of entries, minimum 100. */
	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	md.md_size = (u_int)*size;

	/* Block size; fully validated later by g_cache_create(). */
	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}
	md.md_bsize = (u_int)*bsize;

	/* This field is not important here. */
	md.md_provsize = 0;

	/* arg1 is the underlying provider, with any /dev/ prefix stripped. */
	name = gctl_get_asciiparam(req, "arg1");
	if (name == NULL) {
		gctl_error(req, "No 'arg1' argument");
		return;
	}
	if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
		name += strlen("/dev/");
	pp = g_provider_by_name(name);
	if (pp == NULL) {
		G_CACHE_DEBUG(1, "Provider %s is invalid.", name);
		gctl_error(req, "Provider %s is invalid.", name);
		return;
	}
	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
	if (gp == NULL) {
		gctl_error(req, "Can't create %s.", md.md_name);
		return;
	}
}
772163048Sru
/*
 * gcache(8) "configure" verb: update the cache size of an existing
 * device and, for automatic devices, rewrite the on-disk metadata.
 * Note that only sc_maxent is changed on the live device; the block
 * size is only recorded in metadata (presumably picked up on the next
 * taste — NOTE(review): confirm against gcache(8)).
 */
static void
g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_cache_softc *sc;
	struct g_consumer *cp;
	intmax_t *bsize, *size;
	const char *name;
	int error, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 1) {
		gctl_error(req, "Missing device.");
		return;
	}

	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	sc = g_cache_find_device(mp, name);
	if (sc == NULL) {
		G_CACHE_DEBUG(1, "Device %s is invalid.", name);
		gctl_error(req, "Device %s is invalid.", name);
		return;
	}

	/* A size of 0 means "leave unchanged"; otherwise minimum 100. */
	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0 && (u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0)
		sc->sc_maxent = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}

	/* Manual devices have no on-disk metadata to update. */
	if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
		return;

	/* Rebuild the metadata block, keeping current values for zeros. */
	strlcpy(md.md_name, name, sizeof(md.md_name));
	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	if ((u_int)*size != 0)
		md.md_size = (u_int)*size;
	else
		md.md_size = sc->sc_maxent;
	if ((u_int)*bsize != 0)
		md.md_bsize = (u_int)*bsize;
	else
		md.md_bsize = sc->sc_bsize;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	md.md_provsize = cp->provider->mediasize;
	error = g_cache_write_metadata(cp, &md);
	if (error == 0)
		G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
	else
		G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
		    cp->provider->name, error);
}
852163048Sru
853163048Srustatic void
854163048Srug_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
855163048Sru{
856163048Sru	int *nargs, *force, error, i;
857163048Sru	struct g_cache_softc *sc;
858163048Sru	const char *name;
859163048Sru	char param[16];
860163048Sru
861163048Sru	g_topology_assert();
862163048Sru
863163048Sru	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
864163048Sru	if (nargs == NULL) {
865163048Sru		gctl_error(req, "No '%s' argument", "nargs");
866163048Sru		return;
867163048Sru	}
868163048Sru	if (*nargs <= 0) {
869163048Sru		gctl_error(req, "Missing device(s).");
870163048Sru		return;
871163048Sru	}
872163048Sru	force = gctl_get_paraml(req, "force", sizeof(*force));
873163048Sru	if (force == NULL) {
874163048Sru		gctl_error(req, "No 'force' argument");
875163048Sru		return;
876163048Sru	}
877163048Sru
878163048Sru	for (i = 0; i < *nargs; i++) {
879163048Sru		snprintf(param, sizeof(param), "arg%d", i);
880163048Sru		name = gctl_get_asciiparam(req, param);
881163048Sru		if (name == NULL) {
882163048Sru			gctl_error(req, "No 'arg%d' argument", i);
883163048Sru			return;
884163048Sru		}
885163048Sru		sc = g_cache_find_device(mp, name);
886163048Sru		if (sc == NULL) {
887163048Sru			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
888163048Sru			gctl_error(req, "Device %s is invalid.", name);
889163048Sru			return;
890163048Sru		}
891163048Sru		error = g_cache_destroy(sc, *force);
892163048Sru		if (error != 0) {
893163048Sru			gctl_error(req, "Cannot destroy device %s (error=%d).",
894163048Sru			    sc->sc_name, error);
895163048Sru			return;
896163048Sru		}
897163048Sru	}
898163048Sru}
899163048Sru
900163048Srustatic void
901163048Srug_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
902163048Sru{
903163048Sru	struct g_cache_softc *sc;
904163048Sru	const char *name;
905163048Sru	char param[16];
906163048Sru	int i, *nargs;
907163048Sru
908163048Sru	g_topology_assert();
909163048Sru
910163048Sru	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
911163048Sru	if (nargs == NULL) {
912163048Sru		gctl_error(req, "No '%s' argument", "nargs");
913163048Sru		return;
914163048Sru	}
915163048Sru	if (*nargs <= 0) {
916163048Sru		gctl_error(req, "Missing device(s).");
917163048Sru		return;
918163048Sru	}
919163048Sru
920163048Sru	for (i = 0; i < *nargs; i++) {
921163048Sru		snprintf(param, sizeof(param), "arg%d", i);
922163048Sru		name = gctl_get_asciiparam(req, param);
923163048Sru		if (name == NULL) {
924163048Sru			gctl_error(req, "No 'arg%d' argument", i);
925163048Sru			return;
926163048Sru		}
927163048Sru		sc = g_cache_find_device(mp, name);
928163048Sru		if (sc == NULL) {
929163048Sru			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
930163048Sru			gctl_error(req, "Device %s is invalid.", name);
931163048Sru			return;
932163048Sru		}
933163048Sru		sc->sc_reads = 0;
934163048Sru		sc->sc_readbytes = 0;
935163048Sru		sc->sc_cachereads = 0;
936163048Sru		sc->sc_cachereadbytes = 0;
937163048Sru		sc->sc_cachehits = 0;
938163048Sru		sc->sc_cachemisses = 0;
939163048Sru		sc->sc_cachefull = 0;
940163048Sru		sc->sc_writes = 0;
941163048Sru		sc->sc_wrotebytes = 0;
942163048Sru	}
943163048Sru}
944163048Sru
945163048Srustatic void
946163048Srug_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
947163048Sru{
948163048Sru	uint32_t *version;
949163048Sru
950163048Sru	g_topology_assert();
951163048Sru
952163048Sru	version = gctl_get_paraml(req, "version", sizeof(*version));
953163048Sru	if (version == NULL) {
954163048Sru		gctl_error(req, "No '%s' argument.", "version");
955163048Sru		return;
956163048Sru	}
957163048Sru	if (*version != G_CACHE_VERSION) {
958163048Sru		gctl_error(req, "Userland and kernel parts are out of sync.");
959163048Sru		return;
960163048Sru	}
961163048Sru
962163048Sru	if (strcmp(verb, "create") == 0) {
963163048Sru		g_cache_ctl_create(req, mp);
964163048Sru		return;
965163048Sru	} else if (strcmp(verb, "configure") == 0) {
966163048Sru		g_cache_ctl_configure(req, mp);
967163048Sru		return;
968163048Sru	} else if (strcmp(verb, "destroy") == 0 ||
969163048Sru	    strcmp(verb, "stop") == 0) {
970163048Sru		g_cache_ctl_destroy(req, mp);
971163048Sru		return;
972163048Sru	} else if (strcmp(verb, "reset") == 0) {
973163048Sru		g_cache_ctl_reset(req, mp);
974163048Sru		return;
975163048Sru	}
976163048Sru
977163048Sru	gctl_error(req, "Unknown verb.");
978163048Sru}
979163048Sru
980163048Srustatic void
981163048Srug_cache_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
982163048Sru    struct g_consumer *cp, struct g_provider *pp)
983163048Sru{
984163048Sru	struct g_cache_softc *sc;
985163048Sru
986163048Sru	if (pp != NULL || cp != NULL)
987163048Sru		return;
988163048Sru	sc = gp->softc;
989163048Sru	sbuf_printf(sb, "%s<Size>%u</Size>\n", indent, sc->sc_maxent);
990163048Sru	sbuf_printf(sb, "%s<BlockSize>%u</BlockSize>\n", indent, sc->sc_bsize);
991163048Sru	sbuf_printf(sb, "%s<TailOffset>%ju</TailOffset>\n", indent,
992163048Sru	    (uintmax_t)sc->sc_tail);
993163048Sru	sbuf_printf(sb, "%s<Entries>%u</Entries>\n", indent, sc->sc_nent);
994163048Sru	sbuf_printf(sb, "%s<UsedEntries>%u</UsedEntries>\n", indent,
995163048Sru	    sc->sc_nused);
996163048Sru	sbuf_printf(sb, "%s<InvalidEntries>%u</InvalidEntries>\n", indent,
997163048Sru	    sc->sc_invalid);
998163048Sru	sbuf_printf(sb, "%s<Reads>%ju</Reads>\n", indent, sc->sc_reads);
999163048Sru	sbuf_printf(sb, "%s<ReadBytes>%ju</ReadBytes>\n", indent,
1000163048Sru	    sc->sc_readbytes);
1001163048Sru	sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent,
1002163048Sru	    sc->sc_cachereads);
1003163048Sru	sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent,
1004163048Sru	    sc->sc_cachereadbytes);
1005163048Sru	sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent,
1006163048Sru	    sc->sc_cachehits);
1007163048Sru	sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent,
1008163048Sru	    sc->sc_cachemisses);
1009163048Sru	sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent,
1010163048Sru	    sc->sc_cachefull);
1011163048Sru	sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes);
1012163048Sru	sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent,
1013163048Sru	    sc->sc_wrotebytes);
1014163048Sru}
1015163048Sru
/* Register the cache class with the GEOM framework at module load. */
DECLARE_GEOM_CLASS(g_cache_class, g_cache);
1017