/*-
 * Copyright (c) 2004 Max Khon
 * Copyright (c) 2014 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/geom/uzip/g_uzip.c 269456 2014-08-03 03:06:00Z marcel $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <geom/geom.h>
#include <net/zlib.h>

FEATURE(geom_uzip, "GEOM uzip read-only compressed disks support");

#undef GEOM_UZIP_DEBUG
#ifdef GEOM_UZIP_DEBUG
#define	DPRINTF(a)	printf a
#else
#define	DPRINTF(a)
#endif

static MALLOC_DEFINE(M_GEOM_UZIP, "geom_uzip", "GEOM UZIP data structures");

#define	UZIP_CLASS_NAME	"UZIP"

/*
 * Maximum allowed valid block size (to prevent foot-shooting)
 */
#define	MAX_BLKSZ	(MAXPHYS - MAXPHYS / 1000 - 12)
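/*
 * The headroom subtracted above (MAXPHYS / 1000 + 12 bytes) matches zlib's
 * traditional worst-case expansion allowance of 0.1% plus 12 bytes, so even
 * a block that deflate could not shrink still fits in a single MAXPHYS-sized
 * read of the compressed provider.
 */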

/*
 * Integer values (block size, number of blocks, offsets) are stored
 * in big-endian (network) order on disk and in struct cloop_header,
 * and in native order in struct g_uzip_softc.
 */

#define	CLOOP_MAGIC_LEN	128
static char CLOOP_MAGIC_START[] = "#!/bin/sh\n";

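/*
 * On-disk cloop layout: a CLOOP_MAGIC_LEN byte magic block (a short shell
 * script, see CLOOP_MAGIC_START), the big-endian block size and block count
 * below, then nblocks + 1 big-endian 64-bit offsets into the compressed
 * data.  offsets[i + 1] - offsets[i] is the compressed length of block i;
 * a length of zero denotes an all-zero block.
 */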
struct cloop_header {
	char magic[CLOOP_MAGIC_LEN];	/* cloop magic */
	uint32_t blksz;			/* block size */
	uint32_t nblocks;		/* number of blocks */
};

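/*
 * Per-instance state.  last_blk/last_buf cache the most recently
 * decompressed block and are protected by last_mtx; req_total and
 * req_cached count all requests and cache hits, and are reported
 * when the geom is destroyed.
 */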
struct g_uzip_softc {
	uint32_t blksz;			/* block size */
	uint32_t nblocks;		/* number of blocks */
	uint64_t *offsets;

	struct mtx last_mtx;
	uint32_t last_blk;		/* last blk no */
	char *last_buf;			/* last blk data */
	int req_total;			/* total requests */
	int req_cached;			/* cached requests */
};

static void g_uzip_done(struct bio *bp);

static void
g_uzip_softc_free(struct g_uzip_softc *sc, struct g_geom *gp)
{

	if (gp != NULL) {
		printf("%s: %d requests, %d cached\n",
		    gp->name, sc->req_total, sc->req_cached);
	}
	if (sc->offsets != NULL) {
		free(sc->offsets, M_GEOM_UZIP);
		sc->offsets = NULL;
	}
	mtx_destroy(&sc->last_mtx);
	free(sc->last_buf, M_GEOM_UZIP);
	free(sc, M_GEOM_UZIP);
}

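/*
 * Memory hooks handed to zlib.  inflate() is driven from the bio
 * completion path, so these use M_NOWAIT rather than sleeping.
 */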
static void *
z_alloc(void *nil, u_int type, u_int size)
{
	void *ptr;

	ptr = malloc(type * size, M_GEOM_UZIP, M_NOWAIT);

	return (ptr);
}

static void
z_free(void *nil, void *ptr)
{

	free(ptr, M_GEOM_UZIP);
}

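/*
 * Try to satisfy the next part of "bp" from the cached last block.
 * Returns 1 if the whole request has been completed and delivered,
 * 0 otherwise (including a partial hit that only advanced
 * bio_completed).
 */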
static int
g_uzip_cached(struct g_geom *gp, struct bio *bp)
{
	struct g_uzip_softc *sc;
	off_t ofs;
	size_t blk, blkofs, usz;

	sc = gp->softc;
	ofs = bp->bio_offset + bp->bio_completed;
	blk = ofs / sc->blksz;
	mtx_lock(&sc->last_mtx);
	if (blk == sc->last_blk) {
		blkofs = ofs % sc->blksz;
		usz = sc->blksz - blkofs;
		if (bp->bio_resid < usz)
			usz = bp->bio_resid;
		memcpy(bp->bio_data + bp->bio_completed, sc->last_buf + blkofs,
		    usz);
		sc->req_cached++;
		mtx_unlock(&sc->last_mtx);

		DPRINTF(("%s/%s: %p: offset=%jd: got %jd bytes from cache\n",
		    __func__, gp->name, bp, (intmax_t)ofs, (intmax_t)usz));

		bp->bio_completed += usz;
		bp->bio_resid -= usz;

		if (bp->bio_resid == 0) {
			g_io_deliver(bp, 0);
			return (1);
		}
	} else
		mtx_unlock(&sc->last_mtx);

	return (0);
}

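/*
 * Translate the uncompressed range still missing from "bp" into a
 * sector-aligned read of compressed data from the backing provider.
 * end_blk is trimmed until the read fits into MAXPHYS; the remainder
 * is picked up by a follow-up request from g_uzip_done().  Returns 1
 * if "bp" was delivered here (cache hit or error), 0 if a child bio
 * is now in flight.
 */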
static int
g_uzip_request(struct g_geom *gp, struct bio *bp)
{
	struct g_uzip_softc *sc;
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t ofs;
	size_t start_blk, end_blk;

	if (g_uzip_cached(gp, bp) != 0)
		return (1);

	sc = gp->softc;

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return (1);
	}
	bp2->bio_done = g_uzip_done;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	ofs = bp->bio_offset + bp->bio_completed;
	start_blk = ofs / sc->blksz;
	KASSERT(start_blk < sc->nblocks, ("start_blk out of range"));
	end_blk = (ofs + bp->bio_resid + sc->blksz - 1) / sc->blksz;
	KASSERT(end_blk <= sc->nblocks, ("end_blk out of range"));

	DPRINTF(("%s/%s: %p: start=%u (%jd), end=%u (%jd)\n",
	    __func__, gp->name, bp,
	    (u_int)start_blk, (intmax_t)sc->offsets[start_blk],
	    (u_int)end_blk, (intmax_t)sc->offsets[end_blk]));

	bp2->bio_offset = sc->offsets[start_blk] -
	    sc->offsets[start_blk] % pp->sectorsize;
	while (1) {
		bp2->bio_length = sc->offsets[end_blk] - bp2->bio_offset;
		bp2->bio_length = (bp2->bio_length + pp->sectorsize - 1) /
		    pp->sectorsize * pp->sectorsize;
		if (bp2->bio_length <= MAXPHYS)
			break;

		end_blk--;
	}

	bp2->bio_data = malloc(bp2->bio_length, M_GEOM_UZIP, M_NOWAIT);
	if (bp2->bio_data == NULL) {
		g_destroy_bio(bp2);
		g_io_deliver(bp, ENOMEM);
		return (1);
	}

	DPRINTF(("%s/%s: %p: reading %jd bytes from offset %jd\n",
	    __func__, gp->name, bp,
	    (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));

	g_io_request(bp2, cp);
	return (0);
}

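/*
 * Completion handler for the child bio.  Inflate every compressed block
 * that was read in full, going through the one-block cache so sequential
 * readers benefit, copy the requested portion into the parent bio, and
 * either deliver the parent or issue another g_uzip_request() for
 * whatever is still missing.
 */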
static void
g_uzip_done(struct bio *bp)
{
	z_stream zs;
	struct bio *bp2;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_uzip_softc *sc;
	char *data, *data2;
	off_t ofs;
	size_t blk, blkofs, len, ulen;

	bp2 = bp->bio_parent;
	gp = bp2->bio_to->geom;
	sc = gp->softc;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	bp2->bio_error = bp->bio_error;
	if (bp2->bio_error != 0)
		goto done;

	/* Make sure there's forward progress. */
	if (bp->bio_completed == 0) {
		bp2->bio_error = ECANCELED;
		goto done;
	}

	zs.zalloc = z_alloc;
	zs.zfree = z_free;
	if (inflateInit(&zs) != Z_OK) {
		bp2->bio_error = EILSEQ;
		goto done;
	}

	ofs = bp2->bio_offset + bp2->bio_completed;
	blk = ofs / sc->blksz;
	blkofs = ofs % sc->blksz;
	data = bp->bio_data + sc->offsets[blk] % pp->sectorsize;
	data2 = bp2->bio_data + bp2->bio_completed;
	while (bp->bio_completed && bp2->bio_resid) {
		ulen = MIN(sc->blksz - blkofs, bp2->bio_resid);
		len = sc->offsets[blk + 1] - sc->offsets[blk];
		DPRINTF(("%s/%s: %p/%ju: data2=%p, ulen=%u, data=%p, len=%u\n",
		    __func__, gp->name, gp, bp->bio_completed,
		    data2, (u_int)ulen, data, (u_int)len));
		if (len == 0) {
			/* All zero block: no cache update */
			bzero(data2, ulen);
		} else if (len <= bp->bio_completed) {
			zs.next_in = data;
			zs.avail_in = len;
			zs.next_out = sc->last_buf;
			zs.avail_out = sc->blksz;
			mtx_lock(&sc->last_mtx);
			if (inflate(&zs, Z_FINISH) != Z_STREAM_END) {
				sc->last_blk = -1;
				mtx_unlock(&sc->last_mtx);
				inflateEnd(&zs);
				bp2->bio_error = EILSEQ;
				goto done;
			}
			sc->last_blk = blk;
			memcpy(data2, sc->last_buf + blkofs, ulen);
			mtx_unlock(&sc->last_mtx);
			if (inflateReset(&zs) != Z_OK) {
				inflateEnd(&zs);
				bp2->bio_error = EILSEQ;
				goto done;
			}
			data += len;
		} else
			break;

		data2 += ulen;
		bp2->bio_completed += ulen;
		bp2->bio_resid -= ulen;
		bp->bio_completed -= len;
		blkofs = 0;
		blk++;
	}

	if (inflateEnd(&zs) != Z_OK)
		bp2->bio_error = EILSEQ;

done:
	/* Finish processing the request. */
	free(bp->bio_data, M_GEOM_UZIP);
	g_destroy_bio(bp);
	if (bp2->bio_error != 0 || bp2->bio_resid == 0)
		g_io_deliver(bp2, bp2->bio_error);
	else
		g_uzip_request(gp, bp2);
}

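/*
 * Start routine: the uzip provider is read-only, so anything other
 * than BIO_READ is rejected with EOPNOTSUPP.
 */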
static void
g_uzip_start(struct bio *bp)
{
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_uzip_softc *sc;

	pp = bp->bio_to;
	gp = pp->geom;

	DPRINTF(("%s/%s: %p: cmd=%d, offset=%jd, length=%jd, buffer=%p\n",
	    __func__, gp->name, bp, bp->bio_cmd, (intmax_t)bp->bio_offset,
	    (intmax_t)bp->bio_length, bp->bio_data));

	sc = gp->softc;
	sc->req_total++;

	if (bp->bio_cmd != BIO_READ) {
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp->bio_resid = bp->bio_length;
	bp->bio_completed = 0;

	g_uzip_request(gp, bp);
}

static void
g_uzip_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;

	g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, cp->provider->name);
	g_topology_assert();

	gp = cp->geom;
	g_uzip_softc_free(gp->softc, gp);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
}

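/*
 * Pass access requests down to the backing provider, but never allow
 * the uzip provider to be opened for writing.
 */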
static int
g_uzip_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	KASSERT (cp != NULL, ("g_uzip_access but no consumer"));

	if (cp->acw + dw > 0)
		return (EROFS);

	return (g_access(cp, dr, dw, de));
}

static void
g_uzip_spoiled(struct g_consumer *cp)
{
	struct g_geom *gp;

	gp = cp->geom;
	g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, gp->name);
	g_topology_assert();

	g_uzip_softc_free(gp->softc, gp);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
}

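/*
 * Taste routine: read the first sector, verify the cloop magic and
 * version, pull in the block size, block count and the full offsets
 * table (which may span several sectors), and create the uncompressed
 * 512-byte-sector provider on top of it.
 */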
static struct g_geom *
g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
{
	int error;
	uint32_t i, total_offsets, offsets_read, blk;
	void *buf;
	struct cloop_header *header;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp2;
	struct g_uzip_softc *sc;

	g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	buf = NULL;

	/*
	 * Create geom instance.
	 */
	gp = g_new_geomf(mp, "%s.uzip", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}
	g_topology_unlock();

	/*
	 * Read cloop header, look for CLOOP magic, perform
	 * other validity checks.
	 */
	DPRINTF(("%s: media sectorsize %u, mediasize %jd\n",
	    gp->name, pp->sectorsize, (intmax_t)pp->mediasize));
	buf = g_read_data(cp, 0, pp->sectorsize, NULL);
	if (buf == NULL)
		goto err;
	header = (struct cloop_header *) buf;
	if (strncmp(header->magic, CLOOP_MAGIC_START,
	    sizeof(CLOOP_MAGIC_START) - 1) != 0) {
		DPRINTF(("%s: no CLOOP magic\n", gp->name));
		goto err;
	}
	if (header->magic[0x0b] != 'V' || header->magic[0x0c] < '2') {
		DPRINTF(("%s: image version too old\n", gp->name));
		goto err;
	}

	/*
	 * Initialize softc and read offsets.
	 */
	sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO);
	gp->softc = sc;
	sc->blksz = ntohl(header->blksz);
	sc->nblocks = ntohl(header->nblocks);
	if (sc->blksz % 512 != 0) {
		printf("%s: block size (%u) should be multiple of 512.\n",
		    gp->name, sc->blksz);
		goto err;
	}
	if (sc->blksz > MAX_BLKSZ) {
		printf("%s: block size (%u) should not be larger than %d.\n",
		    gp->name, sc->blksz, MAX_BLKSZ);
		goto err;
	}
	total_offsets = sc->nblocks + 1;
	if (sizeof(struct cloop_header) +
	    total_offsets * sizeof(uint64_t) > pp->mediasize) {
		printf("%s: media too small for %u blocks\n",
		    gp->name, sc->nblocks);
		goto err;
	}
	sc->offsets = malloc(
	    total_offsets * sizeof(uint64_t), M_GEOM_UZIP, M_WAITOK);
	offsets_read = MIN(total_offsets,
	    (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t));
	for (i = 0; i < offsets_read; i++)
		sc->offsets[i] = be64toh(((uint64_t *) (header + 1))[i]);
	DPRINTF(("%s: %u offsets in the first sector\n",
	       gp->name, offsets_read));
	for (blk = 1; offsets_read < total_offsets; blk++) {
		uint32_t nread;

		free(buf, M_GEOM);
		buf = g_read_data(
		    cp, blk * pp->sectorsize, pp->sectorsize, NULL);
		if (buf == NULL)
			goto err;
		nread = MIN(total_offsets - offsets_read,
		     pp->sectorsize / sizeof(uint64_t));
		DPRINTF(("%s: %u offsets read from sector %d\n",
		    gp->name, nread, blk));
		for (i = 0; i < nread; i++) {
			sc->offsets[offsets_read + i] =
			    be64toh(((uint64_t *) buf)[i]);
		}
		offsets_read += nread;
	}
	free(buf, M_GEOM);
	DPRINTF(("%s: done reading offsets\n", gp->name));
	mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF);
	sc->last_blk = -1;
	sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK);
	sc->req_total = 0;
	sc->req_cached = 0;

	g_topology_lock();
	pp2 = g_new_providerf(gp, "%s", gp->name);
	pp2->sectorsize = 512;
	pp2->mediasize = (off_t)sc->nblocks * sc->blksz;
	pp2->stripesize = pp->stripesize;
	pp2->stripeoffset = pp->stripeoffset;
	g_error_provider(pp2, 0);
	g_access(cp, -1, 0, 0);

	DPRINTF(("%s: taste ok (%d, %jd), (%d, %d), %x\n",
	    gp->name,
	    pp2->sectorsize, (intmax_t)pp2->mediasize,
	    pp2->stripeoffset, pp2->stripesize, pp2->flags));
	printf("%s: %u x %u blocks\n", gp->name, sc->nblocks, sc->blksz);
	return (gp);

err:
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf != NULL)
		free(buf, M_GEOM);
	if (gp->softc != NULL) {
		g_uzip_softc_free(gp->softc, NULL);
		gp->softc = NULL;
	}
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);

	return (NULL);
}

static int
g_uzip_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{
	struct g_provider *pp;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, gp->name);
	g_topology_assert();

	if (gp->softc == NULL) {
		printf("%s(%s): gp->softc == NULL\n", __func__, gp->name);
		return (ENXIO);
	}

	KASSERT(gp != NULL, ("NULL geom"));
	pp = LIST_FIRST(&gp->provider);
	KASSERT(pp != NULL, ("NULL provider"));
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
		return (EBUSY);

	g_uzip_softc_free(gp->softc, gp);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}

static struct g_class g_uzip_class = {
	.name = UZIP_CLASS_NAME,
	.version = G_VERSION,
	.taste = g_uzip_taste,
	.destroy_geom = g_uzip_destroy_geom,

	.start = g_uzip_start,
	.orphan = g_uzip_orphan,
	.access = g_uzip_access,
	.spoiled = g_uzip_spoiled,
};

DECLARE_GEOM_CLASS(g_uzip_class, g_uzip);
MODULE_DEPEND(g_uzip, zlib, 1, 1, 1);