/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD: head/sys/dev/md/md.c 221855 2011-05-13 19:35:01Z mdf $
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such are under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/vmparam.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static int mdunits;
static struct cdev *status_dev = 0;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};
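
/*
 * For illustration (numbers here are an example, assuming PAGE_SIZE is
 * 4096 and 8-byte pointers): NINDIR is 512 and nshift is 9, so each
 * indir node decodes 9 bits of the sector number, and a two-layer tree
 * (shifts 9 and 0) spans 512 * 512 = 262144 sectors.
 */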

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}
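
/*
 * Worked example for the loop above (taking NINDIR = 512): a device of
 * 2097152 sectors (1 GB of 512-byte sectors) is divided by 512 twice
 * before the quotient, 8, fits in a single node, so layer = 2 and the
 * root node gets shift = 2 * nshift.
 */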

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}
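
/*
 * For example, looking up sector 0x12345 in the tree sketched above
 * (nodes at shifts 18, 9 and 0, with NINDIR = 512): the root indexes
 * (0x12345 >> 18) & NMASK = 0, the middle node indexes
 * (0x12345 >> 9) & NMASK = 0x91, and the leaf returns
 * array[0x12345 & NMASK], i.e. entry 0x145.
 */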

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}


static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}
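
/*
 * In the access method above, r, w and e are deltas requested by GEOM,
 * while pp->acr, pp->acw and pp->ace hold the counts currently granted;
 * adding them gives the counts that will be in effect if the request is
 * honoured, which is what the MD_READONLY and opencount logic inspects.
 */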

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				memset(dst, osp, sc->sectorsize);
			else {
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}
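
/*
 * Note on the leaf encoding used by mdstart_malloc(): a stored value of
 * 0 means the sector was never written (or was deleted), values 1..255
 * mean the sector is filled entirely with that byte (the MD_COMPRESS
 * case), and any larger value is a pointer to a sector-sized uma
 * allocation holding the data.  This is why the code compares entries
 * against 255 before treating them as pointers.
 */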

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error, vfslocked;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
	if (bp->bio_cmd == BIO_DELETE) {
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
	return (error);
}
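
/*
 * The BIO_DELETE path in mdstart_vnode() does not punch holes in the
 * backing file; it overwrites the range with zeroes taken from the
 * kernel's shared zero_region, at most zerosize bytes per VOP_WRITE
 * (ZERO_REGION_SIZE rounded down to a multiple of the sector size),
 * hence the loop.
 */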

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
			cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			if (len != PAGE_SIZE) {
				bzero((void *)(sf_buf_kva(sf) + offs), len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		sf_buf_free(sf);
		sched_unpin();
		vm_page_wakeup(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
    m->wire_count, m->busy,
    m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
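
/*
 * Example of the page arithmetic at the top of mdstart_swap() (assuming
 * 4 KB pages): a 3000-byte request at offset 1024 yields offs = 1024,
 * lastp = (1024 + 3000 - 1) / 4096 = 0 and lastend = 4024, so the loop
 * visits a single page and operates on bytes 1024..4023 of it.
 */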

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}
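
/*
 * The error value -1 used in md_kthread() is a convention rather than
 * an errno: g_handleattr_int() completes and delivers the bio itself
 * when it answers an attribute query, so the worker must skip its own
 * g_io_deliver() call in that case.
 */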

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	if (mdio->md_base == 0)
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}


static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}


static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc.
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags, vfslocked;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{
	int vfslocked;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check: the size must be a non-zero multiple of the
	 * page size.
	 */
	if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
 finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}
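
/*
 * With MD_RESERVE, swap space for the entire object is allocated up
 * front by swap_pager_reserve(), so later writes should not fail for
 * lack of swap at page-out time; without it, swap is only assigned as
 * pages are actually paged out.
 */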


static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		if (mdio->md_sectorsize == 0)
			sc->sectorsize = DEV_BSIZE;
		else
			sc->sectorsize = mdio->md_sectorsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}
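
/*
 * For reference, these ioctls are normally issued through mdconfig(8):
 * for example "mdconfig -a -t swap -s 64m" performs MDIOCATTACH with
 * MD_AUTOUNIT set, and "mdconfig -d -u 0" performs MDIOCDETACH on
 * unit 0.
 */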

static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
			    MD_NAME, mdunits, name, len, ptr);
			sx_xlock(&md_sx);
			md_preloaded(ptr, len);
			sx_xunlock(&md_sx);
		}
	}
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}
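
/*
 * A preloaded image is typically supplied by loader(8); e.g. in
 * /boot/loader.conf (the variable names here are the conventional
 * ones, not dictated by this driver):
 *
 *	mfsroot_load="YES"
 *	mfsroot_type="mfs_root"
 *	mfsroot_name="/boot/mfsroot"
 *
 * which makes the MODINFO_TYPE test above match "mfs_root".
 */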

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}