/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD: head/sys/dev/md/md.c 238991 2012-08-02 15:05:34Z jh $
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/vmparam.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif
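/*
 * A minimal sketch (not part of the driver, and the helper below is
 * hypothetical) of how such a patch tool might use the markers: scan
 * the kernel object for each string and treat the bytes in between as
 * the image area.
 *
 *	off_t s = find_string(kernfd, "MFS Filesystem goes here");
 *	off_t e = find_string(kernfd, "MFS Filesystem had better STOP here");
 *	assert(image_size <= e - s);
 */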

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static int mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;
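/*
 * Worked example (assuming a 4 KB PAGE_SIZE and 8-byte pointers):
 * NINDIR = 4096 / 8 = 512 slots per indir node, NMASK = 0x1ff, and
 * g_md_init() computes nshift = log2(NINDIR) = 9, so each level of the
 * tree consumes 9 bits of the sector number.
 */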

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}
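/*
 * Worked example: a 1 GB device with 512-byte sectors has 2097152
 * sectors.  With NINDIR = 512 the loop runs twice (2097152 -> 4096 ->
 * 8), so layer = 2 and the root gets shift = 2 * 9 = 18: s_read() and
 * s_write() index the root with bits 18 and up of the sector number,
 * the middle level with bits 9-17, and the leaves with bits 0-8.
 */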

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */
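/*
 * Pruning example: if a leaf holds exactly one nonzero entry and that
 * entry is overwritten with 0, the leaf's "used" count drops to zero,
 * the loop at the bottom of s_write() frees the leaf with del_indir()
 * and clears its slot in the parent, repeating upward until it reaches
 * a node that is still in use (or the root, which is never freed).
 */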

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}


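/*
 * GEOM access method.  The r/w/e arguments are deltas against the
 * provider's current access counts (pp->acr/acw/ace); adding them
 * yields the totals that would result if the request were granted.
 */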
static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

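/*
 * MD_MALLOC sector encoding: each tree entry is 0 for an unallocated
 * sector (reads back as zeroes), a value in 1..255 for a sector
 * uniformly filled with that byte (the MD_COMPRESS case), or, for
 * values above 255, a pointer to a real sector buffer from sc->uma.
 */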
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				memset(dst, osp, sc->sectorsize);
			else {
				bcopy((void *)osp, dst, sc->sectorsize);
				cpu_flush_dcache(dst, sc->sectorsize);
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error, vfslocked;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
	if (bp->bio_cmd == BIO_DELETE) {
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
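	/*
	 * Worked example (4 KB pages, assumed values): bio_offset =
	 * 6144 and bio_length = 10240 give offs = 2048, a first page
	 * index of 1, lastp = 16383 / 4096 = 3 and lastend = 4095 + 1
	 * = 4096, i.e. the loop touches pages 1-3, starting 2048 bytes
	 * into page 1 and ending exactly at the end of page 3.
	 */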

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
			cpu_flush_dcache(p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_wakeup(m);
				break;
			}
			if (len != PAGE_SIZE) {
				bzero((void *)(sf_buf_kva(sf) + offs), len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		sf_buf_free(sf);
		sched_unpin();
		vm_page_wakeup(m);
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
    m->wire_count, m->busy,
    m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

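		/*
		 * error == -1 means g_handleattr_int() already replied
		 * to the bio, so it must not be delivered again here.
		 */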
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	if (mdio->md_base == 0)
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}


static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
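	/*
	 * The 0x1ff align mask gives 512-byte aligned sector buffers,
	 * so every pointer returned by the zone is a multiple of 512
	 * and can never collide with the 0..255 byte-fill encoding.
	 */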
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}


static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags, vfslocked;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{
	int vfslocked;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

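/*
 * Resize the device.  Vnode-backed devices only have the provider size
 * updated below; swap-backed devices release pages and swap reservation
 * when shrinking and reserve (and optionally populate) swap when growing.
 */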
static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_LOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_UNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_LOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_UNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
 finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}


static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * Every ioctl we currently handle reads an mdio, so the
	 * version number is checked once, up front.  Should a future
	 * ioctl not take an mdio, the check will have to move into the
	 * individual handlers.  Note that the correct return value for
	 * an unknown ioctl is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
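		/*
		 * Unit numbers go in md_pad[1..]; md_pad[0] gets the
		 * count and a trailing -1 marks a truncated list.
		 */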
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
			    MD_NAME, mdunits, name, len, ptr);
			sx_xlock(&md_sx);
			md_preloaded(ptr, len);
			sx_xunlock(&md_sx);
		}
	}
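	/*
	 * A typical way to get such an image here (an assumed example,
	 * not mandated by this file) is via loader.conf(5):
	 *
	 *	mdimage_load="YES"
	 *	mdimage_type="md_image"
	 *	mdimage_name="/boot/mdimage.img"
	 */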
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}
1474