/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD: head/sys/dev/md/md.c 127211 2004-03-19 21:19:15Z alc $
 *
 */

/*
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such are covered by the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN 0x10000	/* Tell worker thread to terminate. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "MD disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "MD sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/* Image gets put here: */
static u_char mfs_root[MD_ROOT_SIZE*1024] = "MFS Filesystem goes here";
static u_char end_mfs_root[] __unused = "MFS Filesystem had better STOP here";
#endif

static g_init_t md_drvinit;

static int	mdrootready;
static int	mdunits;
static dev_t	status_dev = 0;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	dev_t dev;
	enum md_types type;
	unsigned nsect;
	unsigned opencount;
	unsigned secsize;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	unsigned pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
	unsigned npage;
};

static int mddestroy(struct md_s *sc, struct thread *td);

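/*
 * Allocate one indirection node with room for NINDIR entries.  Uses
 * M_NOWAIT so it is safe to call from the I/O path; returns NULL if
 * either allocation fails.
 */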
static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_NOWAIT | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

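/*
 * Recursively free an indirection tree.  Interior nodes (shift != 0)
 * point at lower-level nodes; leaf entries above 255 are pointers to
 * UMA-backed sector storage and must be freed, while values of 255 or
 * less encode a sector filled with that single byte and own no storage.
 */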
static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int i, layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}
	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

struct g_class g_md_class = {
	.name = "MD",
	.init = md_drvinit,
};

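/*
 * GEOM access method.  We only track whether anybody has the provider
 * open, so that mddetach() can refuse to tear down a busy device.
 */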
static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL)
		return (ENXIO);
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

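/*
 * GEOM start method.  Sort the bio into our queue and wake the
 * per-device worker thread; completion happens in md_kthread().
 */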
static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;

	bp->bio_pblkno = bp->bio_offset / sc->secsize;
	bp->bio_bcount = bp->bio_length;
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);

	wakeup(sc);
}

DECLARE_GEOM_CLASS(g_md_class, g_md);

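/*
 * I/O for malloc-backed devices.  A leaf value of 0 means an unwritten
 * (all-zero) sector, values 1-255 encode a sector filled entirely with
 * that byte (the MD_COMPRESS case), and larger values are pointers to
 * UMA-backed sector storage.
 */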
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	unsigned secno, nsec, uc;
	uintptr_t sp, osp;

	nsec = bp->bio_bcount / sc->secsize;
	secno = bp->bio_pblkno;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->secsize);
			else if (osp <= 255)
				for (i = 0; i < sc->secsize; i++)
					dst[i] = osp;
			else
				bcopy((void *)osp, dst, sc->secsize);
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->secsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->secsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t) uma_zalloc(
					    sc->uma, M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->secsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->secsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error)
			break;
		secno++;
		dst += sc->secsize;
	}
	bp->bio_resid = 0;
	return (error);
}

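/*
 * I/O for preloaded images is a straight copy to or from the wired
 * kernel memory holding the image; BIO_DELETE is a no-op here.
 */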
static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	if (bp->bio_cmd == BIO_DELETE) {
	} else if (bp->bio_cmd == BIO_READ) {
		bcopy(sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT),
		    bp->bio_data, bp->bio_bcount);
	} else {
		bcopy(bp->bio_data,
		    sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT),
		    bp->bio_bcount);
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	bzero(&auio, sizeof(auio));

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_bcount;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_pblkno * sc->secsize;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_bcount;
	auio.uio_td = curthread;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
	} else {
		(void) vn_start_write(sc->vnode, &mp, V_WAIT);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_WRITE(sc->vnode, &auio,
		    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
		vn_finished_write(mp);
	}
	bp->bio_resid = auio.uio_resid;
	return (error);
}

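/*
 * I/O for swap-backed devices goes page by page through the backing
 * VM object, using sf_buf mappings to copy between the bio's data
 * buffer and the object's pages.
 */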
static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int i, rv;
	int offs, len, lastp, lastend;
	vm_page_t m;
	u_char *p;

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (i.e., first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (i.e., PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sf = sf_buf_alloc(m);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
#if 0
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			bzero((void *)(sf_buf_kva(sf) + offs), len);
			vm_page_dirty(m);
			m->valid = VM_PAGE_BITS_ALL;
#endif
		}
		sf_buf_free(sf);
		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_activate(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);
		vm_page_unlock_queues();

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
		if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
			printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
			    m->wire_count, m->busy, m->flags, m->hold_count,
			    m->act_count, m->queue, m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	vm_object_set_writeable_dirty(sc->object);
	VM_OBJECT_UNLOCK(sc->object);
	return (0);
}

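/*
 * Per-device worker thread.  Pulls bios off the queue, dispatches them
 * to the type-specific handler and delivers the completions; exits once
 * MD_SHUTDOWN has been set by mddestroy().  Vnode-backed devices still
 * need Giant for the VFS calls.
 */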
static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error, hasgiant;

	sc = arg;
	curthread->td_base_pri = PRIBIO;

	switch (sc->type) {
	case MD_VNODE:
		mtx_lock(&Giant);
		hasgiant = 1;
		break;
	case MD_MALLOC:
	case MD_PRELOAD:
	case MD_SWAP:
	default:
		hasgiant = 0;
		break;
	}

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		bp = bioq_first(&sc->bio_queue);
		if (bp)
			bioq_remove(&sc->bio_queue, bp);
		if (!bp) {
			if (sc->flags & MD_SHUTDOWN) {
				mtx_unlock(&sc->queue_mtx);
				sc->procp = NULL;
				wakeup(&sc->procp);
				if (hasgiant)
					mtx_unlock(&Giant);
				kthread_exit(0);
			}
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if (sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads)))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			switch (sc->type) {
			case MD_MALLOC:
				error = mdstart_malloc(sc, bp);
				break;
			case MD_PRELOAD:
				error = mdstart_preload(sc, bp);
				break;
			case MD_VNODE:
				error = mdstart_vnode(sc, bp);
				break;
			case MD_SWAP:
				error = mdstart_swap(sc, bp);
				break;
			default:
				panic("Impossible md(type)");
				break;
			}
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
		}
	}
}

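/*
 * Look up the softc for a unit number; returns NULL if no such unit.
 */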
static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	/* XXX: LOCK(unique unit numbers) */
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	/* XXX: UNLOCK(unique unit numbers) */
	return (sc);
}

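/*
 * Allocate a new softc and its worker thread.  A unit of -1 means pick
 * one past the highest unit currently in use; an explicit unit that is
 * already taken makes us return NULL.
 */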
static struct md_s *
mdnew(int unit)
{
	struct md_s *sc;
	int error, max = -1;

	/* XXX: LOCK(unique unit numbers) */
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit) {
			/* XXX: UNLOCK(unique unit numbers) */
			return (NULL);
		}
		if (sc->unit > max)
			max = sc->unit;
	}
	if (unit == -1)
		unit = max + 1;
	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->unit = unit;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sprintf(sc->name, "md%d", unit);
	error = kthread_create(md_kthread, sc, &sc->procp, 0, 0, "%s",
	    sc->name);
	if (error) {
		free(sc, M_MD);
		return (NULL);
	}
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	/* XXX: UNLOCK(unique unit numbers) */
	return (sc);
}

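/*
 * Attach the device to GEOM: create a geom and provider named after the
 * unit and announce the media and sector sizes.
 */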
static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	DROP_GIANT();
	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->start = g_md_start;
	gp->access = g_md_access;
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = (off_t)sc->nsect * sc->secsize;
	pp->sectorsize = sc->secsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	PICKUP_GIANT();
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_ioctl *mdio)
{
	struct md_s *sc;

	if (mdio->md_size == 0)
		return (EINVAL);
	if (mdio->md_options & ~(MD_AUTOUNIT))
		return (EINVAL);
	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		if (sc == NULL)
			return (ENOMEM);
		mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
		if (sc == NULL)
			return (EBUSY);
	}
	sc->type = MD_PRELOAD;
	sc->secsize = DEV_BSIZE;
	sc->nsect = mdio->md_size;
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (mdio->md_size << DEV_BSHIFT);
	mdinit(sc);
	return (0);
}

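/*
 * Configure a malloc-backed device.  Sector storage is allocated lazily
 * on first write unless MD_RESERVE asks for everything up front.
 */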
static int
mdcreate_malloc(struct md_ioctl *mdio)
{
	struct md_s *sc;
	off_t u;
	uintptr_t sp;
	int error;

	error = 0;
	if (mdio->md_size == 0)
		return (EINVAL);
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_secsize != 0 && !powerof2(mdio->md_secsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		if (sc == NULL)
			return (ENOMEM);
		mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
		if (sc == NULL)
			return (EBUSY);
	}
	sc->type = MD_MALLOC;
	if (mdio->md_secsize != 0)
		sc->secsize = mdio->md_secsize;
	else
		sc->secsize = DEV_BSIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->nsect = mdio->md_size;
	sc->nsect /= (sc->secsize / DEV_BSIZE);
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->nsect);
	sc->uma = uma_zcreate(sc->name, sc->secsize,
	    NULL, NULL, NULL, NULL, 0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		for (u = 0; u < sc->nsect; u++) {
			sp = (uintptr_t) uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error)
				break;
		}
	}
	if (error) {
		mddestroy(sc, NULL);
		return (error);
	}
	mdinit(sc);
	if (!(mdio->md_options & MD_RESERVE))
		sc->pp->flags |= G_PF_CANDELETE;
	return (0);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS.  XXX
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->secsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->secsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

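/*
 * Configure a vnode-backed device.  If the file cannot be opened
 * read-write we retry read-only and mark the device MD_READONLY.
 */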
static int
mdcreate_vnode(struct md_ioctl *mdio, struct thread *td)
{
	struct md_s *sc;
	struct vattr vattr;
	struct nameidata nd;
	int error, flags;

	flags = FREAD|FWRITE;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
	error = vn_open(&nd, &flags, 0, -1);
	if (error) {
		if (error != EACCES && error != EPERM && error != EROFS)
			return (error);
		flags &= ~FWRITE;
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
		error = vn_open(&nd, &flags, 0, -1);
		if (error)
			return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG ||
	    (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) {
		VOP_UNLOCK(nd.ni_vp, 0, td);
		(void) vn_close(nd.ni_vp, flags, td->td_ucred, td);
		return (error ? error : EINVAL);
	}
	VOP_UNLOCK(nd.ni_vp, 0, td);

	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
	}
	if (sc == NULL) {
		(void) vn_close(nd.ni_vp, flags, td->td_ucred, td);
		return (EBUSY);
	}

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->type = MD_VNODE;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->secsize = DEV_BSIZE;
	sc->vnode = nd.ni_vp;

	/*
	 * If the size is specified, override the file attributes.
	 */
	if (mdio->md_size)
		sc->nsect = mdio->md_size;
	else
		sc->nsect = vattr.va_size / sc->secsize; /* XXX: round up ? */
	if (sc->nsect == 0) {
		mddestroy(sc, td);
		return (EINVAL);
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error) {
		mddestroy(sc, td);
		return (error);
	}
	mdinit(sc);
	return (0);
}

static void
md_zapit(void *p, int cancel)
{
	if (cancel)
		return;
	g_wither_geom(p, ENXIO);
}

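/*
 * Tear down a device: wither the geom, tell the worker thread to exit
 * and wait for it, then release the backing store and the softc.
 */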
static int
mddestroy(struct md_s *sc, struct thread *td)
{

	GIANT_REQUIRED;

	mtx_destroy(&sc->queue_mtx);
	if (sc->gp) {
		sc->gp->softc = NULL;
		g_waitfor_event(md_zapit, sc->gp, M_WAITOK, sc->gp, NULL);
		sc->gp = NULL;
		sc->pp = NULL;
	}
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (sc->procp != NULL)
		tsleep(&sc->procp, PRIBIO, "mddestroy", hz / 10);
	if (sc->vnode != NULL)
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL) {
		vm_object_deallocate(sc->object);
	}
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	/* XXX: LOCK(unique unit numbers) */
	LIST_REMOVE(sc, list);
	/* XXX: UNLOCK(unique unit numbers) */
	free(sc, M_MD);
	return (0);
}

static int
mdcreate_swap(struct md_ioctl *mdio, struct thread *td)
{
	int error;
	struct md_s *sc;

	GIANT_REQUIRED;

	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
	}
	if (sc == NULL)
		return (EBUSY);

	sc->type = MD_SWAP;

	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */

	if (mdio->md_size == 0) {
		mddestroy(sc, td);
		return (EDOM);
	}

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * sc_nsect is in units of DEV_BSIZE.
	 * sc_npage is in units of PAGE_SIZE.
	 *
	 * Note the truncation.
	 */

	sc->secsize = DEV_BSIZE;
	sc->npage = mdio->md_size / (PAGE_SIZE / DEV_BSIZE);
	sc->nsect = sc->npage * (PAGE_SIZE / DEV_BSIZE);
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE *
	    (vm_offset_t)sc->npage, VM_PROT_DEFAULT, 0);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, sc->npage) < 0) {
			vm_object_deallocate(sc->object);
			sc->object = NULL;
			mddestroy(sc, td);
			return (EDOM);
		}
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error) {
		mddestroy(sc, td);
		return (error);
	}
	mdinit(sc);
	if (!(mdio->md_options & MD_RESERVE))
		sc->pp->flags |= G_PF_CANDELETE;
	return (0);
}

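/*
 * Detach a unit on request; refuses if the device is still open unless
 * MD_FORCE was set at creation time.
 */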
static int
mddetach(int unit, struct thread *td)
{
	struct md_s *sc;

	sc = mdfind(unit);
	if (sc == NULL)
		return (ENOENT);
	if (sc->opencount != 0 && !(sc->flags & MD_FORCE))
		return (EBUSY);
	switch (sc->type) {
	case MD_VNODE:
	case MD_SWAP:
	case MD_MALLOC:
	case MD_PRELOAD:
		return (mddestroy(sc, td));
	default:
		return (EOPNOTSUPP);
	}
}

static int
mdctlioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
		    devtoname(dev), cmd, addr, flags, td);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	mdio = (struct md_ioctl *)addr;
	switch (cmd) {
	case MDIOCATTACH:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		switch (mdio->md_type) {
		case MD_MALLOC:
			return (mdcreate_malloc(mdio));
		case MD_PRELOAD:
			return (mdcreate_preload(mdio));
		case MD_VNODE:
			return (mdcreate_vnode(mdio, td));
		case MD_SWAP:
			return (mdcreate_swap(mdio, td));
		default:
			return (EINVAL);
		}
	case MDIOCDETACH:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		if (mdio->md_file != NULL || mdio->md_size != 0 ||
		    mdio->md_options != 0)
			return (EINVAL);
		return (mddetach(mdio->md_unit, td));
	case MDIOCQUERY:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		switch (sc->type) {
		case MD_MALLOC:
			mdio->md_size = sc->nsect;
			break;
		case MD_PRELOAD:
			mdio->md_size = sc->nsect;
			mdio->md_base = (uint64_t)(intptr_t)sc->pl_ptr;
			break;
		case MD_SWAP:
			mdio->md_size = sc->nsect;
			break;
		case MD_VNODE:
			mdio->md_size = sc->nsect;
			/* XXX fill this in */
			mdio->md_file = NULL;
			break;
		}
		return (0);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
	return (ENOIOCTL);
}

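/*
 * Register a preloaded image as the next md unit.  Unit 0 becomes the
 * root file system candidate when MD_ROOT is configured.
 */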
static void
md_preloaded(u_char *image, unsigned length)
{
	struct md_s *sc;

	sc = mdnew(-1);
	if (sc == NULL)
		return;
	sc->type = MD_PRELOAD;
	sc->secsize = DEV_BSIZE;
	sc->nsect = length / DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	if (sc->unit == 0)
		mdrootready = 1;
	mdinit(sc);
}

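/*
 * GEOM class initialization: attach any images the loader preloaded
 * (type "md_image" or "mfs_root") and create the mdctl control device.
 */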
static void
md_drvinit(struct g_class *mp __unused)
{
	caddr_t mod;
	caddr_t c;
	u_char *ptr, *name, *type;
	unsigned len;

	mod = NULL;
	g_topology_unlock();
#ifdef MD_ROOT_SIZE
	md_preloaded(mfs_root, MD_ROOT_SIZE*1024);
#endif
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (name == NULL)
			continue;
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		c = preload_search_info(mod, MODINFO_ADDR);
		ptr = *(u_char **)c;
		c = preload_search_info(mod, MODINFO_SIZE);
		len = *(size_t *)c;
		printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
		    MD_NAME, mdunits, name, len, ptr);
		md_preloaded(ptr, len);
	}
	status_dev = make_dev(&mdctl_cdevsw, 0xffff00ff, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static int
md_modevent(module_t mod, int type, void *data)
{
	int error;
	struct md_s *sc;

	switch (type) {
	case MOD_LOAD:
		break;
	case MOD_UNLOAD:
		LIST_FOREACH(sc, &md_softc_list, list) {
			error = mddetach(sc->unit, curthread);
			if (error != 0)
				return (error);
		}
		if (status_dev)
			destroy_dev(status_dev);
		status_dev = 0;
		break;
	default:
		break;
	}
	return (0);
}

static moduledata_t md_mod = {
	MD_NAME,
	md_modevent,
	NULL
};
DECLARE_MODULE(md, md_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_VERSION(md, MD_MODVER);

#ifdef MD_ROOT
static void
md_takeroot(void *junk)
{
	if (mdrootready)
		rootdevnames[0] = "ufs:/dev/md0";
}

SYSINIT(md_root, SI_SUB_MOUNT_ROOT, SI_ORDER_FIRST, md_takeroot, NULL);
#endif /* MD_ROOT */