/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD: head/sys/dev/md/md.c 126935 2004-03-14 00:24:30Z alc $
 *
 */

/*
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN 0x10000	/* Tell worker thread to terminate. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "MD disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "MD sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/* Image gets put here: */
static u_char mfs_root[MD_ROOT_SIZE*1024] = "MFS Filesystem goes here";
static u_char end_mfs_root[] __unused = "MFS Filesystem had better STOP here";
#endif
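/*
 * The string initializers above presumably act as markers: they make the
 * start (and intended end) of the reserved region findable inside the
 * built kernel file, so a filesystem image can be written over mfs_root
 * after the build.
 */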

static g_init_t md_drvinit;

static int	mdrootready;
static int	mdunits;
static dev_t	status_dev = 0;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};


static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};
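/*
 * An indir node is one level of a radix tree with NINDIR slots per node.
 * Interior nodes (shift != 0) hold pointers to child indir nodes in
 * array[].  Leaf nodes (shift == 0) hold one entry per sector: 0 means
 * the sector was never written, a value <= 255 means the sector is filled
 * entirely with that byte, and anything larger is a pointer to a sector
 * buffer allocated from the per-device uma zone.  "used" counts the
 * non-zero slots so that empty nodes can be pruned by s_write().
 */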

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	dev_t dev;
	enum md_types type;
	unsigned nsect;
	unsigned opencount;
	unsigned secsize;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	unsigned pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
	unsigned npage;
};

static int mddestroy(struct md_s *sc, struct thread *td);

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_NOWAIT | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int i, layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}
	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}
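/*
 * Example of the arithmetic above, assuming 4 kB pages and 64 bit
 * pointers, so NINDIR == 512 and nshift == 9: a device of 2^21 sectors
 * (1 GB at 512 bytes/sector) takes two divisions by NINDIR to bring
 * rcnt down to 8, so layer == 2 and the top node gets shift 18.
 * Sector numbers are then resolved 9 bits at a time, top-down.
 */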

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */
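/*
 * lip[] records the path walked down to the leaf; if storing a zero
 * empties a leaf, the loop at the bottom walks that path back up and
 * frees each node whose "used" count has dropped to zero, so fully
 * zeroed subtrees cost no memory.
 */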

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}


struct g_class g_md_class = {
	.name = "MD",
	.init = md_drvinit,
};

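/*
 * Track open/close transitions.  The r/w/e deltas are applied to the
 * provider's current counts; opencount records whether any access is
 * outstanding so that mddetach() can refuse to destroy a busy device
 * unless MD_FORCE is set.
 */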
static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL)
		return (ENXIO);
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;

	bp->bio_pblkno = bp->bio_offset / sc->secsize;
	bp->bio_bcount = bp->bio_length;
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);

	wakeup(sc);
}

DECLARE_GEOM_CLASS(g_md_class, g_md);


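/*
 * I/O for malloc-backed devices.  BIO_DELETE simply drops the sector
 * from the tree.  BIO_READ materializes unwritten sectors as zeroes and
 * expands <= 255 magic values into a sector full of that byte.
 * BIO_WRITE with MD_COMPRESS scans each sector; if every byte is the
 * same, the byte value itself is stored in the tree and any previously
 * allocated buffer is freed.
 */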
static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	unsigned secno, nsec, uc;
	uintptr_t sp, osp;

	nsec = bp->bio_bcount / sc->secsize;
	secno = bp->bio_pblkno;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->secsize);
			else if (osp <= 255)
				for (i = 0; i < sc->secsize; i++)
					dst[i] = osp;
			else
				bcopy((void *)osp, dst, sc->secsize);
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->secsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->secsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t) uma_zalloc(
					    sc->uma, M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->secsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->secsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void *)osp);
		if (error)
			break;
		secno++;
		dst += sc->secsize;
	}
	bp->bio_resid = 0;
	return (error);
}

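/*
 * I/O for preloaded images is a straight bcopy to or from the image in
 * kernel memory.  Preload devices always use DEV_BSIZE sectors, so the
 * byte offset is just the block number shifted by DEV_BSHIFT.
 * BIO_DELETE is accepted but is a no-op.
 */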
static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	if (bp->bio_cmd == BIO_DELETE) {
	} else if (bp->bio_cmd == BIO_READ) {
		bcopy(sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT),
		    bp->bio_data, bp->bio_bcount);
	} else {
		bcopy(bp->bio_data, sc->pl_ptr +
		    (bp->bio_pblkno << DEV_BSHIFT), bp->bio_bcount);
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	bzero(&auio, sizeof(auio));

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_bcount;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_pblkno * sc->secsize;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_bcount;
	auio.uio_td = curthread;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
	} else {
		(void) vn_start_write(sc->vnode, &mp, V_WAIT);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_WRITE(sc->vnode, &auio,
		    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
		vn_finished_write(mp);
	}
	bp->bio_resid = auio.uio_resid;
	return (error);
}

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	int i, rv;
	int offs, len, lastp, lastend;
	vm_page_t m;
	u_char *p;
	vm_offset_t kva;

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (i.e., first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (i.e., PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	kva = kmem_alloc_nofault(kernel_map, PAGE_SIZE);

	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		pmap_qenter(kva, &m, 1);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL) {
				rv = vm_pager_get_pages(sc->object,
				    &m, 1, 0);
			}
			bcopy((void *)(kva + offs), p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid !=
			    VM_PAGE_BITS_ALL) {
				rv = vm_pager_get_pages(sc->object,
				    &m, 1, 0);
			}
			bcopy(p, (void *)(kva + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
#if 0
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid !=
			    VM_PAGE_BITS_ALL) {
				rv = vm_pager_get_pages(sc->object,
				    &m, 1, 0);
			}
			bzero((void *)(kva + offs), len);
			vm_page_dirty(m);
			m->valid = VM_PAGE_BITS_ALL;
#endif
		}
		pmap_qremove(kva, 1);
		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_activate(m);
		if (bp->bio_cmd == BIO_WRITE) {
			vm_page_dirty(m);
		}
		vm_page_unlock_queues();

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
    m->wire_count, m->busy,
    m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	vm_object_set_writeable_dirty(sc->object);
	VM_OBJECT_UNLOCK(sc->object);
	kmem_free(kernel_map, kva, PAGE_SIZE);
	return (0);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error, hasgiant;

	sc = arg;
	curthread->td_base_pri = PRIBIO;

	switch (sc->type) {
	case MD_VNODE:
		mtx_lock(&Giant);
		hasgiant = 1;
		break;
	case MD_MALLOC:
	case MD_PRELOAD:
	case MD_SWAP:
	default:
		hasgiant = 0;
		break;
	}

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		bp = bioq_first(&sc->bio_queue);
		if (bp)
			bioq_remove(&sc->bio_queue, bp);
		if (!bp) {
			if (sc->flags & MD_SHUTDOWN) {
				mtx_unlock(&sc->queue_mtx);
				sc->procp = NULL;
				wakeup(&sc->procp);
				if (hasgiant)
					mtx_unlock(&Giant);
				kthread_exit(0);
			}
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if (sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads)))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			switch (sc->type) {
			case MD_MALLOC:
				error = mdstart_malloc(sc, bp);
				break;
			case MD_PRELOAD:
				error = mdstart_preload(sc, bp);
				break;
			case MD_VNODE:
				error = mdstart_vnode(sc, bp);
				break;
			case MD_SWAP:
				error = mdstart_swap(sc, bp);
				break;
			default:
				panic("Impossible md(type)");
				break;
			}
		}

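		/*
		 * An error of -1 means g_handleattr_int() already finished
		 * off the bio, so it must not be delivered a second time.
		 */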
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	/* XXX: LOCK(unique unit numbers) */
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	/* XXX: UNLOCK(unique unit numbers) */
	return (sc);
}

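/*
 * Create a new softc.  A unit number of -1 requests automatic
 * allocation; the unit chosen is one past the highest unit currently in
 * use.  Asking for a unit that already exists fails and returns NULL.
 */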
static struct md_s *
mdnew(int unit)
{
	struct md_s *sc;
	int error, max = -1;

	/* XXX: LOCK(unique unit numbers) */
	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit) {
			/* XXX: UNLOCK(unique unit numbers) */
			return (NULL);
		}
		if (sc->unit > max)
			max = sc->unit;
	}
	if (unit == -1)
		unit = max + 1;
	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->unit = unit;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sprintf(sc->name, "md%d", unit);
	error = kthread_create(md_kthread, sc, &sc->procp, 0, 0, "%s",
	    sc->name);
	if (error) {
		mtx_destroy(&sc->queue_mtx);
		free(sc, M_MD);
		return (NULL);
	}
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	/* XXX: UNLOCK(unique unit numbers) */
	return (sc);
}

static void
mdinit(struct md_s *sc)
{

	struct g_geom *gp;
	struct g_provider *pp;

	DROP_GIANT();
	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->start = g_md_start;
	gp->access = g_md_access;
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = (off_t)sc->nsect * sc->secsize;
	pp->sectorsize = sc->secsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	PICKUP_GIANT();
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_ioctl *mdio)
{
	struct md_s *sc;

	if (mdio->md_size == 0)
		return (EINVAL);
	if (mdio->md_options & ~(MD_AUTOUNIT))
		return (EINVAL);
	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		if (sc == NULL)
			return (ENOMEM);
		mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
		if (sc == NULL)
			return (EBUSY);
	}
	sc->type = MD_PRELOAD;
	sc->secsize = DEV_BSIZE;
	sc->nsect = mdio->md_size;
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (mdio->md_size << DEV_BSHIFT);
	mdinit(sc);
	return (0);
}


static int
mdcreate_malloc(struct md_ioctl *mdio)
{
	struct md_s *sc;
	off_t u;
	uintptr_t sp;
	int error;

	error = 0;
	if (mdio->md_size == 0)
		return (EINVAL);
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_secsize != 0 && !powerof2(mdio->md_secsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		if (sc == NULL)
			return (ENOMEM);
		mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
		if (sc == NULL)
			return (EBUSY);
	}
	sc->type = MD_MALLOC;
	if (mdio->md_secsize != 0)
		sc->secsize = mdio->md_secsize;
	else
		sc->secsize = DEV_BSIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->nsect = mdio->md_size;
	sc->nsect /= (sc->secsize / DEV_BSIZE);
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->nsect);
	sc->uma = uma_zcreate(sc->name, sc->secsize,
	    NULL, NULL, NULL, NULL, 0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		for (u = 0; u < sc->nsect; u++) {
			sp = (uintptr_t) uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error)
				break;
		}
	}
	if (error) {
		mddestroy(sc, NULL);
		return (error);
	}
	mdinit(sc);
	if (!(mdio->md_options & MD_RESERVE))
		sc->pp->flags |= G_PF_CANDELETE;
	return (0);
}


static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->secsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->secsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0, curthread);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

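/*
 * Configure a vnode-backed device.  The file is first opened
 * read/write; if that fails with a permission or read-only filesystem
 * error the open is retried read-only and the device is marked
 * MD_READONLY.
 */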
static int
mdcreate_vnode(struct md_ioctl *mdio, struct thread *td)
{
	struct md_s *sc;
	struct vattr vattr;
	struct nameidata nd;
	int error, flags;

	flags = FREAD|FWRITE;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
	error = vn_open(&nd, &flags, 0, -1);
	if (error) {
		if (error != EACCES && error != EPERM && error != EROFS)
			return (error);
		flags &= ~FWRITE;
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
		error = vn_open(&nd, &flags, 0, -1);
		if (error)
			return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG ||
	    (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) {
		VOP_UNLOCK(nd.ni_vp, 0, td);
		(void) vn_close(nd.ni_vp, flags, td->td_ucred, td);
		return (error ? error : EINVAL);
	}
	VOP_UNLOCK(nd.ni_vp, 0, td);

	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		if (sc != NULL)
			mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
	}
	if (sc == NULL) {
		(void) vn_close(nd.ni_vp, flags, td->td_ucred, td);
		return (EBUSY);
	}

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->type = MD_VNODE;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->secsize = DEV_BSIZE;
	sc->vnode = nd.ni_vp;

	/*
	 * If the size is specified, override the file attributes.
	 */
	if (mdio->md_size)
		sc->nsect = mdio->md_size;
	else
		sc->nsect = vattr.va_size / sc->secsize; /* XXX: round up ? */
	if (sc->nsect == 0) {
		mddestroy(sc, td);
		return (EINVAL);
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error) {
		mddestroy(sc, td);
		return (error);
	}
	mdinit(sc);
	return (0);
}

static void
md_zapit(void *p, int cancel)
{
	if (cancel)
		return;
	g_wither_geom(p, ENXIO);
}

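/*
 * Tear down a device: wither the geom, tell the worker thread to exit
 * by setting MD_SHUTDOWN and wait until it has cleared sc->procp, and
 * finally release whatever backing store the device type holds.
 */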
static int
mddestroy(struct md_s *sc, struct thread *td)
{

	GIANT_REQUIRED;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_waitfor_event(md_zapit, sc->gp, M_WAITOK, sc->gp, NULL);
		sc->gp = NULL;
		sc->pp = NULL;
	}
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (sc->procp != NULL)
		tsleep(&sc->procp, PRIBIO, "mddestroy", hz / 10);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL)
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL) {
		vm_object_deallocate(sc->object);
	}
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	/* XXX: LOCK(unique unit numbers) */
	LIST_REMOVE(sc, list);
	/* XXX: UNLOCK(unique unit numbers) */
	free(sc, M_MD);
	return (0);
}

static int
mdcreate_swap(struct md_ioctl *mdio, struct thread *td)
{
	int error;
	struct md_s *sc;

	GIANT_REQUIRED;

	if (mdio->md_options & MD_AUTOUNIT) {
		sc = mdnew(-1);
		if (sc != NULL)
			mdio->md_unit = sc->unit;
	} else {
		sc = mdnew(mdio->md_unit);
	}
	if (sc == NULL)
		return (EBUSY);

	sc->type = MD_SWAP;

	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */

	if (mdio->md_size == 0) {
		mddestroy(sc, td);
		return (EDOM);
	}

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * sc_nsect is in units of DEV_BSIZE.
	 * sc_npage is in units of PAGE_SIZE.
	 *
	 * Note the truncation.
	 */

	sc->secsize = DEV_BSIZE;
	sc->npage = mdio->md_size / (PAGE_SIZE / DEV_BSIZE);
	sc->nsect = sc->npage * (PAGE_SIZE / DEV_BSIZE);
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE *
	    (vm_offset_t)sc->npage, VM_PROT_DEFAULT, 0);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, sc->npage) < 0) {
			vm_object_deallocate(sc->object);
			sc->object = NULL;
			mddestroy(sc, td);
			return (EDOM);
		}
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error) {
		mddestroy(sc, td);
		return (error);
	}
	mdinit(sc);
	if (!(mdio->md_options & MD_RESERVE))
		sc->pp->flags |= G_PF_CANDELETE;
	return (0);
}

static int
mddetach(int unit, struct thread *td)
{
	struct md_s *sc;

	sc = mdfind(unit);
	if (sc == NULL)
		return (ENOENT);
	if (sc->opencount != 0 && !(sc->flags & MD_FORCE))
		return (EBUSY);
	switch (sc->type) {
	case MD_VNODE:
	case MD_SWAP:
	case MD_MALLOC:
	case MD_PRELOAD:
		return (mddestroy(sc, td));
	default:
		return (EOPNOTSUPP);
	}
}

static int
mdctlioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	/*
	 * We assert the version number in the individual ioctl
	 * handlers instead of out here because (a) it is possible we
	 * may add another ioctl in the future which doesn't read an
	 * mdio, and (b) the correct return value for an unknown ioctl
	 * is ENOIOCTL, not EINVAL.
	 */
	mdio = (struct md_ioctl *)addr;
	switch (cmd) {
	case MDIOCATTACH:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		switch (mdio->md_type) {
		case MD_MALLOC:
			return (mdcreate_malloc(mdio));
		case MD_PRELOAD:
			return (mdcreate_preload(mdio));
		case MD_VNODE:
			return (mdcreate_vnode(mdio, td));
		case MD_SWAP:
			return (mdcreate_swap(mdio, td));
		default:
			return (EINVAL);
		}
	case MDIOCDETACH:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		if (mdio->md_file != NULL || mdio->md_size != 0 ||
		    mdio->md_options != 0)
			return (EINVAL);
		return (mddetach(mdio->md_unit, td));
	case MDIOCQUERY:
		if (mdio->md_version != MDIOVERSION)
			return (EINVAL);
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		switch (sc->type) {
		case MD_MALLOC:
			mdio->md_size = sc->nsect;
			break;
		case MD_PRELOAD:
			mdio->md_size = sc->nsect;
			mdio->md_base = (uint64_t)(intptr_t)sc->pl_ptr;
			break;
		case MD_SWAP:
			mdio->md_size = sc->nsect;
			break;
		case MD_VNODE:
			mdio->md_size = sc->nsect;
			/* XXX fill this in */
			mdio->md_file = NULL;
			break;
		}
		return (0);
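	/*
	 * MDIOCLIST returns the configured unit numbers in md_pad[]:
	 * md_pad[0] holds the count, md_pad[1..] the units, and a -1
	 * entry marks the list as truncated when more than MDNPAD - 1
	 * devices exist.
	 */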
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
	return (ENOIOCTL);
}

static void
md_preloaded(u_char *image, unsigned length)
{
	struct md_s *sc;

	sc = mdnew(-1);
	if (sc == NULL)
		return;
	sc->type = MD_PRELOAD;
	sc->secsize = DEV_BSIZE;
	sc->nsect = length / DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	if (sc->unit == 0)
		mdrootready = 1;
	mdinit(sc);
}

static void
md_drvinit(struct g_class *mp __unused)
{

	caddr_t mod;
	caddr_t c;
	u_char *ptr, *name, *type;
	unsigned len;

	mod = NULL;
	g_topology_unlock();
#ifdef MD_ROOT_SIZE
	md_preloaded(mfs_root, MD_ROOT_SIZE*1024);
#endif
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (name == NULL)
			continue;
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		c = preload_search_info(mod, MODINFO_ADDR);
		ptr = *(u_char **)c;
		c = preload_search_info(mod, MODINFO_SIZE);
		len = *(size_t *)c;
		printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
		    MD_NAME, mdunits, name, len, ptr);
		md_preloaded(ptr, len);
	}
	status_dev = make_dev(&mdctl_cdevsw, 0xffff00ff, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static int
md_modevent(module_t mod, int type, void *data)
{
	int error;
	struct md_s *sc;

	switch (type) {
	case MOD_LOAD:
		break;
	case MOD_UNLOAD:
		LIST_FOREACH(sc, &md_softc_list, list) {
			error = mddetach(sc->unit, curthread);
			if (error != 0)
				return (error);
		}
		if (status_dev)
			destroy_dev(status_dev);
		status_dev = 0;
		break;
	default:
		break;
	}
	return (0);
}

static moduledata_t md_mod = {
	MD_NAME,
	md_modevent,
	NULL
};
DECLARE_MODULE(md, md_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_VERSION(md, MD_MODVER);


#ifdef MD_ROOT
static void
md_takeroot(void *junk)
{
	if (mdrootready)
		rootdevnames[0] = "ufs:/dev/md0";
}

SYSINIT(md_root, SI_SUB_MOUNT_ROOT, SI_ORDER_FIRST, md_takeroot, NULL);
#endif /* MD_ROOT */