/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD: head/sys/dev/md/md.c 286727 2015-08-13 15:16:34Z marcel $
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such are under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>
#include <geom/geom_int.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
    "Enable md(4) debug messages");
static int md_malloc_wait;
SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
    "Allow malloc to wait for memory allocations");

#if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
#define	MD_ROOT_FSTYPE	"ufs"
#endif

#if defined(MD_ROOT)
/*
 * Preloaded image gets put here.
 */
#if defined(MD_ROOT_SIZE)
/*
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
const int mfs_root_size = sizeof(mfs_root.start);
#else
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
__GLOBL(mfs_root);
__GLOBL(mfs_root_end);
#define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
#endif
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);

static struct cdev *status_dev = NULL;
static struct sx md_sx;
static struct unrhdr *md_uh;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

static int md_vnode_pbuf_freecnt;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};
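
/*
 * Note on the indirection tree: interior nodes hold pointers to
 * lower-level struct indir nodes.  In a leaf node, each array[] entry
 * encodes one sector: 0 means the sector was never written (it reads
 * back as zeroes), values 1..255 mean the sector is entirely filled
 * with that byte (the MD_COMPRESS case), and any larger value is a
 * pointer to a uma-allocated sector buffer.
 */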

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct mtx stat_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
	    | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */
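
/*
 * For example, on a 64-bit machine with 4 KB pages, NINDIR is 512 and
 * nshift is 9 (computed in g_md_init()).  A device of 2^20 sectors
 * needs two divisions to bring rcnt below NINDIR, so the top node gets
 * shift 2 * 9 = 18 and lookups traverse three levels (shifts 18, 9, 0).
 */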

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */
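
/*
 * The lip[] stack below records the path from the root so that nodes
 * whose "used" count drops to zero can be freed on the way back up.
 * Ten entries are ample: with NINDIR = 512 (nshift = 9), ten levels
 * cover 90 bits of sector offset.
 */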

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL) {
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
		mtx_lock(&sc->stat_mtx);
		devstat_start_transaction_bio(sc->devstat, bp);
		mtx_unlock(&sc->stat_mtx);
	}
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

#define	MD_MALLOC_MOVE_ZERO	1
#define	MD_MALLOC_MOVE_FILL	2
#define	MD_MALLOC_MOVE_READ	3
#define	MD_MALLOC_MOVE_WRITE	4
#define	MD_MALLOC_MOVE_CMP	5

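/*
 * md_malloc_move() transfers one sector between an unmapped bio's
 * vm_page array and a malloc-backed sector.  ZERO and FILL initialize
 * the pages, READ copies the sector buffer into the pages, WRITE
 * copies the pages into the sector buffer, and CMP scans the pages to
 * decide whether the sector is a single repeated byte: it returns 0
 * and stores the byte value through "ptr" if so, EDOOFUS otherwise.
 */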
static int
md_malloc_move(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
    void *ptr, u_char fill, int op)
{
	struct sf_buf *sf;
	vm_page_t m, *mp1;
	char *p, first;
	off_t *uc;
	unsigned n;
	int error, i, ma_offs1, sz, first_read;

	m = NULL;
	error = 0;
	sf = NULL;
	/* if (op == MD_MALLOC_MOVE_CMP) { gcc */
		first = 0;
		first_read = 0;
		uc = ptr;
		mp1 = *mp;
		ma_offs1 = *ma_offs;
	/* } */
	sched_pin();
	for (n = sectorsize; n != 0; n -= sz) {
		sz = imin(PAGE_SIZE - *ma_offs, n);
		if (m != **mp) {
			if (sf != NULL)
				sf_buf_free(sf);
			m = **mp;
			sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
			    (md_malloc_wait ? 0 : SFB_NOWAIT));
			if (sf == NULL) {
				error = ENOMEM;
				break;
			}
		}
		p = (char *)sf_buf_kva(sf) + *ma_offs;
		switch (op) {
		case MD_MALLOC_MOVE_ZERO:
			bzero(p, sz);
			break;
		case MD_MALLOC_MOVE_FILL:
			memset(p, fill, sz);
			break;
		case MD_MALLOC_MOVE_READ:
			bcopy(ptr, p, sz);
			cpu_flush_dcache(p, sz);
			break;
		case MD_MALLOC_MOVE_WRITE:
			bcopy(p, ptr, sz);
			break;
		case MD_MALLOC_MOVE_CMP:
			for (i = 0; i < sz; i++, p++) {
				if (!first_read) {
					*uc = (u_char)*p;
					first = *p;
					first_read = 1;
				} else if (*p != first) {
					error = EDOOFUS;
					break;
				}
			}
			break;
		default:
			KASSERT(0, ("md_malloc_move unknown op %d\n", op));
			break;
		}
		if (error != 0)
			break;
		*ma_offs += sz;
		*ma_offs %= PAGE_SIZE;
		if (*ma_offs == 0)
			(*mp)++;
		ptr = (char *)ptr + sz;
	}

	if (sf != NULL)
		sf_buf_free(sf);
	sched_unpin();
	if (op == MD_MALLOC_MOVE_CMP && error != 0) {
		*mp = mp1;
		*ma_offs = ma_offs1;
	}
	return (error);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	u_char *dst;
	vm_page_t *m;
	int i, error, error1, ma_offs, notmapped;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
	if (notmapped) {
		m = bp->bio_ma;
		ma_offs = bp->bio_ma_offset;
		dst = NULL;
	} else {
		dst = bp->bio_data;
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0) {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, NULL, 0,
					    MD_MALLOC_MOVE_ZERO);
				} else
					bzero(dst, sc->sectorsize);
			} else if (osp <= 255) {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, NULL, osp,
					    MD_MALLOC_MOVE_FILL);
				} else
					memset(dst, osp, sc->sectorsize);
			} else {
				if (notmapped) {
					error = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, (void *)osp, 0,
					    MD_MALLOC_MOVE_READ);
				} else {
					bcopy((void *)osp, dst, sc->sectorsize);
					cpu_flush_dcache(dst, sc->sectorsize);
				}
			}
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
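			/*
			 * With MD_COMPRESS, scan the incoming sector first:
			 * if it is a single repeated byte, store the byte
			 * value itself in the tree instead of allocating a
			 * sector buffer.
			 */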
			if (sc->flags & MD_COMPRESS) {
				if (notmapped) {
					error1 = md_malloc_move(&m, &ma_offs,
					    sc->sectorsize, &uc, 0,
					    MD_MALLOC_MOVE_CMP);
					i = error1 == 0 ? sc->sectorsize : 0;
				} else {
					uc = dst[0];
					for (i = 1; i < sc->sectorsize; i++) {
						if (dst[i] != uc)
							break;
					}
				}
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    md_malloc_wait ? M_WAITOK :
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					if (notmapped) {
						error = md_malloc_move(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)sp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)sp,
						    sc->sectorsize);
					}
					error = s_write(sc->indir, secno, sp);
				} else {
					if (notmapped) {
						error = md_malloc_move(&m,
						    &ma_offs, sc->sectorsize,
						    (void *)osp, 0,
						    MD_MALLOC_MOVE_WRITE);
					} else {
						bcopy(dst, (void *)osp,
						    sc->sectorsize);
					}
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		if (!notmapped)
			dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct buf *pb;
	struct thread *td;
	off_t end, zerosize;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	/*
	 * Special case for BIO_DELETE.  On the surface, this is very
	 * similar to BIO_WRITE, except that we write from our own
	 * fixed-length buffer, so we have to loop.  The net result is
	 * that the two cases end up having very little in common.
	 */
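	/*
	 * BIO_DELETE is emulated by overwriting the range with zeroes
	 * from the shared zero_region; zerosize is ZERO_REGION_SIZE
	 * rounded down to a multiple of the sector size, so each
	 * VOP_WRITE stays sector-aligned.
	 */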
	if (bp->bio_cmd == BIO_DELETE) {
		zerosize = ZERO_REGION_SIZE -
		    (ZERO_REGION_SIZE % sc->sectorsize);
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;
		end = bp->bio_offset + bp->bio_length;
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		while (auio.uio_offset < end) {
			aiov.iov_base = __DECONST(void *, zero_region);
			aiov.iov_len = end - auio.uio_offset;
			if (aiov.iov_len > zerosize)
				aiov.iov_len = zerosize;
			auio.uio_resid = aiov.iov_len;
			error = VOP_WRITE(vp, &auio,
			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred);
			if (error != 0)
				break;
		}
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		bp->bio_resid = end - auio.uio_offset;
		return (error);
	}

	if ((bp->bio_flags & BIO_UNMAPPED) == 0) {
		pb = NULL;
		aiov.iov_base = bp->bio_data;
	} else {
		KASSERT(bp->bio_length <= MAXPHYS, ("bio_length %jd",
		    (uintmax_t)bp->bio_length));
		pb = getpbuf(&md_vnode_pbuf_freecnt);
		pmap_qenter((vm_offset_t)pb->b_data, bp->bio_ma, bp->bio_ma_n);
		aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
		    bp->bio_ma_offset);
	}
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		pmap_qremove((vm_offset_t)pb->b_data, bp->bio_ma_n);
		relpbuf(pb, &md_vnode_pbuf_freecnt);
	}
	bp->bio_resid = auio.uio_resid;
	return (error);
}

static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	vm_page_t m;
	u_char *p;
	vm_pindex_t i, lastp;
	int rv, ma_offs, offs, len, lastend;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;
	ma_offs = (bp->bio_flags & BIO_UNMAPPED) == 0 ? 0 : bp->bio_ma_offset;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_WLOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid == VM_PAGE_BITS_ALL)
				rv = VM_PAGER_OK;
			else
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			} else if (rv == VM_PAGER_FAIL) {
				/*
				 * Pager does not have the page.  Zero
				 * the allocated page, and mark it as
				 * valid. Do not set dirty, the page
				 * can be recreated if thrown out.
				 */
				pmap_zero_page(m);
				m->valid = VM_PAGE_BITS_ALL;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(&m, offs, bp->bio_ma,
				    ma_offs, len);
			} else {
				physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
				cpu_flush_dcache(p, len);
			}
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				pmap_copy_pages(bp->bio_ma, ma_offs, &m,
				    offs, len);
			} else {
				physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
			}
			m->valid = VM_PAGE_BITS_ALL;
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			else
				rv = VM_PAGER_OK;
			if (rv == VM_PAGER_ERROR) {
				vm_page_xunbusy(m);
				break;
			}
			if (len != PAGE_SIZE) {
				pmap_zero_page_area(m, offs, len);
				vm_page_clear_dirty(m, offs, len);
				m->valid = VM_PAGE_BITS_ALL;
			} else
				vm_pager_page_unswapped(m);
		}
		vm_page_xunbusy(m);
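		/*
		 * A page fully covered by BIO_DELETE is freed outright;
		 * a partially covered page was zeroed above and stays
		 * resident on the active queue.
		 */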
		vm_page_lock(m);
		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
			vm_page_free(m);
		else
			vm_page_activate(m);
		vm_page_unlock(m);
		if (bp->bio_cmd == BIO_WRITE) {
			vm_page_dirty(m);
			vm_pager_page_unswapped(m);
		}

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
		ma_offs += len;
	}
	vm_object_pip_wakeup(sc->object);
	VM_OBJECT_WUNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

static int
mdstart_null(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bzero(bp->bio_data, bp->bio_length);
		cpu_flush_dcache(bp->bio_data, bp->bio_length);
		break;
	case BIO_WRITE:
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if ((sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads))) ||
			    g_handleattr_int(bp, "GEOM::candelete", 1))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

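		/*
		 * An error of -1 means g_handleattr_int() already completed
		 * and delivered the bio, so there is nothing left to do.
		 */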
		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
			g_io_deliver(bp, error);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc;
	int error;

	*errp = 0;
	if (unit == -1)
		unit = alloc_unr(md_uh);
	else
		unit = alloc_unr_specific(md_uh, unit);

	if (unit == -1) {
		*errp = EBUSY;
		return (NULL);
	}

	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	switch (sc->type) {
	case MD_MALLOC:
	case MD_VNODE:
	case MD_SWAP:
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		break;
	case MD_PRELOAD:
	case MD_NULL:
		break;
	}
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
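	/*
	 * With MD_RESERVE, allocate a zeroed sector buffer for every
	 * sector up front, so ordinary writes later on need not
	 * allocate memory.
	 */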
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
			    M_WAITOK : M_NOWAIT) | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credits in our softc
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	char *fname;
	int error, flags;

	/*
	 * Kernel-originated requests must have the filename appended
	 * to the mdio structure to protect against malicious software.
	 */
	fname = mdio->md_file;
	if ((void *)fname != (void *)(mdio + 1)) {
		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
		if (error != 0)
			return (error);
	} else
		strlcpy(sc->file, fname, sizeof(sc->file));

	/*
	 * If the user specified that this is a read only device, don't
	 * set the FWRITE mask before trying to open the backing store.
	 */
	flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
		goto bad;
	}
	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
	if (error != 0)
		goto bad;
	if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
		vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
		if (nd.ni_vp->v_iflag & VI_DOOMED) {
			/* Forced unmount. */
			error = EBADF;
			goto bad;
		}
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		goto bad;
	}
	return (0);
bad:
	VOP_UNLOCK(nd.ni_vp, 0);
	(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
	return (error);
}

static int
mddestroy(struct md_s *sc, struct thread *td)
{

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->stat_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free_unr(md_uh, sc->unit);
	free(sc, M_MD);
	return (0);
}

static int
mdresize(struct md_s *sc, struct md_ioctl *mdio)
{
	int error, res;
	vm_pindex_t oldpages, newpages;

	switch (sc->type) {
	case MD_VNODE:
	case MD_NULL:
		break;
	case MD_SWAP:
		if (mdio->md_mediasize <= 0 ||
		    (mdio->md_mediasize % PAGE_SIZE) != 0)
			return (EDOM);
		oldpages = OFF_TO_IDX(round_page(sc->mediasize));
		newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
		if (newpages < oldpages) {
			VM_OBJECT_WLOCK(sc->object);
			vm_object_page_remove(sc->object, newpages, 0, 0);
			swap_pager_freespace(sc->object, newpages,
			    oldpages - newpages);
			swap_release_by_cred(IDX_TO_OFF(oldpages -
			    newpages), sc->cred);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		} else if (newpages > oldpages) {
			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
			    oldpages), sc->cred);
			if (!res)
				return (ENOMEM);
			if ((mdio->md_options & MD_RESERVE) ||
			    (sc->flags & MD_RESERVE)) {
				error = swap_pager_reserve(sc->object,
				    oldpages, newpages - oldpages);
				if (error < 0) {
					swap_release_by_cred(
					    IDX_TO_OFF(newpages - oldpages),
					    sc->cred);
					return (EDOM);
				}
			}
			VM_OBJECT_WLOCK(sc->object);
			sc->object->charge = IDX_TO_OFF(newpages);
			sc->object->size = newpages;
			VM_OBJECT_WUNLOCK(sc->object);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	sc->mediasize = mdio->md_mediasize;
	g_topology_lock();
	g_resize_provider(sc->pp, sc->mediasize);
	g_topology_unlock();
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0, td->td_ucred);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE);
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			error = EDOM;
			goto finish;
		}
	}
	error = mdsetcred(sc, td->td_ucred);
 finish:
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}

static int
mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{

	/*
	 * Range check.  Disallow negative sizes and sizes that are not
	 * a multiple of the page size.
	 */
	if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	return (0);
}

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;
	unsigned sectsize;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * The version number is checked once, up front, because every
	 * ioctl handled here reads an mdio.  Note that the correct
	 * return value for an unknown ioctl is ENOIOCTL, not EINVAL.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
		case MD_NULL:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_sectorsize == 0)
			sectsize = DEV_BSIZE;
		else
			sectsize = mdio->md_sectorsize;
		if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize)
			return (EINVAL);
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else {
			if (mdio->md_unit > INT_MAX)
				return (EINVAL);
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		}
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		sc->sectorsize = sectsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			/*
			 * We disallow attaching preloaded memory disks via
			 * ioctl. Preloaded memory disks are automatically
			 * attached in g_md_init().
			 */
			error = EOPNOTSUPP;
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		case MD_NULL:
			sc->start = mdstart_null;
			error = mdcreate_null(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 ||
		    (mdio->md_options & ~MD_FORCE) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCRESIZE:
		if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (mdio->md_mediasize < sc->sectorsize)
			return (EINVAL);
		if (mdio->md_mediasize < sc->mediasize &&
		    !(sc->flags & MD_FORCE) &&
		    !(mdio->md_options & MD_FORCE))
			return (EBUSY);
		return (mdresize(sc, mdio));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}
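
/*
 * All configuration requests are serialized through md_sx, so unit
 * creation, resizing and destruction never run concurrently.
 */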

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length, const char *name)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
#endif
	mdinit(sc);
	if (name != NULL) {
		printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
		    MD_NAME, sc->unit, name, length, image);
	} else {
		printf("%s%d: Embedded image %zd bytes at %p\n",
		    MD_NAME, sc->unit, length, image);
	}
}

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	u_char *ptr, *name, *type;
	unsigned len;
	int i;

	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;
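	/* e.g., NINDIR = 512 yields nshift = 9 */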

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
	md_uh = new_unrhdr(0, INT_MAX, NULL);
#ifdef MD_ROOT
	if (mfs_root_size != 0) {
		sx_xlock(&md_sx);
		md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
		    NULL);
		sx_xunlock(&md_sx);
	}
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
		if (ptr != NULL && len != 0) {
			sx_xlock(&md_sx);
			md_preloaded(ptr, len, name);
			sx_xunlock(&md_sx);
		}
	}
	md_vnode_pbuf_freecnt = nswbuf / 10;
	status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	case MD_NULL:
		type = "null";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
			    (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
			sbuf_printf(sb, "%s<access>%s</access>\n", indent,
			    (mp->flags & MD_READONLY) == 0 ? "read-write":
			    "read-only");
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL) {
				sbuf_printf(sb, "%s<file>", indent);
				g_conf_printf_escaped(sb, "%s", mp->file);
				sbuf_printf(sb, "</file>\n");
			}
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{

	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
	delete_unrhdr(md_uh);
}
1699