/* g_part.c, revision 221972 */
1/*-
2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/part/g_part.c 221972 2011-05-15 20:03:54Z ae $");
29
30#include <sys/param.h>
31#include <sys/bio.h>
32#include <sys/diskmbr.h>
33#include <sys/endian.h>
34#include <sys/kernel.h>
35#include <sys/kobj.h>
36#include <sys/limits.h>
37#include <sys/lock.h>
38#include <sys/malloc.h>
39#include <sys/mutex.h>
40#include <sys/queue.h>
41#include <sys/sbuf.h>
42#include <sys/sysctl.h>
43#include <sys/systm.h>
44#include <sys/uuid.h>
45#include <geom/geom.h>
46#include <geom/geom_ctl.h>
47#include <geom/geom_int.h>
48#include <geom/part/g_part.h>
49
50#include "g_part_if.h"
51
52#ifndef _PATH_DEV
53#define _PATH_DEV "/dev/"
54#endif
55
/* Empty method table used by the "null" (no scheme) placeholder scheme. */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};
59
/*
 * Placeholder scheme attached to a geom whose table has been destroyed
 * but not yet committed; it carries no methods of its own.
 */
static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};
65
/* Registry of all partitioning schemes known to this kernel. */
TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);
68
/*
 * Table mapping scheme-independent partition type aliases to their
 * user-visible lexemes; consulted by g_part_alias_name().
 */
struct g_part_alias_list {
	const char *lexeme;
	enum g_part_alias alias;
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "bios-boot", G_PART_ALIAS_BIOS_BOOT },
	{ "ebr", G_PART_ALIAS_EBR },
	{ "efi", G_PART_ALIAS_EFI },
	{ "fat32", G_PART_ALIAS_MS_FAT32 },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "mbr", G_PART_ALIAS_MBR },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
};
107
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0, "GEOM_PART stuff");
/*
 * When zero, a table that fails the integrity check is tolerated: it is
 * flagged corrupt instead of being rejected (see g_part_check_integrity).
 */
static u_int check_integrity = 1;
TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity);
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, CTLFLAG_RW,
    &check_integrity, 1, "Enable integrity checking");
114
/*
 * The GEOM partitioning class.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

/* Register the class with GEOM. */
DECLARE_GEOM_CLASS(g_part_class, g_part);
148
149/*
150 * Support functions.
151 */
152
153static void g_part_wither(struct g_geom *, int);
154
155const char *
156g_part_alias_name(enum g_part_alias alias)
157{
158	int i;
159
160	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
161		if (g_part_alias_list[i].alias != alias)
162			continue;
163		return (g_part_alias_list[i].lexeme);
164	}
165
166	return (NULL);
167}
168
/*
 * For a disk of 'blocks' sectors and a fixed sectors-per-track count,
 * find the head count (from a fixed candidate set) that maximizes the
 * number of CHS-addressable sectors, considering only geometries with
 * at most 1023 cylinders.  *bestchs and *bestheads are left zero when
 * no candidate geometry fits.
 */
void
g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
    u_int *bestheads)
{
	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
	off_t cylinders, chs;
	u_int hd;
	int i;

	*bestchs = 0;
	*bestheads = 0;
	for (i = 0; (hd = candidate_heads[i]) != 0; i++) {
		cylinders = blocks / hd / sectors;
		/* Degenerate geometry: stop trying larger head counts. */
		if (cylinders < hd || cylinders < sectors)
			break;
		if (cylinders > 1023)
			continue;
		chs = cylinders * hd * sectors;
		/* On a tie, prefer any geometry over a single-head one. */
		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
			*bestchs = chs;
			*bestheads = hd;
		}
	}
}
194
/*
 * Derive a CHS geometry for the media.  Firmware-reported values are
 * used verbatim when available; otherwise the best geometry is
 * synthesized from a fixed set of candidate sectors-per-track values.
 */
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		/* No usable firmware geometry; synthesize one. */
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		/* Trust the firmware-provided geometry. */
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}
241
/*
 * Sanity-check a partition table: the usable sector range must fit on
 * the provider and no two live (non-deleted, non-internal) entries may
 * overlap.  Returns 0 when the table is sane, or when checking is
 * administratively disabled (the table is then flagged corrupt);
 * EINVAL otherwise.
 */
static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
	struct g_part_entry *e1, *e2;
	struct g_provider *pp;

	pp = cp->provider;
	/* The usable range must lie within the provider's media. */
	if (table->gpt_first > table->gpt_last ||
	    table->gpt_last > pp->mediasize / pp->sectorsize - 1)
		goto fail;

	LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
		if (e1->gpe_deleted || e1->gpe_internal)
			continue;
		/* Each entry must lie within the usable range. */
		if (e1->gpe_start < table->gpt_first ||
		    e1->gpe_start > table->gpt_last ||
		    e1->gpe_end < e1->gpe_start ||
		    e1->gpe_end > table->gpt_last)
			goto fail;
		/* Compare e1 against every later entry for overlap. */
		e2 = e1;
		while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
			if (e2->gpe_deleted || e2->gpe_internal)
				continue;
			if (e1->gpe_start >= e2->gpe_start &&
			    e1->gpe_start <= e2->gpe_end)
				goto fail;
			if (e1->gpe_end >= e2->gpe_start &&
			    e1->gpe_end <= e2->gpe_end)
				goto fail;
			/* e1 fully contains e2. */
			if (e1->gpe_start < e2->gpe_start &&
			    e1->gpe_end > e2->gpe_end)
				goto fail;
		}
	}
	return (0);
fail:
	printf("GEOM_PART: integrity check failed (%s, %s)\n", pp->name,
	    table->gpt_scheme->name);
	if (check_integrity == 0) {
		/* Tolerate the broken table but remember it is corrupt. */
		table->gpt_corrupt = 1;
		return (0);
	}
	return (EINVAL);
}
286
287struct g_part_entry *
288g_part_new_entry(struct g_part_table *table, int index, quad_t start,
289    quad_t end)
290{
291	struct g_part_entry *entry, *last;
292
293	last = NULL;
294	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
295		if (entry->gpe_index == index)
296			break;
297		if (entry->gpe_index > index) {
298			entry = NULL;
299			break;
300		}
301		last = entry;
302	}
303	if (entry == NULL) {
304		entry = g_malloc(table->gpt_scheme->gps_entrysz,
305		    M_WAITOK | M_ZERO);
306		entry->gpe_index = index;
307		if (last == NULL)
308			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
309		else
310			LIST_INSERT_AFTER(last, entry, gpe_entry);
311	} else
312		entry->gpe_offset = 0;
313	entry->gpe_start = start;
314	entry->gpe_end = end;
315	return (entry);
316}
317
/*
 * Create (or refresh) the GEOM provider for a table entry and bring it
 * online.  The provider inherits sector size, flags and stripe
 * parameters from the underlying provider.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* The payload offset may lie past the entry's start sector. */
	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		/* First exposure: build the provider name via the scheme. */
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	/* Exclude any leading slack between the start sector and payload. */
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	g_error_provider(entry->gpe_pp, 0);
}
354
355static struct g_geom*
356g_part_find_geom(const char *name)
357{
358	struct g_geom *gp;
359	LIST_FOREACH(gp, &g_part_class.geom, geom) {
360		if (!strcmp(name, gp->name))
361			break;
362	}
363	return (gp);
364}
365
366static int
367g_part_parm_geom(struct gctl_req *req, const char *name, struct g_geom **v)
368{
369	struct g_geom *gp;
370	const char *gname;
371
372	gname = gctl_get_asciiparam(req, name);
373	if (gname == NULL)
374		return (ENOATTR);
375	if (strncmp(gname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
376		gname += sizeof(_PATH_DEV) - 1;
377	gp = g_part_find_geom(gname);
378	if (gp == NULL) {
379		gctl_error(req, "%d %s '%s'", EINVAL, name, gname);
380		return (EINVAL);
381	}
382	*v = gp;
383	return (0);
384}
385
386static int
387g_part_parm_provider(struct gctl_req *req, const char *name,
388    struct g_provider **v)
389{
390	struct g_provider *pp;
391	const char *pname;
392
393	pname = gctl_get_asciiparam(req, name);
394	if (pname == NULL)
395		return (ENOATTR);
396	if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
397		pname += sizeof(_PATH_DEV) - 1;
398	pp = g_provider_by_name(pname);
399	if (pp == NULL) {
400		gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
401		return (EINVAL);
402	}
403	*v = pp;
404	return (0);
405}
406
407static int
408g_part_parm_quad(struct gctl_req *req, const char *name, quad_t *v)
409{
410	const char *p;
411	char *x;
412	quad_t q;
413
414	p = gctl_get_asciiparam(req, name);
415	if (p == NULL)
416		return (ENOATTR);
417	q = strtoq(p, &x, 0);
418	if (*x != '\0' || q < 0) {
419		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
420		return (EINVAL);
421	}
422	*v = q;
423	return (0);
424}
425
426static int
427g_part_parm_scheme(struct gctl_req *req, const char *name,
428    struct g_part_scheme **v)
429{
430	struct g_part_scheme *s;
431	const char *p;
432
433	p = gctl_get_asciiparam(req, name);
434	if (p == NULL)
435		return (ENOATTR);
436	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
437		if (s == &g_part_null_scheme)
438			continue;
439		if (!strcasecmp(s->name, p))
440			break;
441	}
442	if (s == NULL) {
443		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
444		return (EINVAL);
445	}
446	*v = s;
447	return (0);
448}
449
450static int
451g_part_parm_str(struct gctl_req *req, const char *name, const char **v)
452{
453	const char *p;
454
455	p = gctl_get_asciiparam(req, name);
456	if (p == NULL)
457		return (ENOATTR);
458	/* An empty label is always valid. */
459	if (strcmp(name, "label") != 0 && p[0] == '\0') {
460		gctl_error(req, "%d %s '%s'", EINVAL, name, p);
461		return (EINVAL);
462	}
463	*v = p;
464	return (0);
465}
466
467static int
468g_part_parm_intmax(struct gctl_req *req, const char *name, u_int *v)
469{
470	const intmax_t *p;
471	int size;
472
473	p = gctl_get_param(req, name, &size);
474	if (p == NULL)
475		return (ENOATTR);
476	if (size != sizeof(*p) || *p < 0 || *p > INT_MAX) {
477		gctl_error(req, "%d %s '%jd'", EINVAL, name, *p);
478		return (EINVAL);
479	}
480	*v = (u_int)*p;
481	return (0);
482}
483
484static int
485g_part_parm_uint32(struct gctl_req *req, const char *name, u_int *v)
486{
487	const uint32_t *p;
488	int size;
489
490	p = gctl_get_param(req, name, &size);
491	if (p == NULL)
492		return (ENOATTR);
493	if (size != sizeof(*p) || *p > INT_MAX) {
494		gctl_error(req, "%d %s '%u'", EINVAL, name, (unsigned int)*p);
495		return (EINVAL);
496	}
497	*v = (u_int)*p;
498	return (0);
499}
500
501static int
502g_part_parm_bootcode(struct gctl_req *req, const char *name, const void **v,
503    unsigned int *s)
504{
505	const void *p;
506	int size;
507
508	p = gctl_get_param(req, name, &size);
509	if (p == NULL)
510		return (ENOATTR);
511	*v = p;
512	*s = size;
513	return (0);
514}
515
/*
 * Probe all registered schemes against the consumer and attach the
 * best-matching table to gp->softc.  A probe result of 0 is a perfect
 * match and stops the search; negative results compete (closer to
 * zero wins); positive results indicate an error.  Returns ENXIO when
 * no scheme recognizes the media.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	/* Start from the currently attached scheme, if any. */
	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* Better match: replace the previous candidate. */
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}
557
558/*
559 * Control request functions.
560 */
561
/*
 * Handle the "add" verb: create a new partition entry within the
 * table's usable range, assign it an index and expose a provider for
 * it.  The change only reaches the disk on commit.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* The new partition must lie within the table's usable range. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Scan the (index-sorted) entry list to: pick a free index (or
	 * validate the requested one), remember a recyclable deleted
	 * entry with that index (delent), find the entry to insert
	 * after (last), and reject overlaps with live entries.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	/* Recycle a matching deleted entry rather than allocating. */
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Insert so the list stays sorted by index. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
672
/*
 * Handle the "bootcode" verb: hand boot code to the scheme for
 * installation.  Fails with ENODEV when the scheme reserves no room
 * for boot code and EFBIG when the image is too large.
 */
static int
g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_table *table;
	struct sbuf *sb;
	int error, sz;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	sz = table->gpt_scheme->gps_bootcodesz;
	if (sz == 0) {
		error = ENODEV;
		goto fail;
	}
	if (gpp->gpp_codesize > sz) {
		error = EFBIG;
		goto fail;
	}

	error = G_PART_BOOTCODE(table, gpp);
	if (error)
		goto fail;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "bootcode written to %s\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

 fail:
	gctl_error(req, "%d", error);
	return (error);
}
714
/*
 * Handle the "commit" verb: flush pending changes to disk.  First the
 * sectors flagged in the gpt_smhead/gpt_smtail bitmaps (counted from
 * the start and end of the media respectively) are zeroed, then the
 * table is written, deleted entries are purged and the extra access
 * reference taken when the table was opened is dropped.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	/* Drop the topology lock around the actual I/O. */
	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		/* Zero each sector flagged at the head of the media... */
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		/* ...and each sector flagged at the tail. */
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	/* A committed "destroy" leaves the null scheme: wither the geom. */
	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	/* Purge deleted entries and clear modification state. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
797
/*
 * Handle the "create" verb: put a new partition table of the given
 * scheme on a provider.  A geom carrying the null scheme (left over
 * from a destroyed table) is reused; otherwise a new geom is created
 * and its consumer attached and opened for writing.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	gp = g_part_find_geom(pp->name);
	if (gp != NULL) {
		null = gp->softc;
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	/* The requested entry count must lie within the scheme's limits. */
	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Inherit state from the null table being replaced. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		/* Undo: drop access and destroy the geom we created. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* Undo: restore the null table we were replacing. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
924
/*
 * Handle the "delete" verb: remove an entry from the table.  A
 * freshly created (uncommitted) entry is freed outright; otherwise
 * the entry is only marked deleted until the next commit.  The
 * entry's provider must not be open.
 */
static int
g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* Refuse to delete a partition that is open. */
	pp = entry->gpe_pp;
	if (pp != NULL) {
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
			gctl_error(req, "%d", EBUSY);
			return (EBUSY);
		}

		pp->private = NULL;
		entry->gpe_pp = NULL;
	}

	if (pp != NULL)
		g_wither_provider(pp, ENXIO);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " deleted\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}

	if (entry->gpe_created) {
		/* Never written to disk: discard the entry entirely. */
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	} else {
		entry->gpe_modified = 0;
		entry->gpe_deleted = 1;
	}
	return (0);
}
984
/*
 * Handle the "destroy" verb: ask the scheme to destroy the table and
 * replace the geom's softc with a null-scheme table, so that a later
 * commit can scrub the metadata sectors and wither the geom.  With
 * gpp_force set, non-open providers are destroyed along the way;
 * otherwise any live entry makes the request fail with EBUSY.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Check for busy providers. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (gpp->gpp_force) {
			pp = entry->gpe_pp;
			if (pp == NULL)
				continue;
			if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
				continue;
		}
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	if (gpp->gpp_force) {
		/* Destroy all providers. */
		LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				g_wither_provider(pp, ENXIO);
			}
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Replace the table with a null-scheme placeholder. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	/* Carry over the state needed for the final commit. */
	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1067
/*
 * Handle the "modify" verb: change scheme-specific attributes of an
 * existing entry via G_PART_MODIFY.  Takes effect on disk at commit.
 */
static int
g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_MODIFY(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* A still-uncommitted new entry needs no modified flag. */
	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " modified\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1114
/*
 * Handle the "move" verb.  Moving partitions is not implemented.
 */
static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}
1121
/*
 * Handle the "recover" verb: ask the scheme to repair a table that
 * was flagged corrupt (gpt_corrupt, set e.g. by the integrity check).
 * A table that is not corrupt is reported as needing no recovery.
 */
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_part_table *table;
	struct g_geom *gp;
	struct sbuf *sb;
	int error, recovered;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;
	error = recovered = 0;

	if (table->gpt_corrupt) {
		error = G_PART_RECOVER(table);
		if (error) {
			gctl_error(req, "%d recovering '%s' failed",
			    error, gp->name);
			return (error);
		}
		recovered = 1;
	}
	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		if (recovered)
			sbuf_printf(sb, "%s recovered\n", gp->name);
		else
			sbuf_printf(sb, "%s recovering is not needed\n",
			    gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1159
/*
 * Handle the 'resize' verb: change the size of an existing partition.
 * The partition keeps its start offset; only its end moves.  Returns
 * 0 on success or an errno, reported through the request as well.
 */
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size: new last sector must stay within the table. */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	/* The resized partition must not collide with any other entry. */
	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		/* New end would land inside another partition. */
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		/* Growing would swallow another partition entirely. */
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	/* Refuse to resize an open provider unless debug flag 0x10 is set. */
	pp = entry->gpe_pp;
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Pre-existing entries must be flagged for a table re-write. */
	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
		pp->sectorsize;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1242
1243static int
1244g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
1245    unsigned int set)
1246{
1247	struct g_geom *gp;
1248	struct g_part_entry *entry;
1249	struct g_part_table *table;
1250	struct sbuf *sb;
1251	int error;
1252
1253	gp = gpp->gpp_geom;
1254	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
1255	g_topology_assert();
1256
1257	table = gp->softc;
1258
1259	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
1260		if (entry->gpe_deleted || entry->gpe_internal)
1261			continue;
1262		if (entry->gpe_index == gpp->gpp_index)
1263			break;
1264	}
1265	if (entry == NULL) {
1266		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
1267		return (ENOENT);
1268	}
1269
1270	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
1271	if (error) {
1272		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
1273		return (error);
1274	}
1275
1276	/* Provide feedback if so requested. */
1277	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
1278		sb = sbuf_new_auto();
1279		sbuf_printf(sb, "%s %sset on ", gpp->gpp_attrib,
1280		    (set) ? "" : "un");
1281		G_PART_FULLNAME(table, entry, sb, gp->name);
1282		sbuf_printf(sb, "\n");
1283		sbuf_finish(sb);
1284		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
1285		sbuf_delete(sb);
1286	}
1287	return (0);
1288}
1289
/*
 * Handle the 'undo' verb: discard all uncommitted changes and restore
 * the in-core table state from the on-disk metadata.  Only valid while
 * the table is open for modification.
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Nothing to undo unless the table was opened for writing. */
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	/*
	 * Drop uncommitted changes: entries created since the open lose
	 * their provider and are freed; entries marked deleted are freed
	 * too, since the re-read below restores them from the media.
	 */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	/*
	 * If the table was created in this session (or there is none),
	 * re-probe the media for the actual on-disk scheme.
	 */
	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		/* Any remaining non-internal entry prevents the re-probe. */
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Re-create providers for the entries read back from the media. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
1385
/*
 * Tear down a partitioning GEOM: destroy the in-core table, free all
 * of its entries and let the geom itself wither away with 'error'.
 */
static void
g_part_wither(struct g_geom *gp, int error)
{
	struct g_part_entry *entry;
	struct g_part_table *table;

	table = gp->softc;
	if (table != NULL) {
		G_PART_DESTROY(table, NULL);
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		/* Re-check softc before deleting the kobj table instance. */
		if (gp->softc != NULL) {
			kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = NULL;
		}
	}
	g_wither_geom(gp, error);
}
1406
1407/*
1408 * Class methods.
1409 */
1410
/*
 * Control request dispatcher for the PART class.  Decodes the verb,
 * collects and validates the request parameters, obtains write access
 * to the geom when the verb modifies it, lets the scheme pre-check the
 * parameters, dispatches to the per-verb handler and implements the
 * automatic commit flag ('C').  Errors are reported via the request.
 */
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	/*
	 * Decode the verb.  mparms collects the mandatory parameter bits,
	 * oparms the optional ones; modifies is cleared for verbs that do
	 * not change the table (commit, undo).
	 */
	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
			oparms |= G_PART_PARM_FORCE;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	/* Convert the request arguments into a g_part_parms structure. */
	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			/* 'arg0' names the geom or provider of the verb. */
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			else if (!strcmp(ap->name, "force"))
				parm = G_PART_PARM_FORCE;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		/* Reject parameters the verb neither requires nor accepts. */
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(req, ap->name,
			    &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			error = g_part_parm_bootcode(req, ap->name,
			    &gpp.gpp_codeptr, &gpp.gpp_codesize);
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_flags);
			break;
		case G_PART_PARM_FORCE:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_force);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(req, ap->name, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_intmax(req, ap->name,
			    &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_label);
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(req, ap->name,
			    &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(req, ap->name,
			    &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(req, ap->name, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(req, ap->name,
			    &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(req, ap->name, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint32(req, ap->name,
			    &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			gctl_error(req, "%d %s", error, ap->name);
			break;
		}
		if (error != 0) {
			if (error == ENOATTR) {
				gctl_error(req, "%d param '%s'", error,
				    ap->name);
			}
			return;
		}
		gpp.gpp_parms |= parm;
	}
	/* All mandatory parameters must have been supplied. */
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		/* Only destroy/recover are allowed on a corrupt table. */
		if (table != NULL && table->gpt_corrupt &&
		    ctlreq != G_PART_CTL_DESTROY &&
		    ctlreq != G_PART_CTL_RECOVER) {
			gctl_error(req, "%d table '%s' is corrupt",
			    EPERM, gpp.gpp_geom->name);
			return;
		}
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	/* Dispatch to the per-verb handler. */
	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit (the 'C' flag). */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, ("%s",
			    __func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

 out:
	/* Drop the access we took above if the request failed. */
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}
1747
/*
 * Destroy_geom method: unconditionally dismantle the PART geom in
 * response to a class-level destroy request.  Always returns 0.
 */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
1759
/*
 * Taste method: probe the given provider for a known partition table
 * and, when one is found, instantiate a PART geom with a provider for
 * each external partition entry.  Returns the new geom, or NULL when
 * nothing usable was found.
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	/* Hold off root mounting while we probe (dropped on all paths). */
	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;
	error = g_part_check_integrity(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Create providers for all external (user-visible) entries. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

 fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}
1852
1853/*
1854 * Geom methods.
1855 */
1856
/*
 * Access method: propagate the access count deltas to our consumer,
 * mapping write access onto exclusive access as well.
 */
static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}
1870
/*
 * Dumpconf method: emit configuration for the geom, one of its
 * consumers or one of its providers.  With indent == NULL a terse
 * one-line provider summary (libdisk compatible) is produced instead
 * of XML.
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, ("%s", __func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, ("%s", __func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, ("%s", __func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		sbuf_printf(sb, "%s<state>%s</state>\n", indent,
		    table->gpt_corrupt ? "CORRUPT": "OK");
		sbuf_printf(sb, "%s<modified>%s</modified>\n", indent,
		    table->gpt_opened ? "true": "false");
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}
1937
/*
 * Orphan method: the consumed provider is going away.  Drop the open
 * reference taken for an opened table and dismantle the geom.
 */
static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, ("%s", __func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	KASSERT(pp->error != 0, ("%s", __func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}
1955
/*
 * Spoiled method: the underlying provider was opened for writing
 * behind our back, so the cached metadata can no longer be trusted;
 * tear the geom down.
 */
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}
1965
/*
 * Start method: handle an I/O request directed at one of our
 * partition providers by clipping it to the partition and translating
 * the offset into the parent provider's address space.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/* The entry backing this provider may already have been deleted. */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clip the request at the partition boundary. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* Translate into the parent provider's address space. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			/* Clip and translate the dump window as well. */
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* BIO_FLUSH and unclaimed BIO_GETATTR requests are passed down. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
2060
/*
 * Init method: register the built-in null scheme at the head of the
 * scheme list.
 */
static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
2067
/*
 * Fini method: unregister the built-in null scheme.
 */
static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
2074
2075static void
2076g_part_unload_event(void *arg, int flag)
2077{
2078	struct g_consumer *cp;
2079	struct g_geom *gp;
2080	struct g_provider *pp;
2081	struct g_part_scheme *scheme;
2082	struct g_part_table *table;
2083	uintptr_t *xchg;
2084	int acc, error;
2085
2086	if (flag == EV_CANCEL)
2087		return;
2088
2089	xchg = arg;
2090	error = 0;
2091	scheme = (void *)(*xchg);
2092
2093	g_topology_assert();
2094
2095	LIST_FOREACH(gp, &g_part_class.geom, geom) {
2096		table = gp->softc;
2097		if (table->gpt_scheme != scheme)
2098			continue;
2099
2100		acc = 0;
2101		LIST_FOREACH(pp, &gp->provider, provider)
2102			acc += pp->acr + pp->acw + pp->ace;
2103		LIST_FOREACH(cp, &gp->consumer, consumer)
2104			acc += cp->acr + cp->acw + cp->ace;
2105
2106		if (!acc)
2107			g_part_wither(gp, ENOSYS);
2108		else
2109			error = EBUSY;
2110	}
2111
2112	if (!error)
2113		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
2114
2115	*xchg = error;
2116}
2117
2118int
2119g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
2120{
2121	uintptr_t arg;
2122	int error;
2123
2124	switch (type) {
2125	case MOD_LOAD:
2126		TAILQ_INSERT_TAIL(&g_part_schemes, scheme, scheme_list);
2127
2128		error = g_retaste(&g_part_class);
2129		if (error)
2130			TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
2131		break;
2132	case MOD_UNLOAD:
2133		arg = (uintptr_t)scheme;
2134		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
2135		    NULL);
2136		if (!error)
2137			error = (arg == (uintptr_t)scheme) ? EDOOFUS : arg;
2138		break;
2139	default:
2140		error = EOPNOTSUPP;
2141		break;
2142	}
2143
2144	return (error);
2145}
2146