/* g_part.c revision 212609 */
1/*-
2 * Copyright (c) 2002, 2005-2009 Marcel Moolenaar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/part/g_part.c 212609 2010-09-14 11:42:07Z pjd $");
29
30#include <sys/param.h>
31#include <sys/bio.h>
32#include <sys/diskmbr.h>
33#include <sys/endian.h>
34#include <sys/kernel.h>
35#include <sys/kobj.h>
36#include <sys/limits.h>
37#include <sys/lock.h>
38#include <sys/malloc.h>
39#include <sys/mutex.h>
40#include <sys/queue.h>
41#include <sys/sbuf.h>
42#include <sys/systm.h>
43#include <sys/uuid.h>
44#include <geom/geom.h>
45#include <geom/geom_ctl.h>
46#include <geom/geom_int.h>
47#include <geom/part/g_part.h>
48
49#include "g_part_if.h"
50
51#ifndef _PATH_DEV
52#define _PATH_DEV "/dev/"
53#endif
54
/* Empty method table used by the internal "null" scheme. */
static kobj_method_t g_part_null_methods[] = {
	{ 0, 0 }
};
58
/*
 * Internal placeholder scheme, installed by the "destroy" verb until
 * that change is committed or undone.  It has no methods of its own.
 */
static struct g_part_scheme g_part_null_scheme = {
	"(none)",
	g_part_null_methods,
	sizeof(struct g_part_table),
};
64
/* List of all registered partitioning schemes. */
TAILQ_HEAD(, g_part_scheme) g_part_schemes =
    TAILQ_HEAD_INITIALIZER(g_part_schemes);
67
/*
 * Table mapping user-visible partition type names (lexemes) to their
 * scheme-independent alias enumerators.  Scanned linearly by
 * g_part_alias_name() and the parameter parsers.
 */
struct g_part_alias_list {
	const char *lexeme;		/* User-visible type name. */
	enum g_part_alias alias;	/* Canonical alias value. */
} g_part_alias_list[G_PART_ALIAS_COUNT] = {
	{ "apple-boot", G_PART_ALIAS_APPLE_BOOT },
	{ "apple-hfs", G_PART_ALIAS_APPLE_HFS },
	{ "apple-label", G_PART_ALIAS_APPLE_LABEL },
	{ "apple-raid", G_PART_ALIAS_APPLE_RAID },
	{ "apple-raid-offline", G_PART_ALIAS_APPLE_RAID_OFFLINE },
	{ "apple-tv-recovery", G_PART_ALIAS_APPLE_TV_RECOVERY },
	{ "apple-ufs", G_PART_ALIAS_APPLE_UFS },
	{ "efi", G_PART_ALIAS_EFI },
	{ "freebsd", G_PART_ALIAS_FREEBSD },
	{ "freebsd-boot", G_PART_ALIAS_FREEBSD_BOOT },
	{ "freebsd-swap", G_PART_ALIAS_FREEBSD_SWAP },
	{ "freebsd-ufs", G_PART_ALIAS_FREEBSD_UFS },
	{ "freebsd-vinum", G_PART_ALIAS_FREEBSD_VINUM },
	{ "freebsd-zfs", G_PART_ALIAS_FREEBSD_ZFS },
	{ "linux-data", G_PART_ALIAS_LINUX_DATA },
	{ "linux-lvm", G_PART_ALIAS_LINUX_LVM },
	{ "linux-raid", G_PART_ALIAS_LINUX_RAID },
	{ "linux-swap", G_PART_ALIAS_LINUX_SWAP },
	{ "ms-basic-data", G_PART_ALIAS_MS_BASIC_DATA },
	{ "ms-ldm-data", G_PART_ALIAS_MS_LDM_DATA },
	{ "ms-ldm-metadata", G_PART_ALIAS_MS_LDM_METADATA },
	{ "ms-reserved", G_PART_ALIAS_MS_RESERVED },
	{ "ntfs", G_PART_ALIAS_MS_NTFS },
	{ "netbsd-ccd", G_PART_ALIAS_NETBSD_CCD },
	{ "netbsd-cgd", G_PART_ALIAS_NETBSD_CGD },
	{ "netbsd-ffs", G_PART_ALIAS_NETBSD_FFS },
	{ "netbsd-lfs", G_PART_ALIAS_NETBSD_LFS },
	{ "netbsd-raid", G_PART_ALIAS_NETBSD_RAID },
	{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
	{ "mbr", G_PART_ALIAS_MBR }
};
103
/*
 * The GEOM partitioning class.  The class and geom methods below are
 * implemented further down in this file.
 */
static g_ctl_req_t g_part_ctlreq;
static g_ctl_destroy_geom_t g_part_destroy_geom;
static g_fini_t g_part_fini;
static g_init_t g_part_init;
static g_taste_t g_part_taste;

static g_access_t g_part_access;
static g_dumpconf_t g_part_dumpconf;
static g_orphan_t g_part_orphan;
static g_spoiled_t g_part_spoiled;
static g_start_t g_part_start;

static struct g_class g_part_class = {
	.name = "PART",
	.version = G_VERSION,
	/* Class methods. */
	.ctlreq = g_part_ctlreq,
	.destroy_geom = g_part_destroy_geom,
	.fini = g_part_fini,
	.init = g_part_init,
	.taste = g_part_taste,
	/* Geom methods. */
	.access = g_part_access,
	.dumpconf = g_part_dumpconf,
	.orphan = g_part_orphan,
	.spoiled = g_part_spoiled,
	.start = g_part_start,
};

DECLARE_GEOM_CLASS(g_part_class, g_part);
137
138/*
139 * Support functions.
140 */
141
142static void g_part_wither(struct g_geom *, int);
143
144const char *
145g_part_alias_name(enum g_part_alias alias)
146{
147	int i;
148
149	for (i = 0; i < G_PART_ALIAS_COUNT; i++) {
150		if (g_part_alias_list[i].alias != alias)
151			continue;
152		return (g_part_alias_list[i].lexeme);
153	}
154
155	return (NULL);
156}
157
158void
159g_part_geometry_heads(off_t blocks, u_int sectors, off_t *bestchs,
160    u_int *bestheads)
161{
162	static u_int candidate_heads[] = { 1, 2, 16, 32, 64, 128, 255, 0 };
163	off_t chs, cylinders;
164	u_int heads;
165	int idx;
166
167	*bestchs = 0;
168	*bestheads = 0;
169	for (idx = 0; candidate_heads[idx] != 0; idx++) {
170		heads = candidate_heads[idx];
171		cylinders = blocks / heads / sectors;
172		if (cylinders < heads || cylinders < sectors)
173			break;
174		if (cylinders > 1023)
175			continue;
176		chs = cylinders * heads * sectors;
177		if (chs > *bestchs || (chs == *bestchs && *bestheads == 1)) {
178			*bestchs = chs;
179			*bestheads = heads;
180		}
181	}
182}
183
/*
 * Initialize the table's CHS geometry fields.  The firmware-provided
 * geometry is used when the consumer reports one; otherwise the best
 * synthetic geometry for the given block count is computed.
 */
static void
g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
    off_t blocks)
{
	/* Sectors-per-track values worth trying; 0 terminates the list. */
	static u_int candidate_sectors[] = { 1, 9, 17, 33, 63, 0 };
	off_t chs, bestchs;
	u_int heads, sectors;
	int idx;

	if (g_getattr("GEOM::fwsectors", cp, &sectors) != 0 || sectors == 0 ||
	    g_getattr("GEOM::fwheads", cp, &heads) != 0 || heads == 0) {
		/* No usable firmware geometry: synthesize one. */
		table->gpt_fixgeom = 0;
		table->gpt_heads = 0;
		table->gpt_sectors = 0;
		bestchs = 0;
		for (idx = 0; candidate_sectors[idx] != 0; idx++) {
			sectors = candidate_sectors[idx];
			g_part_geometry_heads(blocks, sectors, &chs, &heads);
			if (chs == 0)
				continue;
			/*
			 * Prefer a geometry with sectors > 1, but only if
			 * it doesn't bump down the number of heads to 1.
			 */
			if (chs > bestchs || (chs == bestchs && heads > 1 &&
			    table->gpt_sectors == 1)) {
				bestchs = chs;
				table->gpt_heads = heads;
				table->gpt_sectors = sectors;
			}
		}
		/*
		 * If we didn't find a geometry at all, then the disk is
		 * too big. This means we can use the maximum number of
		 * heads and sectors.
		 */
		if (bestchs == 0) {
			table->gpt_heads = 255;
			table->gpt_sectors = 63;
		}
	} else {
		/* The firmware geometry is authoritative; mark it fixed. */
		table->gpt_fixgeom = 1;
		table->gpt_heads = heads;
		table->gpt_sectors = sectors;
	}
}
230
231struct g_part_entry *
232g_part_new_entry(struct g_part_table *table, int index, quad_t start,
233    quad_t end)
234{
235	struct g_part_entry *entry, *last;
236
237	last = NULL;
238	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
239		if (entry->gpe_index == index)
240			break;
241		if (entry->gpe_index > index) {
242			entry = NULL;
243			break;
244		}
245		last = entry;
246	}
247	if (entry == NULL) {
248		entry = g_malloc(table->gpt_scheme->gps_entrysz,
249		    M_WAITOK | M_ZERO);
250		entry->gpe_index = index;
251		if (last == NULL)
252			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
253		else
254			LIST_INSERT_AFTER(last, entry, gpe_entry);
255	} else
256		entry->gpe_offset = 0;
257	entry->gpe_start = start;
258	entry->gpe_end = end;
259	return (entry);
260}
261
/*
 * Create (or refresh) the GEOM provider that exposes a single
 * partition entry, deriving its media parameters from the provider
 * we consume.
 */
static void
g_part_new_provider(struct g_geom *gp, struct g_part_table *table,
    struct g_part_entry *entry)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct sbuf *sb;
	off_t offset;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* The payload offset is at least the partition's start offset. */
	offset = entry->gpe_start * pp->sectorsize;
	if (entry->gpe_offset < offset)
		entry->gpe_offset = offset;

	if (entry->gpe_pp == NULL) {
		/* Let the scheme build the provider's full name. */
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_finish(sb);
		entry->gpe_pp = g_new_providerf(gp, "%s", sbuf_data(sb));
		sbuf_delete(sb);
		entry->gpe_pp->private = entry;		/* Close the circle. */
	}
	entry->gpe_pp->index = entry->gpe_index - 1;	/* index is 1-based. */
	entry->gpe_pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
	    pp->sectorsize;
	/* Shrink the media size by any extra payload offset. */
	entry->gpe_pp->mediasize -= entry->gpe_offset - offset;
	entry->gpe_pp->sectorsize = pp->sectorsize;
	entry->gpe_pp->flags = pp->flags & G_PF_CANDELETE;
	entry->gpe_pp->stripesize = pp->stripesize;
	entry->gpe_pp->stripeoffset = pp->stripeoffset + entry->gpe_offset;
	if (pp->stripesize > 0)
		entry->gpe_pp->stripeoffset %= pp->stripesize;
	g_error_provider(entry->gpe_pp, 0);
}
298
299static int
300g_part_parm_geom(const char *name, struct g_geom **v)
301{
302	struct g_geom *gp;
303
304	if (strncmp(name, _PATH_DEV, strlen(_PATH_DEV)) == 0)
305		name += strlen(_PATH_DEV);
306	LIST_FOREACH(gp, &g_part_class.geom, geom) {
307		if (!strcmp(name, gp->name))
308			break;
309	}
310	if (gp == NULL)
311		return (EINVAL);
312	*v = gp;
313	return (0);
314}
315
316static int
317g_part_parm_provider(const char *name, struct g_provider **v)
318{
319	struct g_provider *pp;
320
321	if (strncmp(name, _PATH_DEV, strlen(_PATH_DEV)) == 0)
322		name += strlen(_PATH_DEV);
323	pp = g_provider_by_name(name);
324	if (pp == NULL)
325		return (EINVAL);
326	*v = pp;
327	return (0);
328}
329
330static int
331g_part_parm_quad(const char *p, quad_t *v)
332{
333	char *x;
334	quad_t q;
335
336	q = strtoq(p, &x, 0);
337	if (*x != '\0' || q < 0)
338		return (EINVAL);
339	*v = q;
340	return (0);
341}
342
343static int
344g_part_parm_scheme(const char *p, struct g_part_scheme **v)
345{
346	struct g_part_scheme *s;
347
348	TAILQ_FOREACH(s, &g_part_schemes, scheme_list) {
349		if (s == &g_part_null_scheme)
350			continue;
351		if (!strcasecmp(s->name, p))
352			break;
353	}
354	if (s == NULL)
355		return (EINVAL);
356	*v = s;
357	return (0);
358}
359
/*
 * Validate a string parameter.  The empty string is rejected with
 * EINVAL; otherwise the pointer is passed through unchanged.
 */
static int
g_part_parm_str(const char *p, const char **v)
{

	if (*p == '\0')
		return (EINVAL);
	*v = p;
	return (0);
}
369
370static int
371g_part_parm_uint(const char *p, u_int *v)
372{
373	char *x;
374	long l;
375
376	l = strtol(p, &x, 0);
377	if (*x != '\0' || l < 0 || l > INT_MAX)
378		return (EINVAL);
379	*v = (unsigned int)l;
380	return (0);
381}
382
/*
 * Find the partitioning scheme that best recognizes the on-disk
 * metadata.  A scheme's probe method returns 0 for a perfect match,
 * a negative value for a weaker preference, and a positive value on
 * error.  On success gp->softc holds a table of the winning scheme;
 * ENXIO is returned when no scheme matched at all.
 */
static int
g_part_probe(struct g_geom *gp, struct g_consumer *cp, int depth)
{
	struct g_part_scheme *iter, *scheme;
	struct g_part_table *table;
	int pri, probe;

	/* Start from the currently attached scheme, if any. */
	table = gp->softc;
	scheme = (table != NULL) ? table->gpt_scheme : NULL;
	pri = (scheme != NULL) ? G_PART_PROBE(table, cp) : INT_MIN;
	if (pri == 0)
		goto done;	/* The current scheme is a perfect match. */
	if (pri > 0) {	/* error */
		scheme = NULL;
		pri = INT_MIN;
	}

	TAILQ_FOREACH(iter, &g_part_schemes, scheme_list) {
		if (iter == &g_part_null_scheme)
			continue;
		/* Instantiate a table object so the scheme can probe. */
		table = (void *)kobj_create((kobj_class_t)iter, M_GEOM,
		    M_WAITOK);
		table->gpt_gp = gp;
		table->gpt_scheme = iter;
		table->gpt_depth = depth;
		probe = G_PART_PROBE(table, cp);
		if (probe <= 0 && probe > pri) {
			/* Better match: replace any previous candidate. */
			pri = probe;
			scheme = iter;
			if (gp->softc != NULL)
				kobj_delete((kobj_t)gp->softc, M_GEOM);
			gp->softc = table;
			if (pri == 0)
				goto done;
		} else
			kobj_delete((kobj_t)table, M_GEOM);
	}

done:
	return ((scheme == NULL) ? ENXIO : 0);
}
424
425/*
426 * Control request functions.
427 */
428
/*
 * Handle the "add" verb: create a new partition entry within the
 * table's usable range and publish a provider for it.
 */
static int
g_part_ctl_add(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *delent, *last, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	unsigned int index;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	pp = LIST_FIRST(&gp->consumer)->provider;
	table = gp->softc;
	end = gpp->gpp_start + gpp->gpp_size - 1;

	/* The new partition must lie within the table's usable range. */
	if (gpp->gpp_start < table->gpt_first ||
	    gpp->gpp_start > table->gpt_last) {
		gctl_error(req, "%d start '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_start);
		return (EINVAL);
	}
	if (end < gpp->gpp_start || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}
	if (gpp->gpp_index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", EINVAL, gpp->gpp_index);
		return (EINVAL);
	}

	/*
	 * Walk the index-sorted entry list: remember a deleted entry
	 * with the wanted index for reuse (delent), advance the index
	 * past occupied slots when none was requested, remember the
	 * insertion point (last) and reject overlaps with live entries.
	 */
	delent = last = NULL;
	index = (gpp->gpp_index > 0) ? gpp->gpp_index : 1;
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted) {
			if (entry->gpe_index == index)
				delent = entry;
			continue;
		}
		if (entry->gpe_index == index)
			index = entry->gpe_index + 1;
		if (entry->gpe_index < index)
			last = entry;
		if (entry->gpe_internal)
			continue;
		if (gpp->gpp_start >= entry->gpe_start &&
		    gpp->gpp_start <= entry->gpe_end) {
			gctl_error(req, "%d start '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_start);
			return (ENOSPC);
		}
		if (end >= entry->gpe_start && end <= entry->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC, (intmax_t)end);
			return (ENOSPC);
		}
		if (gpp->gpp_start < entry->gpe_start && end > entry->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}
	/* An explicitly requested index must still be free. */
	if (gpp->gpp_index > 0 && index != gpp->gpp_index) {
		gctl_error(req, "%d index '%d'", EEXIST, gpp->gpp_index);
		return (EEXIST);
	}
	if (index > table->gpt_entries) {
		gctl_error(req, "%d index '%d'", ENOSPC, index);
		return (ENOSPC);
	}

	/* Reuse a deleted entry when possible; otherwise allocate one. */
	entry = (delent == NULL) ? g_malloc(table->gpt_scheme->gps_entrysz,
	    M_WAITOK | M_ZERO) : delent;
	entry->gpe_index = index;
	entry->gpe_start = gpp->gpp_start;
	entry->gpe_end = end;
	error = G_PART_ADD(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		if (delent == NULL)
			g_free(entry);
		return (error);
	}
	if (delent == NULL) {
		/* Keep the list sorted by index. */
		if (last == NULL)
			LIST_INSERT_HEAD(&table->gpt_entry, entry, gpe_entry);
		else
			LIST_INSERT_AFTER(last, entry, gpe_entry);
		entry->gpe_created = 1;
	} else {
		entry->gpe_deleted = 0;
		entry->gpe_modified = 1;
	}
	g_part_new_provider(gp, table, entry);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " added\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
539
540static int
541g_part_ctl_bootcode(struct gctl_req *req, struct g_part_parms *gpp)
542{
543	struct g_geom *gp;
544	struct g_part_table *table;
545	struct sbuf *sb;
546	int error, sz;
547
548	gp = gpp->gpp_geom;
549	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
550	g_topology_assert();
551
552	table = gp->softc;
553	sz = table->gpt_scheme->gps_bootcodesz;
554	if (sz == 0) {
555		error = ENODEV;
556		goto fail;
557	}
558	if (gpp->gpp_codesize > sz) {
559		error = EFBIG;
560		goto fail;
561	}
562
563	error = G_PART_BOOTCODE(table, gpp);
564	if (error)
565		goto fail;
566
567	/* Provide feedback if so requested. */
568	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
569		sb = sbuf_new_auto();
570		sbuf_printf(sb, "%s has bootcode\n", gp->name);
571		sbuf_finish(sb);
572		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
573		sbuf_delete(sb);
574	}
575	return (0);
576
577 fail:
578	gctl_error(req, "%d", error);
579	return (error);
580}
581
/*
 * Handle the "commit" verb: write all pending changes to the media
 * and drop the access reference taken when the table was opened.
 */
static int
g_part_ctl_commit(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	char *buf;
	int error, i;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* There must be uncommitted changes (table opened for writing). */
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	/* Drop the topology lock around the disk I/O below. */
	g_topology_unlock();

	cp = LIST_FIRST(&gp->consumer);
	if ((table->gpt_smhead | table->gpt_smtail) != 0) {
		/*
		 * Zero the scratch sectors: gpt_smhead and gpt_smtail are
		 * bitmaps of sectors, counted from the start resp. the end
		 * of the media, that must be wiped.
		 */
		pp = cp->provider;
		buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
		while (table->gpt_smhead != 0) {
			i = ffs(table->gpt_smhead) - 1;
			error = g_write_data(cp, i * pp->sectorsize, buf,
			    pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smhead &= ~(1 << i);
		}
		while (table->gpt_smtail != 0) {
			i = ffs(table->gpt_smtail) - 1;
			error = g_write_data(cp, pp->mediasize - (i + 1) *
			    pp->sectorsize, buf, pp->sectorsize);
			if (error) {
				g_free(buf);
				goto fail;
			}
			table->gpt_smtail &= ~(1 << i);
		}
		g_free(buf);
	}

	/* Committing a destroyed table withers the whole geom. */
	if (table->gpt_scheme == &g_part_null_scheme) {
		g_topology_lock();
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, ENXIO);
		return (0);
	}

	error = G_PART_WRITE(table, cp);
	if (error)
		goto fail;

	/* Reap deleted entries; clear change markers on the rest. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		if (!entry->gpe_deleted) {
			entry->gpe_created = 0;
			entry->gpe_modified = 0;
			continue;
		}
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	table->gpt_created = 0;
	table->gpt_opened = 0;

	g_topology_lock();
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
664
/*
 * Handle the "create" verb: put a new, empty partition table of the
 * requested scheme on a provider and open it for further changes.
 */
static int
g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int attr, error;

	pp = gpp->gpp_provider;
	scheme = gpp->gpp_scheme;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* Check that there isn't already a g_part geom on the provider. */
	error = g_part_parm_geom(pp->name, &gp);
	if (!error) {
		null = gp->softc;
		/* Only a geom holding the null scheme may be taken over. */
		if (null->gpt_scheme != &g_part_null_scheme) {
			gctl_error(req, "%d geom '%s'", EEXIST, pp->name);
			return (EEXIST);
		}
	} else
		null = NULL;

	/* An explicit entry count must fit within the scheme's limits. */
	if ((gpp->gpp_parms & G_PART_PARM_ENTRIES) &&
	    (gpp->gpp_entries < scheme->gps_minent ||
	     gpp->gpp_entries > scheme->gps_maxent)) {
		gctl_error(req, "%d entries '%d'", EINVAL, gpp->gpp_entries);
		return (EINVAL);
	}

	if (null == NULL)
		gp = g_new_geomf(&g_part_class, "%s", pp->name);
	gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
	    M_WAITOK);
	table = gp->softc;
	table->gpt_gp = gp;
	table->gpt_scheme = gpp->gpp_scheme;
	table->gpt_entries = (gpp->gpp_parms & G_PART_PARM_ENTRIES) ?
	    gpp->gpp_entries : scheme->gps_minent;
	LIST_INIT(&table->gpt_entry);
	if (null == NULL) {
		/* Fresh geom: attach to and open the provider below us. */
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error == 0)
			error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			g_part_wither(gp, error);
			gctl_error(req, "%d geom '%s'", error, pp->name);
			return (error);
		}
		table->gpt_opened = 1;
	} else {
		/* Taking over the null scheme: inherit its state. */
		cp = LIST_FIRST(&gp->consumer);
		table->gpt_opened = null->gpt_opened;
		table->gpt_smhead = null->gpt_smhead;
		table->gpt_smtail = null->gpt_smtail;
	}

	g_topology_unlock();

	/* Make sure the provider has media. */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	table->gpt_depth = (!error) ? attr + 1 : 0;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_CREATE(table, gpp);
	if (error)
		goto fail;

	g_topology_lock();

	table->gpt_created = 1;
	if (null != NULL)
		kobj_delete((kobj_t)null, M_GEOM);

	/*
	 * Support automatic commit by filling in the gpp_geom
	 * parameter.
	 */
	gpp->gpp_parms |= G_PART_PARM_GEOM;
	gpp->gpp_geom = gp;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s created\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);

fail:
	g_topology_lock();
	if (null == NULL) {
		/* Fresh geom: close and destroy it again. */
		g_access(cp, -1, -1, -1);
		g_part_wither(gp, error);
	} else {
		/* Restore the null table we were about to replace. */
		kobj_delete((kobj_t)gp->softc, M_GEOM);
		gp->softc = null;
	}
	gctl_error(req, "%d provider", error);
	return (error);
}
791
792static int
793g_part_ctl_delete(struct gctl_req *req, struct g_part_parms *gpp)
794{
795	struct g_geom *gp;
796	struct g_provider *pp;
797	struct g_part_entry *entry;
798	struct g_part_table *table;
799	struct sbuf *sb;
800
801	gp = gpp->gpp_geom;
802	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
803	g_topology_assert();
804
805	table = gp->softc;
806
807	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
808		if (entry->gpe_deleted || entry->gpe_internal)
809			continue;
810		if (entry->gpe_index == gpp->gpp_index)
811			break;
812	}
813	if (entry == NULL) {
814		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
815		return (ENOENT);
816	}
817
818	pp = entry->gpe_pp;
819	if (pp != NULL) {
820		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) {
821			gctl_error(req, "%d", EBUSY);
822			return (EBUSY);
823		}
824
825		pp->private = NULL;
826		entry->gpe_pp = NULL;
827	}
828
829	if (pp != NULL)
830		g_wither_provider(pp, ENXIO);
831
832	/* Provide feedback if so requested. */
833	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
834		sb = sbuf_new_auto();
835		G_PART_FULLNAME(table, entry, sb, gp->name);
836		sbuf_cat(sb, " deleted\n");
837		sbuf_finish(sb);
838		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
839		sbuf_delete(sb);
840	}
841
842	if (entry->gpe_created) {
843		LIST_REMOVE(entry, gpe_entry);
844		g_free(entry);
845	} else {
846		entry->gpe_modified = 0;
847		entry->gpe_deleted = 1;
848	}
849	return (0);
850}
851
/*
 * Handle the "destroy" verb: remove the table's metadata and replace
 * the scheme with the internal null scheme so that a subsequent
 * commit (or undo) can finish the job.
 */
static int
g_part_ctl_destroy(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *entry;
	struct g_part_table *null, *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Refuse while any live (non-internal) partitions remain. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_DESTROY(table, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	/* Install a null-scheme table, carrying over the pending state. */
	gp->softc = kobj_create((kobj_class_t)&g_part_null_scheme, M_GEOM,
	    M_WAITOK);
	null = gp->softc;
	null->gpt_gp = gp;
	null->gpt_scheme = &g_part_null_scheme;
	LIST_INIT(&null->gpt_entry);

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;
	null->gpt_last = pp->mediasize / pp->sectorsize - 1;

	null->gpt_depth = table->gpt_depth;
	null->gpt_opened = table->gpt_opened;
	null->gpt_smhead = table->gpt_smhead;
	null->gpt_smtail = table->gpt_smtail;

	/* Dispose of the old table and all of its entries. */
	while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
		LIST_REMOVE(entry, gpe_entry);
		g_free(entry);
	}
	kobj_delete((kobj_t)table, M_GEOM);

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		sbuf_printf(sb, "%s destroyed\n", gp->name);
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
913
914static int
915g_part_ctl_modify(struct gctl_req *req, struct g_part_parms *gpp)
916{
917	struct g_geom *gp;
918	struct g_part_entry *entry;
919	struct g_part_table *table;
920	struct sbuf *sb;
921	int error;
922
923	gp = gpp->gpp_geom;
924	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
925	g_topology_assert();
926
927	table = gp->softc;
928
929	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
930		if (entry->gpe_deleted || entry->gpe_internal)
931			continue;
932		if (entry->gpe_index == gpp->gpp_index)
933			break;
934	}
935	if (entry == NULL) {
936		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
937		return (ENOENT);
938	}
939
940	error = G_PART_MODIFY(table, entry, gpp);
941	if (error) {
942		gctl_error(req, "%d", error);
943		return (error);
944	}
945
946	if (!entry->gpe_created)
947		entry->gpe_modified = 1;
948
949	/* Provide feedback if so requested. */
950	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
951		sb = sbuf_new_auto();
952		G_PART_FULLNAME(table, entry, sb, gp->name);
953		sbuf_cat(sb, " modified\n");
954		sbuf_finish(sb);
955		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
956		sbuf_delete(sb);
957	}
958	return (0);
959}
960
/* The "move" verb is not implemented by any scheme yet. */
static int
g_part_ctl_move(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'move'", ENOSYS);
	return (ENOSYS);
}
967
/* The "recover" verb is not implemented yet. */
static int
g_part_ctl_recover(struct gctl_req *req, struct g_part_parms *gpp)
{
	gctl_error(req, "%d verb 'recover'", ENOSYS);
	return (ENOSYS);
}
974
/*
 * Handle the "resize" verb: change the size of an existing partition.
 * The new extent must stay within the table's usable range and may
 * not overlap any other live entry.
 */
static int
g_part_ctl_resize(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_entry *pe, *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	quad_t end;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();
	table = gp->softc;

	/* check gpp_index */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	/* check gpp_size */
	end = entry->gpe_start + gpp->gpp_size - 1;
	if (gpp->gpp_size < 1 || end > table->gpt_last) {
		gctl_error(req, "%d size '%jd'", EINVAL,
		    (intmax_t)gpp->gpp_size);
		return (EINVAL);
	}

	/* The resized extent may not overlap any other live entry. */
	LIST_FOREACH(pe, &table->gpt_entry, gpe_entry) {
		if (pe->gpe_deleted || pe->gpe_internal || pe == entry)
			continue;
		if (end >= pe->gpe_start && end <= pe->gpe_end) {
			gctl_error(req, "%d end '%jd'", ENOSPC,
			    (intmax_t)end);
			return (ENOSPC);
		}
		if (entry->gpe_start < pe->gpe_start && end > pe->gpe_end) {
			gctl_error(req, "%d size '%jd'", ENOSPC,
			    (intmax_t)gpp->gpp_size);
			return (ENOSPC);
		}
	}

	/* Refuse while the provider is open, unless debug flag 0x10 is set. */
	pp = entry->gpe_pp;
	if ((g_debugflags & 16) == 0 &&
	    (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)) {
		gctl_error(req, "%d", EBUSY);
		return (EBUSY);
	}

	error = G_PART_RESIZE(table, entry, gpp);
	if (error) {
		gctl_error(req, "%d", error);
		return (error);
	}

	if (!entry->gpe_created)
		entry->gpe_modified = 1;

	/* update mediasize of changed provider */
	pp->mediasize = (entry->gpe_end - entry->gpe_start + 1) *
		pp->sectorsize;

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_cat(sb, " resized\n");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1057
/*
 * Handle the "set"/"unset" verbs: change a scheme-specific attribute
 * of a partition entry; 'set' is non-zero to set, zero to clear.
 */
static int
g_part_ctl_setunset(struct gctl_req *req, struct g_part_parms *gpp,
    unsigned int set)
{
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct sbuf *sb;
	int error;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;

	/* Locate the live entry carrying the requested index. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (entry->gpe_deleted || entry->gpe_internal)
			continue;
		if (entry->gpe_index == gpp->gpp_index)
			break;
	}
	if (entry == NULL) {
		gctl_error(req, "%d index '%d'", ENOENT, gpp->gpp_index);
		return (ENOENT);
	}

	error = G_PART_SETUNSET(table, entry, gpp->gpp_attrib, set);
	if (error) {
		gctl_error(req, "%d attrib '%s'", error, gpp->gpp_attrib);
		return (error);
	}

	/* Provide feedback if so requested. */
	if (gpp->gpp_parms & G_PART_PARM_OUTPUT) {
		sb = sbuf_new_auto();
		G_PART_FULLNAME(table, entry, sb, gp->name);
		sbuf_printf(sb, " has %s %sset\n", gpp->gpp_attrib,
		    (set) ? "" : "un");
		sbuf_finish(sb);
		gctl_set_param(req, "output", sbuf_data(sb), sbuf_len(sb) + 1);
		sbuf_delete(sb);
	}
	return (0);
}
1103
/*
 * Handle the "undo" verb: revert all uncommitted changes.  Entries
 * created since the table was opened are destroyed, the rest is
 * restored by re-reading the metadata from disk, and the access
 * reference taken when the table was opened is dropped.
 */
static int
g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_part_entry *entry, *tmp;
	struct g_part_table *table;
	int error, reprobe;

	gp = gpp->gpp_geom;
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, gp->name));
	g_topology_assert();

	table = gp->softc;
	/* Nothing to undo unless the table was opened for changes. */
	if (!table->gpt_opened) {
		gctl_error(req, "%d", EPERM);
		return (EPERM);
	}

	cp = LIST_FIRST(&gp->consumer);
	/* Destroy created entries (and their providers); forget edits. */
	LIST_FOREACH_SAFE(entry, &table->gpt_entry, gpe_entry, tmp) {
		entry->gpe_modified = 0;
		if (entry->gpe_created) {
			pp = entry->gpe_pp;
			if (pp != NULL) {
				pp->private = NULL;
				entry->gpe_pp = NULL;
				g_wither_provider(pp, ENXIO);
			}
			entry->gpe_deleted = 1;
		}
		if (entry->gpe_deleted) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
	}

	g_topology_unlock();

	/*
	 * A brand new or destroyed table has no on-disk state to go
	 * back to; re-probe the media for a pre-existing scheme.
	 */
	reprobe = (table->gpt_scheme == &g_part_null_scheme ||
	    table->gpt_created) ? 1 : 0;

	if (reprobe) {
		/* Any remaining non-internal entry blocks the re-probe. */
		LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
			if (entry->gpe_internal)
				continue;
			error = EBUSY;
			goto fail;
		}
		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
			LIST_REMOVE(entry, gpe_entry);
			g_free(entry);
		}
		error = g_part_probe(gp, cp, table->gpt_depth);
		if (error) {
			g_topology_lock();
			g_access(cp, -1, -1, -1);
			g_part_wither(gp, error);
			return (0);
		}
		table = gp->softc;

		/*
		 * Synthesize a disk geometry. Some partitioning schemes
		 * depend on it and since some file systems need it even
		 * when the partition scheme doesn't, we do it here in
		 * scheme-independent code.
		 */
		pp = cp->provider;
		g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
	}

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();

	/* Re-create providers for all (restored) non-internal entries. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	table->gpt_opened = 0;
	g_access(cp, -1, -1, -1);
	return (0);

fail:
	g_topology_lock();
	gctl_error(req, "%d", error);
	return (error);
}
1197
1198static void
1199g_part_wither(struct g_geom *gp, int error)
1200{
1201	struct g_part_entry *entry;
1202	struct g_part_table *table;
1203
1204	table = gp->softc;
1205	if (table != NULL) {
1206		while ((entry = LIST_FIRST(&table->gpt_entry)) != NULL) {
1207			LIST_REMOVE(entry, gpe_entry);
1208			g_free(entry);
1209		}
1210		if (gp->softc != NULL) {
1211			kobj_delete((kobj_t)gp->softc, M_GEOM);
1212			gp->softc = NULL;
1213		}
1214	}
1215	g_wither_geom(gp, error);
1216}
1217
1218/*
1219 * Class methods.
1220 */
1221
/*
 * GEOM control request dispatcher for the PART class.  Translates the
 * verb into a control request, parses and validates the arguments into
 * a g_part_parms structure, opens the table for writing when the
 * request modifies it, and dispatches to the per-verb handler.
 * Errors are reported back through the request with gctl_error().
 */
static void
g_part_ctlreq(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	struct g_part_parms gpp;
	struct g_part_table *table;
	struct gctl_req_arg *ap;
	const char *p;
	enum g_part_ctl ctlreq;
	unsigned int i, mparms, oparms, parm;
	int auto_commit, close_on_error;
	int error, len, modifies;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, verb));
	g_topology_assert();

	/*
	 * Decode the verb.  'mparms' accumulates the parameters that are
	 * mandatory for the request, 'oparms' the optional ones.  Verbs
	 * that don't modify the table clear 'modifies'.
	 */
	ctlreq = G_PART_CTL_NONE;
	modifies = 1;
	mparms = 0;
	oparms = G_PART_PARM_FLAGS | G_PART_PARM_OUTPUT | G_PART_PARM_VERSION;
	switch (*verb) {
	case 'a':
		if (!strcmp(verb, "add")) {
			ctlreq = G_PART_CTL_ADD;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_SIZE |
			    G_PART_PARM_START | G_PART_PARM_TYPE;
			oparms |= G_PART_PARM_INDEX | G_PART_PARM_LABEL;
		}
		break;
	case 'b':
		if (!strcmp(verb, "bootcode")) {
			ctlreq = G_PART_CTL_BOOTCODE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_BOOTCODE;
		}
		break;
	case 'c':
		if (!strcmp(verb, "commit")) {
			ctlreq = G_PART_CTL_COMMIT;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "create")) {
			ctlreq = G_PART_CTL_CREATE;
			mparms |= G_PART_PARM_PROVIDER | G_PART_PARM_SCHEME;
			oparms |= G_PART_PARM_ENTRIES;
		}
		break;
	case 'd':
		if (!strcmp(verb, "delete")) {
			ctlreq = G_PART_CTL_DELETE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		} else if (!strcmp(verb, "destroy")) {
			ctlreq = G_PART_CTL_DESTROY;
			mparms |= G_PART_PARM_GEOM;
		}
		break;
	case 'm':
		if (!strcmp(verb, "modify")) {
			ctlreq = G_PART_CTL_MODIFY;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
			oparms |= G_PART_PARM_LABEL | G_PART_PARM_TYPE;
		} else if (!strcmp(verb, "move")) {
			ctlreq = G_PART_CTL_MOVE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX;
		}
		break;
	case 'r':
		if (!strcmp(verb, "recover")) {
			ctlreq = G_PART_CTL_RECOVER;
			mparms |= G_PART_PARM_GEOM;
		} else if (!strcmp(verb, "resize")) {
			ctlreq = G_PART_CTL_RESIZE;
			mparms |= G_PART_PARM_GEOM | G_PART_PARM_INDEX |
			    G_PART_PARM_SIZE;
		}
		break;
	case 's':
		if (!strcmp(verb, "set")) {
			ctlreq = G_PART_CTL_SET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	case 'u':
		if (!strcmp(verb, "undo")) {
			ctlreq = G_PART_CTL_UNDO;
			mparms |= G_PART_PARM_GEOM;
			modifies = 0;
		} else if (!strcmp(verb, "unset")) {
			ctlreq = G_PART_CTL_UNSET;
			mparms |= G_PART_PARM_ATTRIB | G_PART_PARM_GEOM |
			    G_PART_PARM_INDEX;
		}
		break;
	}
	if (ctlreq == G_PART_CTL_NONE) {
		gctl_error(req, "%d verb '%s'", EINVAL, verb);
		return;
	}

	/*
	 * Parse the request arguments into 'gpp'.  Arguments that are
	 * neither mandatory nor optional for this verb are rejected.
	 */
	bzero(&gpp, sizeof(gpp));
	for (i = 0; i < req->narg; i++) {
		ap = &req->arg[i];
		parm = 0;
		switch (ap->name[0]) {
		case 'a':
			/* "arg0" is the geom or provider, verb-dependent. */
			if (!strcmp(ap->name, "arg0")) {
				parm = mparms &
				    (G_PART_PARM_GEOM | G_PART_PARM_PROVIDER);
			}
			if (!strcmp(ap->name, "attrib"))
				parm = G_PART_PARM_ATTRIB;
			break;
		case 'b':
			if (!strcmp(ap->name, "bootcode"))
				parm = G_PART_PARM_BOOTCODE;
			break;
		case 'c':
			if (!strcmp(ap->name, "class"))
				continue;
			break;
		case 'e':
			if (!strcmp(ap->name, "entries"))
				parm = G_PART_PARM_ENTRIES;
			break;
		case 'f':
			if (!strcmp(ap->name, "flags"))
				parm = G_PART_PARM_FLAGS;
			break;
		case 'i':
			if (!strcmp(ap->name, "index"))
				parm = G_PART_PARM_INDEX;
			break;
		case 'l':
			if (!strcmp(ap->name, "label"))
				parm = G_PART_PARM_LABEL;
			break;
		case 'o':
			if (!strcmp(ap->name, "output"))
				parm = G_PART_PARM_OUTPUT;
			break;
		case 's':
			if (!strcmp(ap->name, "scheme"))
				parm = G_PART_PARM_SCHEME;
			else if (!strcmp(ap->name, "size"))
				parm = G_PART_PARM_SIZE;
			else if (!strcmp(ap->name, "start"))
				parm = G_PART_PARM_START;
			break;
		case 't':
			if (!strcmp(ap->name, "type"))
				parm = G_PART_PARM_TYPE;
			break;
		case 'v':
			if (!strcmp(ap->name, "verb"))
				continue;
			else if (!strcmp(ap->name, "version"))
				parm = G_PART_PARM_VERSION;
			break;
		}
		if ((parm & (mparms | oparms)) == 0) {
			gctl_error(req, "%d param '%s'", EINVAL, ap->name);
			return;
		}
		/* Bootcode is binary; everything else comes in as ASCII. */
		if (parm == G_PART_PARM_BOOTCODE)
			p = gctl_get_param(req, ap->name, &len);
		else
			p = gctl_get_asciiparam(req, ap->name);
		if (p == NULL) {
			gctl_error(req, "%d param '%s'", ENOATTR, ap->name);
			return;
		}
		/* Convert the raw value into the typed gpp field. */
		switch (parm) {
		case G_PART_PARM_ATTRIB:
			error = g_part_parm_str(p, &gpp.gpp_attrib);
			break;
		case G_PART_PARM_BOOTCODE:
			gpp.gpp_codeptr = p;
			gpp.gpp_codesize = len;
			error = 0;
			break;
		case G_PART_PARM_ENTRIES:
			error = g_part_parm_uint(p, &gpp.gpp_entries);
			break;
		case G_PART_PARM_FLAGS:
			if (p[0] == '\0')
				continue;
			error = g_part_parm_str(p, &gpp.gpp_flags);
			break;
		case G_PART_PARM_GEOM:
			error = g_part_parm_geom(p, &gpp.gpp_geom);
			break;
		case G_PART_PARM_INDEX:
			error = g_part_parm_uint(p, &gpp.gpp_index);
			break;
		case G_PART_PARM_LABEL:
			/* An empty label is always valid. */
			gpp.gpp_label = p;
			error = 0;
			break;
		case G_PART_PARM_OUTPUT:
			error = 0;	/* Write-only parameter */
			break;
		case G_PART_PARM_PROVIDER:
			error = g_part_parm_provider(p, &gpp.gpp_provider);
			break;
		case G_PART_PARM_SCHEME:
			error = g_part_parm_scheme(p, &gpp.gpp_scheme);
			break;
		case G_PART_PARM_SIZE:
			error = g_part_parm_quad(p, &gpp.gpp_size);
			break;
		case G_PART_PARM_START:
			error = g_part_parm_quad(p, &gpp.gpp_start);
			break;
		case G_PART_PARM_TYPE:
			error = g_part_parm_str(p, &gpp.gpp_type);
			break;
		case G_PART_PARM_VERSION:
			error = g_part_parm_uint(p, &gpp.gpp_version);
			break;
		default:
			error = EDOOFUS;
			break;
		}
		if (error) {
			gctl_error(req, "%d %s '%s'", error, ap->name, p);
			return;
		}
		gpp.gpp_parms |= parm;
	}
	/* All mandatory parameters must have been supplied. */
	if ((gpp.gpp_parms & mparms) != mparms) {
		parm = mparms - (gpp.gpp_parms & mparms);
		gctl_error(req, "%d param '%x'", ENOATTR, parm);
		return;
	}

	/* Obtain permissions if possible/necessary. */
	close_on_error = 0;
	table = NULL;
	if (modifies && (gpp.gpp_parms & G_PART_PARM_GEOM)) {
		table = gpp.gpp_geom->softc;
		if (table != NULL && !table->gpt_opened) {
			error = g_access(LIST_FIRST(&gpp.gpp_geom->consumer),
			    1, 1, 1);
			if (error) {
				gctl_error(req, "%d geom '%s'", error,
				    gpp.gpp_geom->name);
				return;
			}
			table->gpt_opened = 1;
			close_on_error = 1;
		}
	}

	/* Allow the scheme to check or modify the parameters. */
	if (table != NULL) {
		error = G_PART_PRECHECK(table, ctlreq, &gpp);
		if (error) {
			gctl_error(req, "%d pre-check failed", error);
			goto out;
		}
	} else
		error = EDOOFUS;	/* Prevent bogus uninit. warning. */

	/* Dispatch to the per-verb handler. */
	switch (ctlreq) {
	case G_PART_CTL_NONE:
		panic("%s", __func__);
	case G_PART_CTL_ADD:
		error = g_part_ctl_add(req, &gpp);
		break;
	case G_PART_CTL_BOOTCODE:
		error = g_part_ctl_bootcode(req, &gpp);
		break;
	case G_PART_CTL_COMMIT:
		error = g_part_ctl_commit(req, &gpp);
		break;
	case G_PART_CTL_CREATE:
		error = g_part_ctl_create(req, &gpp);
		break;
	case G_PART_CTL_DELETE:
		error = g_part_ctl_delete(req, &gpp);
		break;
	case G_PART_CTL_DESTROY:
		error = g_part_ctl_destroy(req, &gpp);
		break;
	case G_PART_CTL_MODIFY:
		error = g_part_ctl_modify(req, &gpp);
		break;
	case G_PART_CTL_MOVE:
		error = g_part_ctl_move(req, &gpp);
		break;
	case G_PART_CTL_RECOVER:
		error = g_part_ctl_recover(req, &gpp);
		break;
	case G_PART_CTL_RESIZE:
		error = g_part_ctl_resize(req, &gpp);
		break;
	case G_PART_CTL_SET:
		error = g_part_ctl_setunset(req, &gpp, 1);
		break;
	case G_PART_CTL_UNDO:
		error = g_part_ctl_undo(req, &gpp);
		break;
	case G_PART_CTL_UNSET:
		error = g_part_ctl_setunset(req, &gpp, 0);
		break;
	}

	/* Implement automatic commit. */
	if (!error) {
		auto_commit = (modifies &&
		    (gpp.gpp_parms & G_PART_PARM_FLAGS) &&
		    strchr(gpp.gpp_flags, 'C') != NULL) ? 1 : 0;
		if (auto_commit) {
			KASSERT(gpp.gpp_parms & G_PART_PARM_GEOM, (__func__));
			error = g_part_ctl_commit(req, &gpp);
		}
	}

 out:
	/* On failure, release the access we obtained above. */
	if (error && close_on_error) {
		g_access(LIST_FIRST(&gpp.gpp_geom->consumer), -1, -1, -1);
		table->gpt_opened = 0;
	}
}
1546
/*
 * Class destroy_geom method: unconditionally dismantle the given geom.
 * Always reports success; EINVAL is the error delivered to any
 * outstanding I/O by the wither.
 */
static int
g_part_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, gp->name));
	g_topology_assert();

	g_part_wither(gp, EINVAL);
	return (0);
}
1558
/*
 * Taste method: probe a provider for a supported partitioning scheme
 * and, on success, instantiate a PART geom with one provider per
 * externally visible partition entry.  Returns the new geom or NULL.
 */
static struct g_geom *
g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct root_hold_token *rht;
	int attr, depth;
	int error;

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name));
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	/*
	 * Create a GEOM with consumer and hook it up to the provider.
	 * With that we become part of the topology. Obtain read access
	 * to the provider.
	 */
	gp = g_new_geomf(mp, "%s", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_part_wither(gp, error);
		return (NULL);
	}

	/* Hold off root mounting while probing; released on all paths. */
	rht = root_mount_hold(mp->name);
	g_topology_unlock();

	/*
	 * Short-circuit the whole probing galore when there's no
	 * media present.
	 */
	if (pp->mediasize == 0 || pp->sectorsize == 0) {
		error = ENODEV;
		goto fail;
	}

	/* Make sure we can nest and if so, determine our depth. */
	error = g_getattr("PART::isleaf", cp, &attr);
	if (!error && attr) {
		error = ENODEV;
		goto fail;
	}
	error = g_getattr("PART::depth", cp, &attr);
	depth = (!error) ? attr + 1 : 0;

	error = g_part_probe(gp, cp, depth);
	if (error)
		goto fail;

	table = gp->softc;

	/*
	 * Synthesize a disk geometry. Some partitioning schemes
	 * depend on it and since some file systems need it even
	 * when the partition scheme doesn't, we do it here in
	 * scheme-independent code.
	 */
	g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);

	error = G_PART_READ(table, cp);
	if (error)
		goto fail;

	g_topology_lock();
	/* Create providers for all externally visible entries. */
	LIST_FOREACH(entry, &table->gpt_entry, gpe_entry) {
		if (!entry->gpe_internal)
			g_part_new_provider(gp, table, entry);
	}

	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	return (gp);

 fail:
	g_topology_lock();
	root_mount_rel(rht);
	g_access(cp, -1, 0, 0);
	g_part_wither(gp, error);
	return (NULL);
}
1648
1649/*
1650 * Geom methods.
1651 */
1652
/*
 * Access method for partition providers.  Write access on a partition
 * is mapped to write + exclusive access on the underlying consumer
 * (dw added to de below), so a partition open for writing cannot be
 * modified underneath its writer.
 */
static int
g_part_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp;

	G_PART_TRACE((G_T_ACCESS, "%s(%s,%d,%d,%d)", __func__, pp->name, dr,
	    dw, de));

	cp = LIST_FIRST(&pp->geom->consumer);

	/* We always gain write-exclusive access. */
	return (g_access(cp, dr, dw, dw + de));
}
1666
/*
 * Dumpconf method.  Three contexts, distinguished by the arguments:
 * indent == NULL -> one-line "conftxt" style output for a provider;
 * cp != NULL     -> consumer configuration (nothing to report);
 * pp != NULL     -> XML provider configuration;
 * otherwise      -> XML geom (table-wide) configuration.
 */
static void
g_part_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	char buf[64];
	struct g_part_entry *entry;
	struct g_part_table *table;

	KASSERT(sb != NULL && gp != NULL, (__func__));
	table = gp->softc;

	if (indent == NULL) {
		KASSERT(cp == NULL && pp != NULL, (__func__));
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, " i %u o %ju ty %s", entry->gpe_index,
		    (uintmax_t)entry->gpe_offset,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		/*
		 * libdisk compatibility quirk - the scheme dumps the
		 * slicer name and partition type in a way that is
		 * compatible with libdisk. When libdisk is not used
		 * anymore, this should go away.
		 */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else if (cp != NULL) {	/* Consumer configuration. */
		KASSERT(pp == NULL, (__func__));
		/* none */
	} else if (pp != NULL) {	/* Provider configuration. */
		entry = pp->private;
		if (entry == NULL)
			return;
		sbuf_printf(sb, "%s<start>%ju</start>\n", indent,
		    (uintmax_t)entry->gpe_start);
		sbuf_printf(sb, "%s<end>%ju</end>\n", indent,
		    (uintmax_t)entry->gpe_end);
		sbuf_printf(sb, "%s<index>%u</index>\n", indent,
		    entry->gpe_index);
		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
		    G_PART_TYPE(table, entry, buf, sizeof(buf)));
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)entry->gpe_offset);
		sbuf_printf(sb, "%s<length>%ju</length>\n", indent,
		    (uintmax_t)pp->mediasize);
		/* Let the scheme append its own per-entry elements. */
		G_PART_DUMPCONF(table, entry, sb, indent);
	} else {			/* Geom configuration. */
		sbuf_printf(sb, "%s<scheme>%s</scheme>\n", indent,
		    table->gpt_scheme->name);
		sbuf_printf(sb, "%s<entries>%u</entries>\n", indent,
		    table->gpt_entries);
		sbuf_printf(sb, "%s<first>%ju</first>\n", indent,
		    (uintmax_t)table->gpt_first);
		sbuf_printf(sb, "%s<last>%ju</last>\n", indent,
		    (uintmax_t)table->gpt_last);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent,
		    table->gpt_sectors);
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent,
		    table->gpt_heads);
		/* Let the scheme append its own table-wide elements. */
		G_PART_DUMPCONF(table, NULL, sb, indent);
	}
}
1729
/*
 * Orphan method: the underlying provider has gone away.  Drop the
 * extra access held for an open table and wither the geom with the
 * provider's error.
 */
static void
g_part_orphan(struct g_consumer *cp)
{
	struct g_provider *pp;
	struct g_part_table *table;

	pp = cp->provider;
	KASSERT(pp != NULL, (__func__));
	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, pp->name));
	g_topology_assert();

	/* A provider is only orphaned with an error already set. */
	KASSERT(pp->error != 0, (__func__));
	table = cp->geom->softc;
	if (table != NULL && table->gpt_opened)
		g_access(cp, -1, -1, -1);
	g_part_wither(cp->geom, pp->error);
}
1747
/*
 * Spoiled method: someone opened the underlying provider for writing,
 * invalidating our view of the partition table.  Wither the geom with
 * ENXIO.
 */
static void
g_part_spoiled(struct g_consumer *cp)
{

	G_PART_TRACE((G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name));
	g_topology_assert();

	g_part_wither(cp->geom, ENXIO);
}
1757
/*
 * Start method: handle I/O requests on a partition provider by
 * translating them into requests on the underlying consumer.  Offsets
 * are shifted by the entry's gpe_offset; reads/writes/deletes are
 * clipped to the partition size.
 */
static void
g_part_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_part_entry *entry;
	struct g_part_table *table;
	struct g_kerneldump *gkd;
	struct g_provider *pp;

	pp = bp->bio_to;
	gp = pp->geom;
	table = gp->softc;
	cp = LIST_FIRST(&gp->consumer);

	G_PART_TRACE((G_T_BIO, "%s: cmd=%d, provider=%s", __func__, bp->bio_cmd,
	    pp->name));

	/* No entry means the partition is being torn down. */
	entry = pp->private;
	if (entry == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch(bp->bio_cmd) {
	case BIO_DELETE:
	case BIO_READ:
	case BIO_WRITE:
		if (bp->bio_offset >= pp->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		/* Clip the request to the end of the partition. */
		if (bp2->bio_offset + bp2->bio_length > pp->mediasize)
			bp2->bio_length = pp->mediasize - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		/* Translate into the parent provider's address space. */
		bp2->bio_offset += entry->gpe_offset;
		g_io_request(bp2, cp);
		return;
	case BIO_FLUSH:
		/* Pass flushes straight down (cloned below). */
		break;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::fwheads", table->gpt_heads))
			return;
		if (g_handleattr_int(bp, "GEOM::fwsectors", table->gpt_sectors))
			return;
		if (g_handleattr_int(bp, "PART::isleaf", table->gpt_isleaf))
			return;
		if (g_handleattr_int(bp, "PART::depth", table->gpt_depth))
			return;
		if (g_handleattr_str(bp, "PART::scheme",
		    table->gpt_scheme->name))
			return;
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			/*
			 * Check that the partition is suitable for kernel
			 * dumps. Typically only swap partitions should be
			 * used.
			 */
			if (!G_PART_DUMPTO(table, entry)) {
				g_io_deliver(bp, ENODEV);
				printf("GEOM_PART: Partition '%s' not suitable"
				    " for kernel dumps (wrong type?)\n",
				    pp->name);
				return;
			}
			gkd = (struct g_kerneldump *)bp->bio_data;
			if (gkd->offset >= pp->mediasize) {
				g_io_deliver(bp, EIO);
				return;
			}
			/* Clip and translate the dump region as well. */
			if (gkd->offset + gkd->length > pp->mediasize)
				gkd->length = pp->mediasize - gkd->offset;
			gkd->offset += entry->gpe_offset;
		}
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/* Forward the (possibly rewritten) request to the consumer. */
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;
	g_io_request(bp2, cp);
}
1852
/*
 * Class init method: register the built-in "null" scheme, used to mark
 * media with no recognized partition table.
 */
static void
g_part_init(struct g_class *mp)
{

	TAILQ_INSERT_HEAD(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
1859
/*
 * Class fini method: deregister the built-in "null" scheme.
 */
static void
g_part_fini(struct g_class *mp)
{

	TAILQ_REMOVE(&g_part_schemes, &g_part_null_scheme, scheme_list);
}
1866
/*
 * Event handler used by g_part_modevent() to deregister a scheme with
 * the topology lock held.  On entry *xchg holds the scheme pointer; on
 * exit it holds the status: 0 on success or EBUSY if any geom using
 * the scheme is still open.
 */
static void
g_part_unload_event(void *arg, int flag)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_part_scheme *scheme;
	struct g_part_table *table;
	uintptr_t *xchg;
	int acc, error;

	if (flag == EV_CANCEL)
		return;

	xchg = arg;
	error = 0;
	scheme = (void *)(*xchg);

	g_topology_assert();

	LIST_FOREACH(gp, &g_part_class.geom, geom) {
		/*
		 * NOTE(review): 'table' is dereferenced without a NULL
		 * check; presumably a geom on the class list always has
		 * a softc here — confirm against g_part_wither()'s
		 * ordering.
		 */
		table = gp->softc;
		if (table->gpt_scheme != scheme)
			continue;

		/* Sum all access counts on the geom's edges. */
		acc = 0;
		LIST_FOREACH(pp, &gp->provider, provider)
			acc += pp->acr + pp->acw + pp->ace;
		LIST_FOREACH(cp, &gp->consumer, consumer)
			acc += cp->acr + cp->acw + cp->ace;

		/* Idle geoms are withered; any open one vetoes the unload. */
		if (!acc)
			g_part_wither(gp, ENOSYS);
		else
			error = EBUSY;
	}

	if (!error)
		TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);

	/* Report the status back through the exchange slot. */
	*xchg = error;
}
1909
1910int
1911g_part_modevent(module_t mod, int type, struct g_part_scheme *scheme)
1912{
1913	uintptr_t arg;
1914	int error;
1915
1916	switch (type) {
1917	case MOD_LOAD:
1918		TAILQ_INSERT_TAIL(&g_part_schemes, scheme, scheme_list);
1919
1920		error = g_retaste(&g_part_class);
1921		if (error)
1922			TAILQ_REMOVE(&g_part_schemes, scheme, scheme_list);
1923		break;
1924	case MOD_UNLOAD:
1925		arg = (uintptr_t)scheme;
1926		error = g_waitfor_event(g_part_unload_event, &arg, M_WAITOK,
1927		    NULL);
1928		if (!error)
1929			error = (arg == (uintptr_t)scheme) ? EDOOFUS : arg;
1930		break;
1931	default:
1932		error = EOPNOTSUPP;
1933		break;
1934	}
1935
1936	return (error);
1937}
1938