/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/geom/geom_disk.c 294483 2016-01-21 03:05:03Z rpokala $");

#include "opt_geom.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/bio.h>
#include <sys/ctype.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/devicestat.h>
#include <machine/md_var.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <geom/geom_int.h>

#include <dev/led/led.h>

#include <machine/bus.h>

struct g_disk_softc {
	struct mtx		 done_mtx;
	struct disk		*dp;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	char			led[64];
	uint32_t		state;
	struct mtx		 start_mtx;
};

static g_access_t g_disk_access;
static g_start_t g_disk_start;
static g_ioctl_t g_disk_ioctl;
static g_dumpconf_t g_disk_dumpconf;
static g_provgone_t g_disk_providergone;

static struct g_class g_disk_class = {
	.name = G_DISK_CLASS_NAME,
	.version = G_VERSION,
	.start = g_disk_start,
	.access = g_disk_access,
	.ioctl = g_disk_ioctl,
	.providergone = g_disk_providergone,
	.dumpconf = g_disk_dumpconf,
};

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, disk, CTLFLAG_RW, 0,
    "GEOM_DISK stuff");

DECLARE_GEOM_CLASS(g_disk_class, g_disk);

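/*
 * Helpers for drivers that still need Giant: take and drop Giant around
 * calls into the driver when DISKFLAG_NEEDSGIANT is set, and do nothing
 * otherwise.
 */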
static void __inline
g_disk_lock_giant(struct disk *dp)
{

	if (dp->d_flags & DISKFLAG_NEEDSGIANT)
		mtx_lock(&Giant);
}

static void __inline
g_disk_unlock_giant(struct disk *dp)
{

	if (dp->d_flags & DISKFLAG_NEEDSGIANT)
		mtx_unlock(&Giant);
}

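/*
 * GEOM access method.  On the first-open transition this calls the driver's
 * d_open and copies the current media and sector sizes into the provider;
 * on the last-close transition it calls d_close, resets the LED state and
 * clears DISKFLAG_OPEN.
 */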
static int
g_disk_access(struct g_provider *pp, int r, int w, int e)
{
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;

	g_trace(G_T_ACCESS, "g_disk_access(%s, %d, %d, %d)",
	    pp->name, r, w, e);
	g_topology_assert();
	sc = pp->private;
	if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
		/*
		 * Allow decreasing access count even if disk is not
		 * available anymore.
		 */
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	error = 0;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		if (dp->d_open != NULL) {
			g_disk_lock_giant(dp);
			error = dp->d_open(dp);
			if (bootverbose && error != 0)
				printf("Opened disk %s -> %d\n",
				    pp->name, error);
			g_disk_unlock_giant(dp);
			if (error != 0)
				return (error);
		}
		pp->mediasize = dp->d_mediasize;
		pp->sectorsize = dp->d_sectorsize;
		if (dp->d_maxsize == 0) {
			printf("WARNING: Disk drive %s%d has no d_maxsize\n",
			    dp->d_name, dp->d_unit);
			dp->d_maxsize = DFLTPHYS;
		}
		if (dp->d_delmaxsize == 0) {
			if (bootverbose && dp->d_flags & DISKFLAG_CANDELETE) {
				printf("WARNING: Disk drive %s%d has no "
				    "d_delmaxsize\n", dp->d_name, dp->d_unit);
			}
			dp->d_delmaxsize = dp->d_maxsize;
		}
		pp->stripeoffset = dp->d_stripeoffset;
		pp->stripesize = dp->d_stripesize;
		dp->d_flags |= DISKFLAG_OPEN;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		if (dp->d_close != NULL) {
			g_disk_lock_giant(dp);
			error = dp->d_close(dp);
			if (error != 0)
				printf("Closed disk %s -> %d\n",
				    pp->name, error);
			g_disk_unlock_giant(dp);
		}
		sc->state = G_STATE_ACTIVE;
		if (sc->led[0] != 0)
			led_set(sc->led, "0");
		dp->d_flags &= ~DISKFLAG_OPEN;
	}
	return (error);
}

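/*
 * Handle the GEOM::kerneldump attribute: fill in the dumperinfo embedded in
 * the request from the disk's d_dump method and geometry, clamping the
 * requested extent to the media size.
 */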
static void
g_disk_kerneldump(struct bio *bp, struct disk *dp)
{
	struct g_kerneldump *gkd;
	struct g_geom *gp;

	gkd = (struct g_kerneldump*)bp->bio_data;
	gp = bp->bio_to->geom;
	g_trace(G_T_TOPOLOGY, "g_disk_kerneldump(%s, %jd, %jd)",
		gp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
	if (dp->d_dump == NULL) {
		g_io_deliver(bp, ENODEV);
		return;
	}
	gkd->di.dumper = dp->d_dump;
	gkd->di.priv = dp;
	gkd->di.blocksize = dp->d_sectorsize;
	gkd->di.maxiosize = dp->d_maxsize;
	gkd->di.mediaoffset = gkd->offset;
	if ((gkd->offset + gkd->length) > dp->d_mediasize)
		gkd->length = dp->d_mediasize - gkd->offset;
	gkd->di.mediasize = gkd->length;
	g_io_deliver(bp, 0);
}

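/*
 * Handle the GEOM::setstate attribute: record the new state and, if an LED
 * has been configured for this disk, drive it to match.
 */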
static void
g_disk_setstate(struct bio *bp, struct g_disk_softc *sc)
{
	const char *cmd;

	memcpy(&sc->state, bp->bio_data, sizeof(sc->state));
	if (sc->led[0] != 0) {
		switch (sc->state) {
		case G_STATE_FAILED:
			cmd = "1";
			break;
		case G_STATE_REBUILD:
			cmd = "f5";
			break;
		case G_STATE_RESYNC:
			cmd = "f1";
			break;
		default:
			cmd = "0";
			break;
		}
		led_set(sc->led, cmd);
	}
	g_io_deliver(bp, 0);
}

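/*
 * Completion handler for the bios cloned in g_disk_start().  Accumulate the
 * completed byte count and first error into the parent bio under done_mtx,
 * close out the devstat transaction, and deliver the parent once all of its
 * children have come back.
 */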
static void
g_disk_done(struct bio *bp)
{
	struct bintime now;
	struct bio *bp2;
	struct g_disk_softc *sc;

	/* See "notes" for why we need a mutex here */
	/* XXX: will witness accept a mix of Giant/unGiant drivers here ? */
	bp2 = bp->bio_parent;
	sc = bp2->bio_to->private;
	bp->bio_completed = bp->bio_length - bp->bio_resid;
	binuptime(&now);
	mtx_lock(&sc->done_mtx);
	if (bp2->bio_error == 0)
		bp2->bio_error = bp->bio_error;
	bp2->bio_completed += bp->bio_completed;
	if ((bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE|BIO_FLUSH)) != 0)
		devstat_end_transaction_bio_bt(sc->dp->d_devstat, bp, &now);
	bp2->bio_inbed++;
	if (bp2->bio_children == bp2->bio_inbed) {
		mtx_unlock(&sc->done_mtx);
		bp2->bio_resid = bp2->bio_bcount - bp2->bio_completed;
		g_io_deliver(bp2, bp2->bio_error);
	} else
		mtx_unlock(&sc->done_mtx);
	g_destroy_bio(bp);
}

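/*
 * Pass ioctls straight through to the driver's d_ioctl method, taking Giant
 * if the driver requires it.
 */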
static int
g_disk_ioctl(struct g_provider *pp, u_long cmd, void * data, int fflag, struct thread *td)
{
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;

	sc = pp->private;
	dp = sc->dp;

	if (dp->d_ioctl == NULL)
		return (ENOIOCTL);
	g_disk_lock_giant(dp);
	error = dp->d_ioctl(dp, cmd, data, fflag, td);
	g_disk_unlock_giant(dp);
	return (error);
}

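/*
 * Per-request transfer limits: BIO_DELETE requests are bounded by
 * d_delmaxsize, everything else by d_maxsize.  g_disk_maxsegs() converts
 * that byte limit into the page budget used for vlist requests.
 */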
static off_t
g_disk_maxsize(struct disk *dp, struct bio *bp)
{
	if (bp->bio_cmd == BIO_DELETE)
		return (dp->d_delmaxsize);
	return (dp->d_maxsize);
}

static int
g_disk_maxsegs(struct disk *dp, struct bio *bp)
{
	return ((g_disk_maxsize(dp, bp) / PAGE_SIZE) + 1);
}

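/*
 * Advance a cloned bio by "off" bytes before issuing the remainder of a
 * split request, adjusting the data pointer for mapped, unmapped (bio_ma)
 * and vlist (bus_dma_segment_t) representations.
 */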
static void
g_disk_advance(struct disk *dp, struct bio *bp, off_t off)
{

	bp->bio_offset += off;
	bp->bio_length -= off;

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *seg, *end;

		seg = (bus_dma_segment_t *)bp->bio_data;
		end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
		off += bp->bio_ma_offset;
		while (off >= seg->ds_len) {
			KASSERT((seg != end),
			    ("vlist request runs off the end"));
			off -= seg->ds_len;
			seg++;
		}
		bp->bio_ma_offset = off;
		bp->bio_ma_n = end - seg;
		bp->bio_data = (void *)seg;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma += off / PAGE_SIZE;
		bp->bio_ma_offset += off;
		bp->bio_ma_offset %= PAGE_SIZE;
		bp->bio_ma_n -= off / PAGE_SIZE;
	} else {
		bp->bio_data += off;
	}
}

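/*
 * Clamp a single vlist segment against the remaining length and page budget,
 * updating the caller's offset, residual length and page count in place.
 */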
static void
g_disk_seg_limit(bus_dma_segment_t *seg, off_t *poffset,
    off_t *plength, int *ppages)
{
	uintptr_t seg_page_base;
	uintptr_t seg_page_end;
	off_t offset;
	off_t length;
	int seg_pages;

	offset = *poffset;
	length = *plength;

	if (length > seg->ds_len - offset)
		length = seg->ds_len - offset;

	seg_page_base = trunc_page(seg->ds_addr + offset);
	seg_page_end  = round_page(seg->ds_addr + offset + length);
	seg_pages = (seg_page_end - seg_page_base) >> PAGE_SHIFT;

	if (seg_pages > *ppages) {
		seg_pages = *ppages;
		length = (seg_page_base + (seg_pages << PAGE_SHIFT)) -
		    (seg->ds_addr + offset);
	}

	*poffset = 0;
	*plength -= length;
	*ppages -= seg_pages;
}

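/*
 * Walk a vlist request and return how many bytes do not fit within the
 * disk's segment budget; optionally report where the walk stopped so the
 * caller can trim bio_ma_n to match.
 */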
static off_t
g_disk_vlist_limit(struct disk *dp, struct bio *bp, bus_dma_segment_t **pendseg)
{
	bus_dma_segment_t *seg, *end;
	off_t residual;
	off_t offset;
	int pages;

	seg = (bus_dma_segment_t *)bp->bio_data;
	end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
	residual = bp->bio_length;
	offset = bp->bio_ma_offset;
	pages = g_disk_maxsegs(dp, bp);
	while (residual != 0 && pages != 0) {
		KASSERT((seg != end),
		    ("vlist limit runs off the end"));
		g_disk_seg_limit(seg, &offset, &residual, &pages);
		seg++;
	}
	if (pendseg != NULL)
		*pendseg = seg;
	return (residual);
}

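/*
 * Clamp a cloned bio to what the driver can take in a single request.
 * Returns true if the bio was shortened, in which case the caller is
 * responsible for issuing the remainder.
 */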
static bool
g_disk_limit(struct disk *dp, struct bio *bp)
{
	bool limited = false;
	off_t maxsz;

	maxsz = g_disk_maxsize(dp, bp);

	/*
	 * XXX: If we have a stripesize we should really use it here.
	 *      Care should be taken in the delete case if this is done
	 *      as deletes can be very sensitive to size given how they
	 *      are processed.
	 */
	if (bp->bio_length > maxsz) {
		bp->bio_length = maxsz;
		limited = true;
	}

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *firstseg, *endseg;
		off_t residual;

		firstseg = (bus_dma_segment_t*)bp->bio_data;
		residual = g_disk_vlist_limit(dp, bp, &endseg);
		if (residual != 0) {
			bp->bio_ma_n = endseg - firstseg;
			bp->bio_length -= residual;
			limited = true;
		}
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma_n =
		    howmany(bp->bio_ma_offset + bp->bio_length, PAGE_SIZE);
	}

	return (limited);
}

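/*
 * GEOM start method.  READ, WRITE and DELETE requests are cloned and, when
 * necessary, split into driver-sized chunks before being handed to
 * d_strategy; GETATTR requests are answered here, with the driver getting
 * first refusal via d_getattr; FLUSH is forwarded when the driver advertises
 * DISKFLAG_CANFLUSHCACHE.
 */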
static void
g_disk_start(struct bio *bp)
{
	struct bio *bp2, *bp3;
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;
	off_t off;

	sc = bp->bio_to->private;
	if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	error = EJUSTRETURN;
	switch(bp->bio_cmd) {
	case BIO_DELETE:
		if (!(dp->d_flags & DISKFLAG_CANDELETE)) {
			error = EOPNOTSUPP;
			break;
		}
		/* fall-through */
	case BIO_READ:
	case BIO_WRITE:
		KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0 ||
		    (bp->bio_flags & BIO_UNMAPPED) == 0,
		    ("unmapped bio not supported by disk %s", dp->d_name));
		off = 0;
		bp3 = NULL;
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			error = ENOMEM;
			break;
		}
		for (;;) {
			if (g_disk_limit(dp, bp2)) {
				off += bp2->bio_length;

				/*
				 * To avoid a race, we need to grab the next bio
				 * before we schedule this one.  See "notes".
				 */
				bp3 = g_clone_bio(bp);
				if (bp3 == NULL)
					bp->bio_error = ENOMEM;
			}
			bp2->bio_done = g_disk_done;
			bp2->bio_pblkno = bp2->bio_offset / dp->d_sectorsize;
			bp2->bio_bcount = bp2->bio_length;
			bp2->bio_disk = dp;
			mtx_lock(&sc->start_mtx);
			devstat_start_transaction_bio(dp->d_devstat, bp2);
			mtx_unlock(&sc->start_mtx);
			g_disk_lock_giant(dp);
			dp->d_strategy(bp2);
			g_disk_unlock_giant(dp);

			if (bp3 == NULL)
				break;

			bp2 = bp3;
			bp3 = NULL;
			g_disk_advance(dp, bp2, off);
		}
		break;
	case BIO_GETATTR:
		/* Give the driver a chance to override */
		if (dp->d_getattr != NULL) {
			if (bp->bio_disk == NULL)
				bp->bio_disk = dp;
			error = dp->d_getattr(bp);
			if (error != -1)
				break;
			error = EJUSTRETURN;
		}
		if (g_handleattr_int(bp, "GEOM::candelete",
		    (dp->d_flags & DISKFLAG_CANDELETE) != 0))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwsectors",
		    dp->d_fwsectors))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwheads", dp->d_fwheads))
			break;
		else if (g_handleattr_off_t(bp, "GEOM::frontstuff", 0))
			break;
		else if (g_handleattr_str(bp, "GEOM::ident", dp->d_ident))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_vendor",
		    dp->d_hba_vendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_device",
		    dp->d_hba_device))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subvendor",
		    dp->d_hba_subvendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subdevice",
		    dp->d_hba_subdevice))
			break;
		else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
			g_disk_kerneldump(bp, dp);
		else if (!strcmp(bp->bio_attribute, "GEOM::setstate"))
			g_disk_setstate(bp, sc);
		else if (!strcmp(bp->bio_attribute, "GEOM::rotation_rate")) {
			uint64_t v;

			if ((dp->d_flags & DISKFLAG_LACKS_ROTRATE) == 0)
				v = dp->d_rotation_rate;
			else
				v = 0; /* rate unknown */
			g_handleattr_uint16_t(bp, "GEOM::rotation_rate", v);
			break;
		} else
			error = ENOIOCTL;
		break;
	case BIO_FLUSH:
		g_trace(G_T_BIO, "g_disk_flushcache(%s)",
		    bp->bio_to->name);
		if (!(dp->d_flags & DISKFLAG_CANFLUSHCACHE)) {
			error = EOPNOTSUPP;
			break;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_disk_done;
		bp2->bio_disk = dp;
		mtx_lock(&sc->start_mtx);
		devstat_start_transaction_bio(dp->d_devstat, bp2);
		mtx_unlock(&sc->start_mtx);
		g_disk_lock_giant(dp);
		dp->d_strategy(bp2);
		g_disk_unlock_giant(dp);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	if (error != EJUSTRETURN)
		g_io_deliver(bp, error);
	return;
}

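/*
 * dumpconf method.  Without indentation (the conftxt form) only the firmware
 * geometry is appended; otherwise the full XML provider description is
 * emitted: geometry, rotation rate, ident/lunid/lunname (via d_getattr when
 * available) and the description string.
 */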
static void
g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp)
{
	struct bio *bp;
	struct disk *dp;
	struct g_disk_softc *sc;
	char *buf;
	int res = 0;

	sc = gp->softc;
	if (sc == NULL || (dp = sc->dp) == NULL)
		return;
	if (indent == NULL) {
		sbuf_printf(sb, " hd %u", dp->d_fwheads);
		sbuf_printf(sb, " sc %u", dp->d_fwsectors);
		return;
	}
	if (pp != NULL) {
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n",
		    indent, dp->d_fwheads);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n",
		    indent, dp->d_fwsectors);

		/*
		 * "rotationrate" is a little complicated, because the value
		 * returned by the drive might not be the RPM; 0 and 1 are
		 * special cases, and there's also a valid range.
		 */
		sbuf_printf(sb, "%s<rotationrate>", indent);
		if (dp->d_rotation_rate == 0)		/* Old drives don't */
			sbuf_printf(sb, "unknown");	/* report RPM. */
		else if (dp->d_rotation_rate == 1)	/* Since 0 is used */
			sbuf_printf(sb, "0");		/* above, SSDs use 1. */
		else if ((dp->d_rotation_rate >= 0x041) &&
		    (dp->d_rotation_rate <= 0xfffe))
			sbuf_printf(sb, "%u", dp->d_rotation_rate);
		else
			sbuf_printf(sb, "invalid");
		sbuf_printf(sb, "</rotationrate>\n");
		if (dp->d_getattr != NULL) {
			buf = g_malloc(DISK_IDENT_SIZE, M_WAITOK);
			bp = g_alloc_bio();
			bp->bio_disk = dp;
			bp->bio_attribute = "GEOM::ident";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			res = dp->d_getattr(bp);
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_printf_escaped(sb, "%s",
			    res == 0 ? buf: dp->d_ident);
			sbuf_printf(sb, "</ident>\n");
			bp->bio_attribute = "GEOM::lunid";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunid>", indent);
				g_conf_printf_escaped(sb, "%s", buf);
				sbuf_printf(sb, "</lunid>\n");
			}
			bp->bio_attribute = "GEOM::lunname";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunname>", indent);
				g_conf_printf_escaped(sb, "%s", buf);
				sbuf_printf(sb, "</lunname>\n");
			}
			g_destroy_bio(bp);
			g_free(buf);
		} else {
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_printf_escaped(sb, "%s", dp->d_ident);
			sbuf_printf(sb, "</ident>\n");
		}
		sbuf_printf(sb, "%s<descr>", indent);
		g_conf_printf_escaped(sb, "%s", dp->d_descr);
		sbuf_printf(sb, "</descr>\n");
	}
}

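/*
 * Event handler for disk_resize(): propagate a media size change to every
 * provider of the geom, withering providers whose sector size no longer
 * matches the disk.
 */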
static void
g_disk_resize(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_provider *pp;

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();

	dp = ptr;
	gp = dp->d_geom;

	if (dp->d_destroyed || gp == NULL)
		return;

	LIST_FOREACH(pp, &gp->provider, provider) {
		if (pp->sectorsize != 0 &&
		    pp->sectorsize != dp->d_sectorsize)
			g_wither_provider(pp, ENXIO);
		else
			g_resize_provider(pp, dp->d_mediasize);
	}
}

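/*
 * Event handler for disk_create(): allocate the softc, create the geom and
 * its provider, wire up devstat, propagate the unmapped/direct-dispatch
 * capabilities, and hang the per-disk sysctl tree (including the "led"
 * knob) off kern.geom.disk.<name>.
 */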
static void
g_disk_create(void *arg, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct disk *dp;
	struct g_disk_softc *sc;
	char tmpstr[80];

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();
	dp = arg;
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->start_mtx, "g_disk_start", NULL, MTX_DEF);
	mtx_init(&sc->done_mtx, "g_disk_done", NULL, MTX_DEF);
	sc->dp = dp;
	gp = g_new_geomf(&g_disk_class, "%s%d", dp->d_name, dp->d_unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "%s", gp->name);
	devstat_remove_entry(pp->stat);
	pp->stat = NULL;
	dp->d_devstat->id = pp;
	pp->mediasize = dp->d_mediasize;
	pp->sectorsize = dp->d_sectorsize;
	pp->stripeoffset = dp->d_stripeoffset;
	pp->stripesize = dp->d_stripesize;
	if ((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0)
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
	if ((dp->d_flags & DISKFLAG_DIRECT_COMPLETION) != 0)
		pp->flags |= G_PF_DIRECT_SEND;
	pp->flags |= G_PF_DIRECT_RECEIVE;
	if (bootverbose)
		printf("GEOM: new disk %s\n", gp->name);
	sysctl_ctx_init(&sc->sysctl_ctx);
	snprintf(tmpstr, sizeof(tmpstr), "GEOM disk %s", gp->name);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_geom_disk), OID_AUTO, gp->name,
		CTLFLAG_RD, 0, tmpstr);
	if (sc->sysctl_tree != NULL) {
		snprintf(tmpstr, sizeof(tmpstr),
		    "kern.geom.disk.%s.led", gp->name);
		TUNABLE_STR_FETCH(tmpstr, sc->led, sizeof(sc->led));
		SYSCTL_ADD_STRING(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "led",
		    CTLFLAG_RW | CTLFLAG_TUN, sc->led, sizeof(sc->led),
		    "LED name");
	}
	pp->private = sc;
	dp->d_geom = gp;
	g_error_provider(pp, 0);
}

/*
 * We get this callback after all of the consumers have gone away, and just
 * before the provider is freed.  If the disk driver provided a d_gone
 * callback, let them know that it is okay to free resources -- they won't
 * be getting any more accesses from GEOM.
 */
static void
g_disk_providergone(struct g_provider *pp)
{
	struct disk *dp;
	struct g_disk_softc *sc;

	sc = (struct g_disk_softc *)pp->private;
	dp = sc->dp;
	if (dp != NULL && dp->d_gone != NULL)
		dp->d_gone(dp);
	if (sc->sysctl_tree != NULL) {
		sysctl_ctx_free(&sc->sysctl_ctx);
		sc->sysctl_tree = NULL;
	}
	if (sc->led[0] != 0) {
		led_set(sc->led, "0");
		sc->led[0] = 0;
	}
	pp->private = NULL;
	pp->geom->softc = NULL;
	mtx_destroy(&sc->done_mtx);
	mtx_destroy(&sc->start_mtx);
	g_free(sc);
}

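/*
 * Event handler for disk_destroy(): detach the softc from the disk, wither
 * the geom and free the struct disk itself, so the caller must not touch it
 * afterwards.
 */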
static void
g_disk_destroy(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_disk_softc *sc;

	g_topology_assert();
	dp = ptr;
	gp = dp->d_geom;
	if (gp != NULL) {
		sc = gp->softc;
		if (sc != NULL)
			sc->dp = NULL;
		dp->d_geom = NULL;
		g_wither_geom(gp, ENXIO);
	}
	g_free(dp);
}

/*
 * We only allow printable characters in disk ident,
 * the rest is converted to 'x<HH>'.
 */
static void
g_disk_ident_adjust(char *ident, size_t size)
{
	char *p, tmp[4], newid[DISK_IDENT_SIZE];

	newid[0] = '\0';
	for (p = ident; *p != '\0'; p++) {
		if (isprint(*p)) {
			tmp[0] = *p;
			tmp[1] = '\0';
		} else {
			snprintf(tmp, sizeof(tmp), "x%02hhx",
			    *(unsigned char *)p);
		}
		if (strlcat(newid, tmp, sizeof(newid)) >= sizeof(newid))
			break;
	}
	bzero(ident, size);
	strlcpy(ident, newid, size);
}

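/*
 * Public disk(9) entry points.  A typical driver attach path looks roughly
 * like the following (hypothetical "foo" driver, sketch only):
 *
 *	dp = disk_alloc();
 *	dp->d_name = "foo";
 *	dp->d_unit = unit;
 *	dp->d_strategy = foo_strategy;
 *	dp->d_sectorsize = 512;
 *	dp->d_mediasize = mediasize;
 *	dp->d_maxsize = MAXPHYS;
 *	disk_create(dp, DISK_VERSION);
 *
 * with disk_gone() and/or disk_destroy() on the detach path.
 */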
struct disk *
disk_alloc(void)
{

	return (g_malloc(sizeof(struct disk), M_WAITOK | M_ZERO));
}

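/*
 * Validate the driver's disk(9) ABI version, fill in defaults (devstat
 * entry, sanitized ident) and schedule g_disk_create() as a GEOM event.
 */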
void
disk_create(struct disk *dp, int version)
{

	if (version != DISK_VERSION) {
		printf("WARNING: Attempt to add disk %s%d %s",
		    dp->d_name, dp->d_unit,
		    " using incompatible ABI version of disk(9)\n");
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	if (version < DISK_VERSION_04)
		dp->d_flags |= DISKFLAG_LACKS_ROTRATE;
	KASSERT(dp->d_strategy != NULL, ("disk_create need d_strategy"));
	KASSERT(dp->d_name != NULL, ("disk_create need d_name"));
	KASSERT(*dp->d_name != 0, ("disk_create need d_name"));
	KASSERT(strlen(dp->d_name) < SPECNAMELEN - 4, ("disk name too long"));
	if (dp->d_devstat == NULL)
		dp->d_devstat = devstat_new_entry(dp->d_name, dp->d_unit,
		    dp->d_sectorsize, DEVSTAT_ALL_SUPPORTED,
		    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	dp->d_geom = NULL;
	g_disk_ident_adjust(dp->d_ident, sizeof(dp->d_ident));
	g_post_event(g_disk_create, dp, M_WAITOK, dp, NULL);
}

void
disk_destroy(struct disk *dp)
{

	g_cancel_event(dp);
	dp->d_destroyed = 1;
	if (dp->d_devstat != NULL)
		devstat_remove_entry(dp->d_devstat);
	g_post_event(g_disk_destroy, dp, M_WAITOK, NULL);
}

void
disk_gone(struct disk *dp)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_wither_provider(pp, ENXIO);
		}
	}
}

void
disk_attr_changed(struct disk *dp, const char *attr, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL)
		LIST_FOREACH(pp, &gp->provider, provider)
			(void)g_attr_changed(pp, attr, flag);
}

void
disk_media_changed(struct disk *dp, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_media_changed(pp, flag);
		}
	}
}

void
disk_media_gone(struct disk *dp, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_media_gone(pp, flag);
		}
	}
}

int
disk_resize(struct disk *dp, int flag)
{

	if (dp->d_destroyed || dp->d_geom == NULL)
		return (0);

	return (g_post_event(g_disk_resize, dp, flag, NULL));
}

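/*
 * kern.disks sysctl: build a space-separated list of the disk geoms in this
 * class from an event-queue context (so the topology lock is held) and hand
 * the result back to userland.
 */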
static void
g_kern_disks(void *p, int flag __unused)
{
	struct sbuf *sb;
	struct g_geom *gp;
	char *sp;

	sb = p;
	sp = "";
	g_topology_assert();
	LIST_FOREACH(gp, &g_disk_class.geom, geom) {
		sbuf_printf(sb, "%s%s", sp, gp->name);
		sp = " ";
	}
	sbuf_finish(sb);
}

static int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct sbuf *sb;

	sb = sbuf_new_auto();
	g_waitfor_event(g_kern_disks, sb, M_WAITOK, NULL);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, disks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_disks, "A", "names of available disks");
