/* $NetBSD: dksubr.c,v 1.114 2023/07/11 23:26:41 christos Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 1999, 2002, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dksubr.c,v 1.114 2023/07/11 23:26:41 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/module.h>
#include <sys/syslog.h>

#include <dev/dkvar.h>
#include <miscfs/specfs/specdev.h> /* for v_rdev */

int	dkdebug = 0;

#ifdef DEBUG
#define DKDB_FOLLOW	0x1
#define DKDB_INIT	0x2
#define DKDB_VNODE	0x4
#define DKDB_DUMP	0x8

#define IFDEBUG(x,y)		if (dkdebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(DKDB_FOLLOW, y)
#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#define DKF_READYFORDUMP	(DKF_INITED|DKF_TAKEDUMP)

static int dk_subr_modcmd(modcmd_t, void *);

#define DKLABELDEV(dev)	\
	(MAKEDISKDEV(major((dev)), DISKUNIT((dev)), RAW_PART))

static void	dk_makedisklabel(struct dk_softc *);
static int	dk_translate(struct dk_softc *, struct buf *);

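/*
 * Initialize the common dk_softc state: clear it, record the device
 * and disk type, and point the struct disk at our name.
 */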
void
dk_init(struct dk_softc *dksc, device_t dev, int dtype)
{

	memset(dksc, 0x0, sizeof(*dksc));
	dksc->sc_dtype = dtype;
	dksc->sc_dev = dev;

	strlcpy(dksc->sc_xname, device_xname(dev), DK_XNAME_SIZE);
	dksc->sc_dkdev.dk_name = dksc->sc_xname;
}

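/*
 * Attach the unit: set up the I/O lock, mark it ready for dumps and,
 * unless DKF_NO_RND is set, register it as an entropy source.
 */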
void
dk_attach(struct dk_softc *dksc)
{
	KASSERT(dksc->sc_dev != NULL);

	mutex_init(&dksc->sc_iolock, MUTEX_DEFAULT, IPL_VM);
	dksc->sc_flags |= DKF_READYFORDUMP;
#ifdef DIAGNOSTIC
	dksc->sc_flags |= DKF_WARNLABEL | DKF_LABELSANITY;
#endif

	if ((dksc->sc_flags & DKF_NO_RND) == 0) {
		/* Attach the device into the rnd source list. */
		rnd_attach_source(&dksc->sc_rnd_source, dksc->sc_xname,
		    RND_TYPE_DISK, RND_FLAG_DEFAULT);
	}
}

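/*
 * Detach the unit: undo what dk_attach() did.
 */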
void
dk_detach(struct dk_softc *dksc)
{
	if ((dksc->sc_flags & DKF_NO_RND) == 0) {
		/* Unhook the entropy source. */
		rnd_detach_source(&dksc->sc_rnd_source);
	}

	dksc->sc_flags &= ~DKF_READYFORDUMP;
	mutex_destroy(&dksc->sc_iolock);
}

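/*
 * Open a partition: call the driver's firstopen hook for the first
 * opener, read the disklabel if necessary, validate the partition
 * and record the open in the per-disk open masks.
 */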
/* ARGSUSED */
int
dk_open(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct	disklabel *lp = dksc->sc_dkdev.dk_label;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;
	int	ret = 0;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (dk->dk_nwedges != 0 && part != RAW_PART) {
		ret = EBUSY;
		goto done;
	}

	/* If no dkdriver attached, bail */
	if (dkd == NULL) {
		ret = ENXIO;
		goto done;
	}

	/*
	 * initialize driver for the first opener
	 */
	if (dk->dk_openmask == 0 && dkd->d_firstopen != NULL) {
		ret = (*dkd->d_firstopen)(dksc->sc_dev, dev, flags, fmt);
		if (ret)
			goto done;
	}

	/*
	 * If we're initialized and the in-core disklabel isn't valid
	 * yet (i.e. this is the first open since the last close),
	 * read it now.
	 */
	if ((dksc->sc_flags & DKF_INITED)) {
		if ((dksc->sc_flags & DKF_VLABEL) == 0) {
			dksc->sc_flags |= DKF_VLABEL;
			dk_getdisklabel(dksc, dev);
		}
	}

	/* Fail if we can't find the partition. */
	if (part != RAW_PART &&
	    ((dksc->sc_flags & DKF_VLABEL) == 0 ||
	     part >= lp->d_npartitions ||
	     lp->d_partitions[part].p_fstype == FS_UNUSED)) {
		ret = ENXIO;
		goto done;
	}

	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

done:
	mutex_exit(&dk->dk_openlock);
	return ret;
}

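/*
 * Close a partition: clear it from the open masks and, on last
 * close, call the driver's lastclose hook and invalidate the
 * in-core disklabel unless it was kept with DIOCKLABEL.
 */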
/* ARGSUSED */
int
dk_close(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	if (dk->dk_openmask == 0) {
		if (dkd->d_lastclose != NULL)
			(*dkd->d_lastclose)(dksc->sc_dev);
		if ((dksc->sc_flags & DKF_KLABEL) == 0)
			dksc->sc_flags &= ~DKF_VLABEL;
	}

	mutex_exit(&dk->dk_openlock);
	return 0;
}

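/*
 * Validate a buffer against the partition and media bounds and
 * compute b_rawblkno, the absolute block number in units of the
 * device's logical block size.  Returns -1 if the buffer is ready
 * to be queued, otherwise the error to complete it with (0 if
 * there is nothing to do).
 */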
static int
dk_translate(struct dk_softc *dksc, struct buf *bp)
{
	int	part;
	int	wlabel;
	daddr_t	blkno;
	struct disklabel *lp;
	struct disk *dk;
	uint64_t numsecs;
	unsigned secsize;

	lp = dksc->sc_dkdev.dk_label;
	dk = &dksc->sc_dkdev;

	part = DISKPART(bp->b_dev);
	numsecs = dk->dk_geom.dg_secperunit;
	secsize = dk->dk_geom.dg_secsize;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If there is nothing to do, then we are done */
	if (bp->b_bcount == 0)
		goto done;

	wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
	if (part == RAW_PART) {
		uint64_t numblocks = btodb(numsecs * secsize);
		if (bounds_check_with_mediasize(bp, DEV_BSIZE, numblocks) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0)
			goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
	if (secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
	bp->b_rawblkno = blkno;

	return -1;

done:
	bp->b_resid = bp->b_bcount;
	return bp->b_error;
}

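/*
 * Common strategy prologue: reject I/O if the unit isn't initialized
 * and translate the buffer.  Returns 1 if the buffer was completed
 * here, 0 if the caller should queue it.
 */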
static int
dk_strategy1(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, %p)\n", __func__,
	    dksc->sc_xname, dksc, bp));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return 1;
	}

	error = dk_translate(dksc, bp);
	if (error >= 0) {
		biodone(bp);
		return 1;
	}

	return 0;
}

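/*
 * Strategy entry point: translate the buffer, then queue it and
 * kick the driver via dk_start().
 */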
void
dk_strategy(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	error = dk_strategy1(dksc, bp);
	if (error)
		return;

	/*
	 * Queue buffer and start unit
	 */
	dk_start(dksc, bp);
}

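/*
 * Like dk_strategy(), but only queue the buffer; the caller is
 * expected to start the queue later with dk_start().
 */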
int
dk_strategy_defer(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	error = dk_strategy1(dksc, bp);
	if (error)
		return error;

	/*
	 * Queue buffer only
	 */
	mutex_enter(&dksc->sc_iolock);
	disk_wait(&dksc->sc_dkdev);
	bufq_put(dksc->sc_bufq, bp);
	mutex_exit(&dksc->sc_iolock);

	return 0;
}

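/*
 * Return whether any buffers are still waiting on the queue.
 */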
int
dk_strategy_pending(struct dk_softc *dksc)
{
	struct buf *bp;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return 0;
	}

	mutex_enter(&dksc->sc_iolock);
	bp = bufq_peek(dksc->sc_bufq);
	mutex_exit(&dksc->sc_iolock);

	return bp != NULL;
}

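/*
 * Queue an optional buffer and feed the queue to the driver's
 * d_diskstart routine until it is empty or the driver asks for a
 * retry.  Only one thread runs the queue at a time; concurrent
 * callers just bump sc_busy so the queue is re-examined.
 */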
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return;
	}

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL) {
		bp->b_ci = curcpu();
		disk_wait(&dksc->sc_dkdev);
		bufq_put(dksc->sc_bufq, bp);
	}

	/*
	 * If another thread is running the queue, increment
	 * busy counter to 2 so that the queue is retried,
	 * because the driver may now accept additional
	 * requests.
	 */
	if (dksc->sc_busy < 2)
		dksc->sc_busy++;
	if (dksc->sc_busy > 1)
		goto done;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success isn't atomic.
	 *
	 * So when a diskstart fails, the buffer is saved
	 * and tried again before the next buffer is fetched.
	 * dk_drain() handles flushing of a saved buffer.
	 *
	 * This preserves the order of I/O operations; re-queueing
	 * the failed buffer with bufq_put would not.
	 */

	while (dksc->sc_busy > 0) {

		bp = dksc->sc_deferred;
		dksc->sc_deferred = NULL;

		if (bp == NULL)
			bp = bufq_get(dksc->sc_bufq);

		while (bp != NULL) {

			disk_busy(&dksc->sc_dkdev);
			mutex_exit(&dksc->sc_iolock);
			error = dkd->d_diskstart(dksc->sc_dev, bp);
			mutex_enter(&dksc->sc_iolock);
			if (error == EAGAIN || error == ENOMEM) {
				/*
				 * Not a disk error. Retry later.
				 */
				KASSERT(dksc->sc_deferred == NULL);
				dksc->sc_deferred = bp;
				disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
				disk_wait(&dksc->sc_dkdev);
				break;
			}

			if (error != 0) {
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				mutex_exit(&dksc->sc_iolock);
				dk_done(dksc, bp);
				mutex_enter(&dksc->sc_iolock);
			}

			bp = bufq_get(dksc->sc_bufq);
		}

		dksc->sc_busy--;
	}
done:
	mutex_exit(&dksc->sc_iolock);
}

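/*
 * Complete a buffer: log errors, update the disk statistics, feed
 * the entropy pool and biodone() the buffer.
 */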
void
dk_done(struct dk_softc *dksc, struct buf *bp)
{
	struct disk *dk = &dksc->sc_dkdev;

	if (bp->b_error != 0) {
		struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

		diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
			dk->dk_label);
		printf("\n");
	}

	mutex_enter(&dksc->sc_iolock);
	disk_unbusy(dk, bp->b_bcount - bp->b_resid, (bp->b_flags & B_READ));
	mutex_exit(&dksc->sc_iolock);

	if ((dksc->sc_flags & DKF_NO_RND) == 0)
		rnd_add_uint32(&dksc->sc_rnd_source, bp->b_rawblkno);

	biodone(bp);
}

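/*
 * Abort all queued I/O: fail a saved deferred buffer with EIO and
 * drain the buffer queue.
 */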
void
dk_drain(struct dk_softc *dksc)
{
	struct buf *bp;

	mutex_enter(&dksc->sc_iolock);
	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;
	if (bp != NULL) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	bufq_drain(dksc->sc_bufq);
	mutex_exit(&dksc->sc_iolock);
}

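/*
 * Pass a discard request for [pos, pos + len) to the driver's
 * d_discard routine in chunks that fit into a struct buf, using
 * dk_translate() for bounds checking and block translation.
 */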
int
dk_discard(struct dk_softc *dksc, dev_t dev, off_t pos, off_t len)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	unsigned secsize = dksc->sc_dkdev.dk_geom.dg_secsize;
	struct buf tmp, *bp = &tmp;
	int maxsz;
	int error = 0;

	KASSERT(len >= 0);

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", %jd, %jd)\n", __func__,
	    dksc->sc_xname, dksc, dev, (intmax_t)pos, (intmax_t)len));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return ENXIO;
	}

	if (secsize == 0 || (pos % secsize) != 0 || (len % secsize) != 0)
		return EINVAL;

	/* largest value that b_bcount can store */
	maxsz = rounddown(INT_MAX, secsize);

	while (len > 0) {
		/* enough data to please the bounds checking code */
		bp->b_dev = dev;
		bp->b_blkno = (daddr_t)(pos / secsize);
		bp->b_bcount = uimin(len, maxsz);
		bp->b_flags = B_WRITE;

		error = dk_translate(dksc, bp);
		if (error >= 0)
			break;

		error = dkd->d_discard(dksc->sc_dev,
			(off_t)bp->b_rawblkno * secsize,
			(off_t)bp->b_bcount);
		if (error)
			break;

		pos += bp->b_bcount;
		len -= bp->b_bcount;
	}

	return error;
}

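/*
 * Return the size of the given partition in DEV_BSIZE units, or -1
 * if the unit isn't initialized, the partition isn't FS_SWAP, or it
 * cannot be opened.
 */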
int
dk_size(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct	disklabel *lp;
	int	is_open;
	int	part;
	int	size;

	if ((dksc->sc_flags & DKF_INITED) == 0)
		return -1;

	part = DISKPART(dev);
	is_open = dksc->sc_dkdev.dk_openmask & (1 << part);

	if (!is_open && dkd->d_open(dev, 0, S_IFBLK, curlwp))
		return -1;

	lp = dksc->sc_dkdev.dk_label;
	if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = lp->d_partitions[part].p_size *
		    (lp->d_secsize / DEV_BSIZE);

	if (!is_open && dkd->d_close(dev, 0, S_IFBLK, curlwp))
		return -1;

	return size;
}

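/*
 * Common ioctl handler: take care of the generic disk ioctls
 * (disklabel, wedges, buffer queue strategy) and return ENOTTY for
 * anything not handled here.
 */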
int
dk_ioctl(struct dk_softc *dksc, dev_t dev,
	    u_long cmd, void *data, int flag, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct	disklabel *lp;
	struct	disk *dk = &dksc->sc_dkdev;
#ifdef __HAVE_OLD_DISKLABEL
	struct	disklabel newlabel;
#endif
	int	error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%lx)\n", __func__,
	    dksc->sc_xname, dksc, dev, cmd));

	/* ensure that the pseudo disk is open for writes for these commands */
	switch (cmd) {
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCSDINFO:
	case ODIOCWDINFO:
#endif
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCSSTRATEGY:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	/* ensure that the pseudo-disk is initialized for these */
	switch (cmd) {
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPARTINFO:
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCLWEDGES:
	case DIOCMWEDGES:
	case DIOCRMWEDGES:
	case DIOCCACHESYNC:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
	case ODIOCSDINFO:
	case ODIOCWDINFO:
	case ODIOCGDEFLABEL:
#endif
		if ((dksc->sc_flags & DKF_INITED) == 0)
			return ENXIO;
	}

	error = disk_ioctl(dk, dev, cmd, data, flag, l);
	if (error != EPASSTHROUGH)
		return error;
	else
		error = 0;

	switch (cmd) {
	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
#ifdef __HAVE_OLD_DISKLABEL
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, data, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)data;

		mutex_enter(&dk->dk_openlock);
		dksc->sc_flags |= DKF_LABELLING;

		error = setdisklabel(dksc->sc_dkdev.dk_label,
		    lp, 0, dksc->sc_dkdev.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			   )
				error = writedisklabel(DKLABELDEV(dev),
				    dkd->d_strategy, dksc->sc_dkdev.dk_label,
				    dksc->sc_dkdev.dk_cpulabel);
		}

		dksc->sc_flags &= ~DKF_LABELLING;
		mutex_exit(&dk->dk_openlock);
		break;

	case DIOCKLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_KLABEL;
		else
			dksc->sc_flags &= ~DKF_KLABEL;
		break;

	case DIOCWLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_WLABEL;
		else
			dksc->sc_flags &= ~DKF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, (struct disklabel *)data);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCGSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;

		mutex_enter(&dksc->sc_iolock);
		if (dksc->sc_bufq != NULL)
			strlcpy(dks->dks_name,
			    bufq_getstrategyname(dksc->sc_bufq),
			    sizeof(dks->dks_name));
		else
			error = EINVAL;
		mutex_exit(&dksc->sc_iolock);
		dks->dks_paramlen = 0;
		break;
	    }

	case DIOCSSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;
		struct bufq_state *new;
		struct bufq_state *old;

		if (dks->dks_param != NULL) {
			return EINVAL;
		}
		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error) {
			return error;
		}
		mutex_enter(&dksc->sc_iolock);
		old = dksc->sc_bufq;
		if (old)
			bufq_move(new, old);
		dksc->sc_bufq = new;
		mutex_exit(&dksc->sc_iolock);
		if (old)
			bufq_free(old);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return error;
}

/*
 * dk_dump dumps all of physical memory into the partition specified.
 * This requires substantially more framework than {s,w}ddump, and hence
 * is probably much more fragile.
 *
 */

#define DKFF_READYFORDUMP(x)	(((x) & DKF_READYFORDUMP) == DKF_READYFORDUMP)
static volatile int	dk_dumping = 0;

/* ARGSUSED */
int
dk_dump(struct dk_softc *dksc, dev_t dev,
    daddr_t blkno, void *vav, size_t size, int flags)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	char *va = vav;
	struct disklabel *lp;
	struct partition *p;
	int part, towrt, maxblkcnt, nblk;
	int maxxfer, rv = 0;

	/*
	 * ensure that we consider this device to be safe for dumping,
	 * and that the device is configured.
	 */
	if (!DKFF_READYFORDUMP(dksc->sc_flags)) {
		DPRINTF(DKDB_DUMP, ("%s: bad dump flags 0x%x\n", __func__,
		    dksc->sc_flags));
		return ENXIO;
	}

	/* ensure that we are not already dumping */
	if (dk_dumping)
		return EFAULT;
	if ((flags & DK_DUMP_RECURSIVE) == 0)
		dk_dumping = 1;

	if (dkd->d_dumpblocks == NULL) {
		DPRINTF(DKDB_DUMP, ("%s: no dumpblocks\n", __func__));
		return ENXIO;
	}

	/* device specific max transfer size */
	maxxfer = MAXPHYS;
	if (dkd->d_iosize != NULL)
		(*dkd->d_iosize)(dksc->sc_dev, &maxxfer);

	/* Convert to disk sectors.  Request must be a multiple of size. */
	part = DISKPART(dev);
	lp = dksc->sc_dkdev.dk_label;
	if ((size % lp->d_secsize) != 0) {
		DPRINTF(DKDB_DUMP, ("%s: odd size %zu\n", __func__, size));
		return EFAULT;
	}
	towrt = size / lp->d_secsize;
	blkno = dbtob(blkno) / lp->d_secsize;   /* blkno in secsize units */

	p = &lp->d_partitions[part];
	if (part == RAW_PART) {
		if (p->p_fstype != FS_UNUSED) {
			DPRINTF(DKDB_DUMP, ("%s: bad fstype %d\n", __func__,
			    p->p_fstype));
			return ENXIO;
		}
		/* Check whether dump goes to a wedge */
		if (dksc->sc_dkdev.dk_nwedges == 0) {
			DPRINTF(DKDB_DUMP, ("%s: dump to raw\n", __func__));
			return ENXIO;
		}
		/* Check transfer bounds against media size */
		if (blkno < 0 || (blkno + towrt) > dg->dg_secperunit) {
			DPRINTF(DKDB_DUMP, ("%s: out of bounds blkno=%jd, towrt=%d, "
			    "nsects=%jd\n", __func__, (intmax_t)blkno, towrt, dg->dg_secperunit));
			return EINVAL;
		}
	} else {
		int nsects, sectoff;

		if (p->p_fstype != FS_SWAP) {
			DPRINTF(DKDB_DUMP, ("%s: bad fstype %d\n", __func__,
			    p->p_fstype));
			return ENXIO;
		}
		nsects = p->p_size;
		sectoff = p->p_offset;

		/* Check transfer bounds against partition size. */
		if ((blkno < 0) || ((blkno + towrt) > nsects)) {
			DPRINTF(DKDB_DUMP, ("%s: out of bounds blkno=%jd, towrt=%d, "
			    "nsects=%d\n", __func__, (intmax_t)blkno, towrt, nsects));
			return EINVAL;
		}

		/* Offset block number to start of partition. */
		blkno += sectoff;
	}

	/* Start dumping and return when done. */
	maxblkcnt = howmany(maxxfer, lp->d_secsize);
	while (towrt > 0) {
		nblk = uimin(maxblkcnt, towrt);

		if ((rv = (*dkd->d_dumpblocks)(dksc->sc_dev, va, blkno, nblk))
		    != 0) {
			DPRINTF(DKDB_DUMP, ("%s: dumpblocks %d\n", __func__,
			    rv));
			return rv;
		}

		towrt -= nblk;
		blkno += nblk;
		va += nblk * lp->d_secsize;
	}

	if ((flags & DK_DUMP_RECURSIVE) == 0)
		dk_dumping = 0;

	return 0;
}

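/*
 * Construct a default disklabel from the disk geometry, covering the
 * whole unit with a single raw partition, and let the driver adjust
 * it via its d_label callback if it has one.
 */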
/* ARGSUSED */
void
dk_getdefaultlabel(struct dk_softc *dksc, struct disklabel *lp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;

	memset(lp, 0, sizeof(*lp));

	if (dg->dg_secperunit > UINT32_MAX)
		lp->d_secperunit = UINT32_MAX;
	else
		lp->d_secperunit = dg->dg_secperunit;
	lp->d_secsize = dg->dg_secsize;
	lp->d_nsectors = dg->dg_nsectors;
	lp->d_ntracks = dg->dg_ntracks;
	lp->d_ncylinders = dg->dg_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	strlcpy(lp->d_typename, dksc->sc_xname, sizeof(lp->d_typename));
	lp->d_type = dksc->sc_dtype;
	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;

	if (dkd->d_label)
		dkd->d_label(dksc->sc_dev, lp);

	lp->d_checksum = dkcksum(lp);
}

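/*
 * Read the on-disk label, falling back to a conjured one if that
 * fails, and optionally sanity-check it against the disk geometry.
 */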
/* ARGSUSED */
void
dk_getdisklabel(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct	 disklabel *lp = dksc->sc_dkdev.dk_label;
	struct	 cpu_disklabel *clp = dksc->sc_dkdev.dk_cpulabel;
	struct   disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct	 partition *pp;
	int	 i, lpratio, dgratio;
	const char	*errstring;

	memset(clp, 0x0, sizeof(*clp));
	dk_getdefaultlabel(dksc, lp);
	errstring = readdisklabel(DKLABELDEV(dev), dkd->d_strategy,
	    dksc->sc_dkdev.dk_label, dksc->sc_dkdev.dk_cpulabel);
	if (errstring) {
		dk_makedisklabel(dksc);
		if (dksc->sc_flags & DKF_WARNLABEL)
			printf("%s: %s\n", dksc->sc_xname, errstring);
		return;
	}

	if ((dksc->sc_flags & DKF_LABELSANITY) == 0)
		return;

	/* Convert sector counts to multiple of DEV_BSIZE for comparison */
	lpratio = dgratio = 1;
	if (lp->d_secsize > DEV_BSIZE)
		lpratio = lp->d_secsize / DEV_BSIZE;
	if (dg->dg_secsize > DEV_BSIZE)
		dgratio = dg->dg_secsize / DEV_BSIZE;

	/* Sanity check */
	if ((uint64_t)lp->d_secperunit * lpratio > dg->dg_secperunit * dgratio)
		printf("WARNING: %s: "
		    "total unit size in disklabel (%" PRIu64 ") "
		    "!= the size of %s (%" PRIu64 ")\n", dksc->sc_xname,
		    (uint64_t)lp->d_secperunit * lpratio, dksc->sc_xname,
		    dg->dg_secperunit * dgratio);
	else if (lp->d_secperunit < UINT32_MAX &&
	    (uint64_t)lp->d_secperunit * lpratio < dg->dg_secperunit * dgratio)
		printf("%s: %" PRIu64 " trailing sectors not covered"
		    " by disklabel\n", dksc->sc_xname,
		    (dg->dg_secperunit * dgratio)
		    - (lp->d_secperunit * lpratio));

	for (i=0; i < lp->d_npartitions; i++) {
		uint64_t pend;

		pp = &lp->d_partitions[i];
		pend = pp->p_offset + pp->p_size;
		if (pend * lpratio > dg->dg_secperunit * dgratio)
			printf("WARNING: %s: end of partition `%c' exceeds "
			    "the size of %s (%" PRIu64 ")\n", dksc->sc_xname,
			    'a' + i, dksc->sc_xname,
			    dg->dg_secperunit * dgratio);
	}
}

/*
 * Heuristic to conjure a disklabel if reading a disklabel failed.
 *
 * This is to allow the raw partition to be used for a filesystem
 * without caring about the write-protected label sector.
 *
 * If the driver provides its own label callback, use that instead.
 */
/* ARGSUSED */
static void
dk_makedisklabel(struct dk_softc *dksc)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct  disklabel *lp = dksc->sc_dkdev.dk_label;

	strlcpy(lp->d_packname, "default label", sizeof(lp->d_packname));

	if (dkd->d_label)
		dkd->d_label(dksc->sc_dev, lp);
	else
		lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;

	lp->d_checksum = dkcksum(lp);
}

MODULE(MODULE_CLASS_MISC, dk_subr, NULL);

static int
dk_subr_modcmd(modcmd_t cmd, void *arg)
{
	switch (cmd) {
	case MODULE_CMD_INIT:
	case MODULE_CMD_FINI:
		return 0;
	case MODULE_CMD_STAT:
	case MODULE_CMD_AUTOUNLOAD:
	default:
		return ENOTTY;
	}
}