1/*	$NetBSD: ld.c,v 1.112 2021/05/30 11:24:02 riastradh Exp $	*/
2
3/*-
4 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran and Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32/*
33 * Disk driver for use by RAID controllers.
34 */
35
36#include <sys/cdefs.h>
37__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.112 2021/05/30 11:24:02 riastradh Exp $");
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/device.h>
43#include <sys/queue.h>
44#include <sys/proc.h>
45#include <sys/buf.h>
46#include <sys/bufq.h>
47#include <sys/endian.h>
48#include <sys/disklabel.h>
49#include <sys/disk.h>
50#include <sys/dkio.h>
51#include <sys/stat.h>
52#include <sys/conf.h>
53#include <sys/fcntl.h>
54#include <sys/vnode.h>
55#include <sys/syslog.h>
56#include <sys/mutex.h>
57#include <sys/module.h>
58#include <sys/reboot.h>
59
60#include <dev/ldvar.h>
61
62#include "ioconf.h"
63
/* Internal helpers. */
static void	ldminphys(struct buf *bp);
static bool	ld_suspend(device_t, const pmf_qual_t *);
static bool	ld_resume(device_t, const pmf_qual_t *);
static bool	ld_shutdown(device_t, int);
static int	ld_diskstart(device_t, struct buf *bp);
static void	ld_iosize(device_t, int *);
static int	ld_dumpblocks(device_t, void *, daddr_t, int);
static void	ld_fake_geometry(struct ld_softc *);
static void	ld_set_geometry(struct ld_softc *);
static void	ld_config_interrupts (device_t);
static int	ld_lastclose(device_t);
static int	ld_discard(device_t, off_t, off_t);
static int	ld_flush(device_t, bool);

/* Device switch entry points (prototypes generated by dev_type_*). */
static dev_type_open(ldopen);
static dev_type_close(ldclose);
static dev_type_read(ldread);
static dev_type_write(ldwrite);
static dev_type_ioctl(ldioctl);
static dev_type_strategy(ldstrategy);
static dev_type_dump(lddump);
static dev_type_size(ldsize);
static dev_type_discard(lddiscard);
87
/*
 * Block device switch.  D_MPSAFE: entry points may be called without
 * the kernel lock.
 */
const struct bdevsw ld_bdevsw = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_strategy = ldstrategy,
	.d_ioctl = ldioctl,
	.d_dump = lddump,
	.d_psize = ldsize,
	.d_discard = lddiscard,
	.d_flag = D_DISK | D_MPSAFE
};
98
/*
 * Character (raw) device switch.  Unsupported operations use the
 * standard no-op stubs.
 */
const struct cdevsw ld_cdevsw = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_read = ldread,
	.d_write = ldwrite,
	.d_ioctl = ldioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = lddiscard,
	.d_flag = D_DISK | D_MPSAFE
};
113
/* Callbacks handed to the dk(9)/disk(9) framework for this driver. */
static const struct	dkdriver lddkdriver = {
	.d_open = ldopen,
	.d_close = ldclose,
	.d_strategy = ldstrategy,
	.d_iosize = ld_iosize,
	.d_minphys  = ldminphys,
	.d_diskstart = ld_diskstart,
	.d_dumpblocks = ld_dumpblocks,
	.d_lastclose = ld_lastclose,
	.d_discard = ld_discard
};
125
/*
 * Attach a logical disk.  Called by the hardware attachment after it
 * has filled in the softc (sector size, capacity, limits, flags); the
 * attachment sets LDF_ENABLED only when the unit is usable.  Wires the
 * unit into the dk(9) and disk(9) frameworks, registers with PMF, and
 * defers wedge discovery until interrupts are enabled.
 */
void
ldattach(struct ld_softc *sc, const char *default_strategy)
{
	device_t self = sc->sc_dv;
	struct dk_softc *dksc = &sc->sc_dksc;

	/* Set up locking before the enable check. */
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_drain, "lddrain");

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		return;
	}

	/* don't attach a disk that we cannot handle */
	if (sc->sc_secsize < DEV_BSIZE) {
		sc->sc_flags &= ~LDF_ENABLED;
		return;
	}

	/* Initialise dk and disk structure. */
	dk_init(dksc, self, DKTYPE_LD);
	disk_init(&dksc->sc_dkdev, dksc->sc_xname, &lddkdriver);

	/* Clamp the maximum transfer size to what the kernel can map. */
	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	/* Build synthetic geometry if necessary. */
	if (sc->sc_nheads == 0 || sc->sc_nsectors == 0 ||
	    sc->sc_ncylinders == 0)
	    ld_fake_geometry(sc);

	/* Unit size expressed in DEV_BSIZE blocks. */
	sc->sc_disksize512 = sc->sc_secperunit * sc->sc_secsize / DEV_BSIZE;

	if (sc->sc_flags & LDF_NO_RND)
		dksc->sc_flags |= DKF_NO_RND;

	/* Attach dk and disk subsystems */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);
	ld_set_geometry(sc);

	bufq_alloc(&dksc->sc_bufq, default_strategy, BUFQ_SORT_RAWBLOCK);

	/* Register with PMF */
	if (!pmf_device_register1(dksc->sc_dev, ld_suspend, ld_resume,
		ld_shutdown))
		aprint_error_dev(dksc->sc_dev,
		    "couldn't establish power handler\n");

	/* Discover wedges on this disk. */
	config_interrupts(sc->sc_dv, ld_config_interrupts);
}
178
179int
180ldadjqparam(struct ld_softc *sc, int xmax)
181{
182
183	mutex_enter(&sc->sc_mutex);
184	sc->sc_maxqueuecnt = xmax;
185	mutex_exit(&sc->sc_mutex);
186
187	return (0);
188}
189
190int
191ldbegindetach(struct ld_softc *sc, int flags)
192{
193	struct dk_softc *dksc = &sc->sc_dksc;
194	int error;
195
196	/* If we never attached properly, no problem with detaching.  */
197	if ((sc->sc_flags & LDF_ENABLED) == 0)
198		return 0;
199
200	/*
201	 * If the disk is still open, back out before we commit to
202	 * detaching.
203	 */
204	error = disk_begindetach(&dksc->sc_dkdev, ld_lastclose, dksc->sc_dev,
205	    flags);
206	if (error)
207		return error;
208
209	/* We are now committed to detaching.  Prevent new xfers.  */
210	ldadjqparam(sc, 0);
211
212	return 0;
213}
214
/*
 * Final phase of detach: drain hardware, discard queued buffers,
 * revoke open vnodes, and tear down the dk(9)/disk(9)/PMF state set
 * up by ldattach().  Must be called after ldbegindetach() succeeded.
 */
void
ldenddetach(struct ld_softc *sc)
{
	struct dk_softc *dksc = &sc->sc_dksc;
	int bmaj, cmaj, i, mn;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return;

	/* Wait for commands queued with the hardware to complete. */
	mutex_enter(&sc->sc_mutex);
	while (sc->sc_queuecnt > 0) {
		/* Give up after 30 seconds per wait rather than hang.  */
		if (cv_timedwait(&sc->sc_drain, &sc->sc_mutex, 30 * hz)) {
			/*
			 * XXX This seems like a recipe for crashing on
			 * use after free...
			 */
			printf("%s: not drained\n", dksc->sc_xname);
			break;
		}
	}
	mutex_exit(&sc->sc_mutex);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	/* Locate the major numbers. */
	bmaj = bdevsw_lookup_major(&ld_bdevsw);
	cmaj = cdevsw_lookup_major(&ld_cdevsw);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = DISKMINOR(device_unit(dksc->sc_dev), i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Detach from the disk list. */
	disk_detach(&dksc->sc_dkdev);
	disk_destroy(&dksc->sc_dkdev);

	dk_detach(dksc);

	/* Deregister with PMF */
	pmf_device_deregister(dksc->sc_dev);

	/*
	 * XXX We can't really flush the cache here, because the
	 * XXX device may already be non-existent from the controller's
	 * XXX perspective.
	 */
#if 0
	ld_flush(dksc->sc_dev, false);
#endif
	cv_destroy(&sc->sc_drain);
	mutex_destroy(&sc->sc_mutex);
}
276
/*
 * PMF suspend hook: block new I/O, drain outstanding transfers, and
 * flush the write cache.  On any failure the suspend is vetoed and
 * ld_resume() is invoked to undo the LDF_SUSPEND state.
 */
/* ARGSUSED */
static bool
ld_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct ld_softc *sc = device_private(dev);
	int queuecnt;
	bool ok = false;

	/* Block new requests and wait for outstanding requests to drain.  */
	mutex_enter(&sc->sc_mutex);
	KASSERT((sc->sc_flags & LDF_SUSPEND) == 0);
	sc->sc_flags |= LDF_SUSPEND;
	while ((queuecnt = sc->sc_queuecnt) > 0) {
		if (cv_timedwait(&sc->sc_drain, &sc->sc_mutex, 30 * hz))
			break;
	}
	mutex_exit(&sc->sc_mutex);

	/* Block suspend if we couldn't drain everything in 30sec.  */
	if (queuecnt > 0) {
		device_printf(dev, "timeout draining buffers\n");
		goto out;
	}

	/* Flush cache before we lose power.  If we can't, block suspend.  */
	if (ld_flush(dev, /*poll*/false) != 0) {
		device_printf(dev, "failed to flush cache\n");
		goto out;
	}

	/* Success!  */
	ok = true;

out:	if (!ok)
		(void)ld_resume(dev, qual);
	return ok;
}
314
315static bool
316ld_resume(device_t dev, const pmf_qual_t *qual)
317{
318	struct ld_softc *sc = device_private(dev);
319
320	/* Allow new requests to come in.  */
321	mutex_enter(&sc->sc_mutex);
322	KASSERT(sc->sc_flags & LDF_SUSPEND);
323	sc->sc_flags &= ~LDF_SUSPEND;
324	mutex_exit(&sc->sc_mutex);
325
326	/* Restart any pending queued requests.  */
327	dk_start(&sc->sc_dksc, NULL);
328
329	return true;
330}
331
332/* ARGSUSED */
333static bool
334ld_shutdown(device_t dev, int flags)
335{
336	if ((flags & RB_NOSYNC) == 0 && ld_flush(dev, true) != 0)
337		return false;
338
339	return true;
340}
341
342/* ARGSUSED */
343static int
344ldopen(dev_t dev, int flags, int fmt, struct lwp *l)
345{
346	struct ld_softc *sc;
347	struct dk_softc *dksc;
348	int unit;
349
350	unit = DISKUNIT(dev);
351	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
352		return (ENXIO);
353
354	if ((sc->sc_flags & LDF_ENABLED) == 0)
355		return (ENODEV);
356
357	dksc = &sc->sc_dksc;
358
359	return dk_open(dksc, dev, flags, fmt, l);
360}
361
362static int
363ld_lastclose(device_t self)
364{
365	ld_flush(self, false);
366
367	return 0;
368}
369
370/* ARGSUSED */
371static int
372ldclose(dev_t dev, int flags, int fmt, struct lwp *l)
373{
374	struct ld_softc *sc;
375	struct dk_softc *dksc;
376	int unit;
377
378	unit = DISKUNIT(dev);
379	sc = device_lookup_private(&ld_cd, unit);
380	dksc = &sc->sc_dksc;
381
382	return dk_close(dksc, dev, flags, fmt, l);
383}
384
385/* ARGSUSED */
386static int
387ldread(dev_t dev, struct uio *uio, int ioflag)
388{
389
390	return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
391}
392
393/* ARGSUSED */
394static int
395ldwrite(dev_t dev, struct uio *uio, int ioflag)
396{
397
398	return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
399}
400
401/* ARGSUSED */
402static int
403ldioctl(dev_t dev, u_long cmd, void *addr, int32_t flag, struct lwp *l)
404{
405	struct ld_softc *sc;
406	struct dk_softc *dksc;
407	int unit, error;
408
409	unit = DISKUNIT(dev);
410	sc = device_lookup_private(&ld_cd, unit);
411	dksc = &sc->sc_dksc;
412
413	error = 0;
414
415	/*
416	 * Some common checks so that individual attachments wouldn't need
417	 * to duplicate them.
418	 */
419	switch (cmd) {
420	case DIOCCACHESYNC:
421		/*
422		 * XXX Do we really need to care about having a writable
423		 * file descriptor here?
424		 */
425		if ((flag & FWRITE) == 0)
426			error = EBADF;
427		else
428			error = 0;
429		break;
430	}
431
432	if (error != 0)
433		return (error);
434
435	if (sc->sc_ioctl) {
436		if ((sc->sc_flags & LDF_MPSAFE) == 0)
437			KERNEL_LOCK(1, curlwp);
438		error = (*sc->sc_ioctl)(sc, cmd, addr, flag, 0);
439		if ((sc->sc_flags & LDF_MPSAFE) == 0)
440			KERNEL_UNLOCK_ONE(curlwp);
441		if (error != EPASSTHROUGH)
442			return (error);
443	}
444
445	/* something not handled by the attachment */
446	return dk_ioctl(dksc, dev, cmd, addr, flag, l);
447}
448
449/*
450 * Flush the device's cache.
451 */
452static int
453ld_flush(device_t self, bool poll)
454{
455	int error = 0;
456	struct ld_softc *sc = device_private(self);
457
458	if (sc->sc_ioctl) {
459		if ((sc->sc_flags & LDF_MPSAFE) == 0)
460			KERNEL_LOCK(1, curlwp);
461		error = (*sc->sc_ioctl)(sc, DIOCCACHESYNC, NULL, 0, poll);
462		if ((sc->sc_flags & LDF_MPSAFE) == 0)
463			KERNEL_UNLOCK_ONE(curlwp);
464		if (error != 0)
465			device_printf(self, "unable to flush cache\n");
466	}
467
468	return error;
469}
470
471static void
472ldstrategy(struct buf *bp)
473{
474	struct ld_softc *sc;
475	struct dk_softc *dksc;
476	int unit;
477
478	unit = DISKUNIT(bp->b_dev);
479	sc = device_lookup_private(&ld_cd, unit);
480	dksc = &sc->sc_dksc;
481
482	dk_strategy(dksc, bp);
483}
484
/*
 * dk(9) start callback: hand one buffer to the hardware.  Returns
 * EAGAIN when the queue is full or the unit is suspended, in which
 * case dk(9) retries the buffer later.  The queue-full/suspend test
 * is made twice: once unlocked as a cheap fast path, then again under
 * sc_mutex before committing.
 */
static int
ld_diskstart(device_t dev, struct buf *bp)
{
	struct ld_softc *sc = device_private(dev);
	int error;

	/* Unlocked fast-path check; re-checked under the lock below. */
	if (sc->sc_queuecnt >= sc->sc_maxqueuecnt ||
	    sc->sc_flags & LDF_SUSPEND) {
		if (sc->sc_flags & LDF_SUSPEND)
			aprint_debug_dev(dev, "i/o blocked while suspended\n");
		return EAGAIN;
	}

	/* Non-MPSAFE attachments expect the kernel lock held. */
	if ((sc->sc_flags & LDF_MPSAFE) == 0)
		KERNEL_LOCK(1, curlwp);

	mutex_enter(&sc->sc_mutex);

	if (sc->sc_queuecnt >= sc->sc_maxqueuecnt ||
	    sc->sc_flags & LDF_SUSPEND) {
		if (sc->sc_flags & LDF_SUSPEND)
			aprint_debug_dev(dev, "i/o blocked while suspended\n");
		error = EAGAIN;
	} else {
		/* Submit to the hardware; count it only on success. */
		error = (*sc->sc_start)(sc, bp);
		if (error == 0)
			sc->sc_queuecnt++;
	}

	mutex_exit(&sc->sc_mutex);

	if ((sc->sc_flags & LDF_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(curlwp);

	return error;
}
521
/*
 * Called by the attachment when a transfer completes.  Reports the
 * completion to dk(9), wakes anyone draining the queue, and restarts
 * the queue if there is now room for more transfers.
 */
void
lddone(struct ld_softc *sc, struct buf *bp)
{
	struct dk_softc *dksc = &sc->sc_dksc;

	dk_done(dksc, bp);

	mutex_enter(&sc->sc_mutex);
	/*
	 * "<=" (not "<"): the limit may have been lowered (even to 0 by
	 * ldbegindetach) while transfers were in flight, so wake waiters
	 * whenever we are at or below it.
	 */
	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
		cv_broadcast(&sc->sc_drain);
		/* Drop the lock before dk_start; it takes its own locks. */
		mutex_exit(&sc->sc_mutex);
		dk_start(dksc, NULL);
	} else
		mutex_exit(&sc->sc_mutex);
}
537
538static int
539ldsize(dev_t dev)
540{
541	struct ld_softc *sc;
542	struct dk_softc *dksc;
543	int unit;
544
545	unit = DISKUNIT(dev);
546	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
547		return (-1);
548	dksc = &sc->sc_dksc;
549
550	if ((sc->sc_flags & LDF_ENABLED) == 0)
551		return (-1);
552
553	return dk_size(dksc, dev);
554}
555
556/*
557 * Take a dump.
558 */
559static int
560lddump(dev_t dev, daddr_t blkno, void *va, size_t size)
561{
562	struct ld_softc *sc;
563	struct dk_softc *dksc;
564	int unit;
565
566	unit = DISKUNIT(dev);
567	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
568		return (ENXIO);
569	dksc = &sc->sc_dksc;
570
571	if ((sc->sc_flags & LDF_ENABLED) == 0)
572		return (ENODEV);
573
574	return dk_dump(dksc, dev, blkno, va, size, 0);
575}
576
577static int
578ld_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
579{
580	struct ld_softc *sc = device_private(dev);
581
582	if (sc->sc_dump == NULL)
583		return (ENODEV);
584
585	return (*sc->sc_dump)(sc, va, blkno, nblk);
586}
587
588/*
589 * Adjust the size of a transfer.
590 */
591static void
592ldminphys(struct buf *bp)
593{
594	int unit;
595	struct ld_softc *sc;
596
597	unit = DISKUNIT(bp->b_dev);
598	sc = device_lookup_private(&ld_cd, unit);
599
600	ld_iosize(sc->sc_dv, &bp->b_bcount);
601	minphys(bp);
602}
603
/*
 * dk(9) iosize callback: clamp a transfer length to the controller's
 * maximum transfer size.
 */
static void
ld_iosize(device_t d, int *countp)
{
	struct ld_softc *sc = device_private(d);

	if (*countp > sc->sc_maxxfer)
		*countp = sc->sc_maxxfer;
}
612
613static void
614ld_fake_geometry(struct ld_softc *sc)
615{
616	uint64_t ncyl;
617
618	if (sc->sc_secperunit <= 528 * 2048)		/* 528MB */
619		sc->sc_nheads = 16;
620	else if (sc->sc_secperunit <= 1024 * 2048)	/* 1GB */
621		sc->sc_nheads = 32;
622	else if (sc->sc_secperunit <= 21504 * 2048)	/* 21GB */
623		sc->sc_nheads = 64;
624	else if (sc->sc_secperunit <= 43008 * 2048)	/* 42GB */
625		sc->sc_nheads = 128;
626	else
627		sc->sc_nheads = 255;
628
629	sc->sc_nsectors = 63;
630	sc->sc_ncylinders = INT_MAX;
631	ncyl = sc->sc_secperunit /
632	    (sc->sc_nheads * sc->sc_nsectors);
633	if (ncyl < INT_MAX)
634		sc->sc_ncylinders = (int)ncyl;
635}
636
637static void
638ld_set_geometry(struct ld_softc *sc)
639{
640	struct dk_softc *dksc = &sc->sc_dksc;
641	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
642	char tbuf[9];
643
644	format_bytes(tbuf, sizeof(tbuf), sc->sc_secperunit *
645	    sc->sc_secsize);
646	aprint_normal_dev(dksc->sc_dev, "%s, %d cyl, %d head, %d sec, "
647	    "%d bytes/sect x %"PRIu64" sectors\n",
648	    tbuf, sc->sc_ncylinders, sc->sc_nheads,
649	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);
650
651	memset(dg, 0, sizeof(*dg));
652	dg->dg_secperunit = sc->sc_secperunit;
653	dg->dg_secsize = sc->sc_secsize;
654	dg->dg_nsectors = sc->sc_nsectors;
655	dg->dg_ntracks = sc->sc_nheads;
656	dg->dg_ncylinders = sc->sc_ncylinders;
657
658	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, sc->sc_typename);
659}
660
661static void
662ld_config_interrupts(device_t d)
663{
664	struct ld_softc *sc = device_private(d);
665	struct dk_softc *dksc = &sc->sc_dksc;
666
667	dkwedge_discover(&dksc->sc_dkdev);
668}
669
670static int
671ld_discard(device_t dev, off_t pos, off_t len)
672{
673	struct ld_softc *sc = device_private(dev);
674	struct buf dbuf, *bp = &dbuf;
675	int error = 0;
676
677	KASSERT(len <= INT_MAX);
678
679	if (sc->sc_discard == NULL)
680		return (ENODEV);
681
682	if ((sc->sc_flags & LDF_MPSAFE) == 0)
683		KERNEL_LOCK(1, curlwp);
684
685	buf_init(bp);
686	bp->b_vp = NULL;
687	bp->b_data = NULL;
688	bp->b_bufsize = 0;
689	bp->b_rawblkno = pos / sc->sc_secsize;
690	bp->b_bcount = len;
691	bp->b_flags = B_WRITE;
692	bp->b_cflags = BC_BUSY;
693
694	error = (*sc->sc_discard)(sc, bp);
695	if (error == 0)
696		error = biowait(bp);
697
698	buf_destroy(bp);
699
700	if ((sc->sc_flags & LDF_MPSAFE) == 0)
701		KERNEL_UNLOCK_ONE(curlwp);
702
703	return error;
704}
705
706void
707lddiscardend(struct ld_softc *sc, struct buf *bp)
708{
709
710	if (bp->b_error)
711		bp->b_resid = bp->b_bcount;
712	biodone(bp);
713}
714
715static int
716lddiscard(dev_t dev, off_t pos, off_t len)
717{
718	struct ld_softc *sc;
719	struct dk_softc *dksc;
720	int unit;
721
722	unit = DISKUNIT(dev);
723	sc = device_lookup_private(&ld_cd, unit);
724	dksc = &sc->sc_dksc;
725
726	return dk_discard(dksc, dev, pos, len);
727}
728
/* Module glue; depends on the dk(9) helper module. */
MODULE(MODULE_CLASS_DRIVER, ld, "dk_subr");

#ifdef _MODULE
/* When built as a module, supply the autoconf driver record. */
CFDRIVER_DECL(ld, DV_DISK, NULL);
#endif
734
735static int
736ld_modcmd(modcmd_t cmd, void *opaque)
737{
738#ifdef _MODULE
739	devmajor_t bmajor, cmajor;
740#endif
741	int error = 0;
742
743#ifdef _MODULE
744	switch (cmd) {
745	case MODULE_CMD_INIT:
746		bmajor = cmajor = -1;
747		error = devsw_attach(ld_cd.cd_name, &ld_bdevsw, &bmajor,
748		    &ld_cdevsw, &cmajor);
749		if (error)
750			break;
751		error = config_cfdriver_attach(&ld_cd);
752		break;
753	case MODULE_CMD_FINI:
754		error = config_cfdriver_detach(&ld_cd);
755		if (error)
756			break;
757		devsw_detach(&ld_bdevsw, &ld_cdevsw);
758		break;
759	default:
760		error = ENOTTY;
761		break;
762	}
763#endif
764
765	return error;
766}
767