kern_conf.c revision 171182
1/*-
2 * Copyright (c) 1999-2002 Poul-Henning Kamp
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/kern_conf.c 171182 2007-07-03 17:43:20Z kib $");
29
30#include <sys/param.h>
31#include <sys/kernel.h>
32#include <sys/systm.h>
33#include <sys/bio.h>
34#include <sys/lock.h>
35#include <sys/mutex.h>
36#include <sys/module.h>
37#include <sys/malloc.h>
38#include <sys/conf.h>
39#include <sys/vnode.h>
40#include <sys/queue.h>
41#include <sys/poll.h>
42#include <sys/sx.h>
43#include <sys/ctype.h>
44#include <sys/tty.h>
45#include <sys/ucred.h>
46#include <sys/taskqueue.h>
47#include <machine/stdarg.h>
48
49#include <fs/devfs/devfs_int.h>
50
51static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");
52
53struct mtx devmtx;
54static void destroy_devl(struct cdev *dev);
55
56static struct cdev *make_dev_credv(int flags,
57    struct cdevsw *devsw, int minornr,
58    struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
59    va_list ap);
60
61static struct cdev_priv_list cdevp_free_list =
62    TAILQ_HEAD_INITIALIZER(cdevp_free_list);
63
/*
 * Acquire the global device mutex (devmtx), which protects the cdev
 * lists, flags and reference counts manipulated in this file.
 */
void
dev_lock(void)
{

	mtx_lock(&devmtx);
}
70
/*
 * Drop devmtx and dispose of every cdev queued on cdevp_free_list by
 * dev_free_devlocked().  devmtx is released around each devfs_free()
 * call (presumably devfs_free() must not run with the mutex held);
 * the list head is re-read after relocking, so entries queued
 * concurrently are freed too.
 */
static void
dev_unlock_and_free(void)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	while ((cdp = TAILQ_FIRST(&cdevp_free_list)) != NULL) {
		TAILQ_REMOVE(&cdevp_free_list, cdp, cdp_list);
		mtx_unlock(&devmtx);
		devfs_free(&cdp->cdp_c);
		mtx_lock(&devmtx);
	}
	mtx_unlock(&devmtx);
}
85
/*
 * Queue a cdev for deferred release; the actual devfs_free() happens
 * later in dev_unlock_and_free().  Caller must hold devmtx.
 */
static void
dev_free_devlocked(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev->si_priv;
	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}
95
/*
 * Release the global device mutex without touching the free list;
 * use dev_unlock_and_free() when queued cdevs must be reaped.
 */
void
dev_unlock(void)
{

	mtx_unlock(&devmtx);
}
102
/*
 * Take a reference on a cdev; unlocked entry point (acquires devmtx).
 */
void
dev_ref(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	mtx_lock(&devmtx);
	dev->si_refcount++;
	mtx_unlock(&devmtx);
}
112
/*
 * Take a reference on a cdev; "l" variant for callers that already
 * hold devmtx.
 */
void
dev_refl(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_OWNED);
	dev->si_refcount++;
}
120
/*
 * Drop a reference on a cdev.  If the driver is gone (si_devsw NULL,
 * cleared by destroy_devl()) and this was the last reference, unlink
 * the cdev from its cdevsw list and free it — the devfs_free() call
 * is made after dropping devmtx.
 */
void
dev_rel(struct cdev *dev)
{
	int flag = 0;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_refcount--;
	KASSERT(dev->si_refcount >= 0,
	    ("dev_rel(%s) gave negative count", devtoname(dev)));
#if 0
	if (dev->si_usecount == 0 &&
	    (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED))
		;
	else
#endif
	if (dev->si_devsw == NULL && dev->si_refcount == 0) {
		LIST_REMOVE(dev, si_list);
		flag = 1;
	}
	dev_unlock();
	if (flag)
		devfs_free(dev);
}
145
/*
 * Pin the device for a driver-method call: bump si_threadcount and
 * return the cdevsw, or NULL when the device has no driver or is
 * scheduled for destruction (CDP_SCHED_DTR).  Balanced by
 * dev_relthread().
 */
struct cdevsw *
dev_refthread(struct cdev *dev)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	csw = dev->si_devsw;
	if (csw != NULL) {
		cdp = dev->si_priv;
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
			dev->si_threadcount++;
		else
			csw = NULL;
	}
	dev_unlock();
	return (csw);
}
165
/*
 * As dev_refthread(), but start from a vnode: the cdev is read from
 * vp->v_rdev under devmtx and returned through *devp.  Returns the
 * cdevsw with si_threadcount bumped, or NULL if there is no device,
 * no driver, or destruction is scheduled.
 */
struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	csw = NULL;
	dev_lock();
	*devp = vp->v_rdev;
	if (*devp != NULL) {
		cdp = (*devp)->si_priv;
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
			csw = (*devp)->si_devsw;
			if (csw != NULL)
				(*devp)->si_threadcount++;
		}
	}
	dev_unlock();
	return (csw);
}
187
/*
 * Release the per-thread pin taken by dev_refthread()/devvn_refthread().
 * destroy_devl() waits for si_threadcount to reach zero.
 */
void
dev_relthread(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_threadcount--;
	dev_unlock();
}
197
/*
 * Generic "do nothing, report success" routine, used (cast) as the
 * default open/close method.
 */
int
nullop(void)
{

	return (0);
}
204
/*
 * Generic stub returning EOPNOTSUPP, for operations a driver does not
 * support.
 */
int
eopnotsupp(void)
{

	return (EOPNOTSUPP);
}
211
/* Stub returning ENXIO; backs the dead_* method macros below. */
static int
enxio(void)
{

	return (ENXIO);
}
217
/* Stub returning ENODEV; backs the no_* default-method macros below. */
static int
enodev(void)
{

	return (ENODEV);
}
223
224/* Define a dead_cdevsw for use when devices leave unexpectedly. */
225
226#define dead_open	(d_open_t *)enxio
227#define dead_close	(d_close_t *)enxio
228#define dead_read	(d_read_t *)enxio
229#define dead_write	(d_write_t *)enxio
230#define dead_ioctl	(d_ioctl_t *)enxio
231#define dead_poll	(d_poll_t *)enodev
232#define dead_mmap	(d_mmap_t *)enodev
233
/*
 * Strategy method for the dead cdevsw: complete every bio immediately
 * with ENXIO.
 */
static void
dead_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENXIO);
}
240
241#define dead_dump	(dumper_t *)enxio
242#define dead_kqfilter	(d_kqfilter_t *)enxio
243
/*
 * cdevsw installed on cdevs whose driver has departed: every method
 * fails with ENXIO/ENODEV.  destroy_devl() parks still-referenced
 * cdevs on dead_cdevsw.d_devs.
 */
static struct cdevsw dead_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT, /* XXX: does dead_strategy need this ? */
	.d_open =	dead_open,
	.d_close =	dead_close,
	.d_read =	dead_read,
	.d_write =	dead_write,
	.d_ioctl =	dead_ioctl,
	.d_poll =	dead_poll,
	.d_mmap =	dead_mmap,
	.d_strategy =	dead_strategy,
	.d_name =	"dead",
	.d_dump =	dead_dump,
	.d_kqfilter =	dead_kqfilter
};
259
260/* Default methods if driver does not specify method */
261
262#define null_open	(d_open_t *)nullop
263#define null_close	(d_close_t *)nullop
264#define no_read		(d_read_t *)enodev
265#define no_write	(d_write_t *)enodev
266#define no_ioctl	(d_ioctl_t *)enodev
267#define no_mmap		(d_mmap_t *)enodev
268#define no_kqfilter	(d_kqfilter_t *)enodev
269
/*
 * Default strategy method for drivers without one: fail every bio
 * with ENODEV.
 */
static void
no_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENODEV);
}
276
277static int
278no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
279{
280	/*
281	 * Return true for read/write.  If the user asked for something
282	 * special, return POLLNVAL, so that clients have a way of
283	 * determining reliably whether or not the extended
284	 * functionality is present without hard-coding knowledge
285	 * of specific filesystem implementations.
286	 * Stay in sync with vop_nopoll().
287	 */
288	if (events & ~POLLSTANDARD)
289		return (POLLNVAL);
290
291	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
292}
293
294#define no_dump		(dumper_t *)enodev
295
/*
 * Giant-locked shim for d_open.  prep_cdevsw() installs these shims
 * for D_NEEDGIANT drivers and stashes a copy of the driver's original
 * cdevsw in d_gianttrick, which the shim calls under Giant.
 */
static int
giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_open(dev, oflags, devtype, td);
	mtx_unlock(&Giant);
	return (retval);
}
307
/* Giant-locked shim for d_fdopen (see giant_open). */
static int
giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_fdopen(dev, oflags, td, fp);
	mtx_unlock(&Giant);
	return (retval);
}
319
/* Giant-locked shim for d_close (see giant_open). */
static int
giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_close(dev, fflag, devtype, td);
	mtx_unlock(&Giant);
	return (retval);
}
331
/* Giant-locked shim for d_strategy; the cdev comes from bio_dev. */
static void
giant_strategy(struct bio *bp)
{

	mtx_lock(&Giant);
	bp->bio_dev->si_devsw->d_gianttrick->
	    d_strategy(bp);
	mtx_unlock(&Giant);
}
341
/* Giant-locked shim for d_ioctl (see giant_open). */
static int
giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_ioctl(dev, cmd, data, fflag, td);
	mtx_unlock(&Giant);
	return (retval);
}
353
/* Giant-locked shim for d_read (see giant_open). */
static int
giant_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_read(dev, uio, ioflag);
	mtx_unlock(&Giant);
	return (retval);
}
365
/* Giant-locked shim for d_write (see giant_open). */
static int
giant_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
		d_write(dev, uio, ioflag);
	mtx_unlock(&Giant);
	return (retval);
}
377
/* Giant-locked shim for d_poll (see giant_open). */
static int
giant_poll(struct cdev *dev, int events, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_poll(dev, events, td);
	mtx_unlock(&Giant);
	return (retval);
}
389
/* Giant-locked shim for d_kqfilter (see giant_open). */
static int
giant_kqfilter(struct cdev *dev, struct knote *kn)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_kqfilter(dev, kn);
	mtx_unlock(&Giant);
	return (retval);
}
401
/* Giant-locked shim for d_mmap (see giant_open). */
static int
giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_mmap(dev, offset, paddr, nprot);
	mtx_unlock(&Giant);
	return (retval);
}
413
414
415/*
416 * struct cdev * and u_dev_t primitives
417 */
418
419int
420minor(struct cdev *x)
421{
422	if (x == NULL)
423		return NODEV;
424	return(x->si_drv0 & MAXMINOR);
425}
426
427int
428dev2unit(struct cdev *x)
429{
430
431	if (x == NULL)
432		return NODEV;
433	return (minor2unit(minor(x)));
434}
435
436u_int
437minor2unit(u_int _minor)
438{
439
440	KASSERT((_minor & ~MAXMINOR) == 0, ("Illegal minor %x", _minor));
441	return ((_minor & 0xff) | ((_minor >> 8) & 0xffff00));
442}
443
/*
 * Convert a unit number to a minor number: the low byte passes
 * through, unit bits 8..23 move up to minor bits 16..31, leaving
 * minor bits 8..15 free for flag bits (see clone_create()).  Inverse
 * of minor2unit().
 */
int
unit2minor(int unit)
{
	int lowbyte, hibits;

	KASSERT(unit <= 0xffffff, ("Invalid unit (%d) in unit2minor", unit));
	lowbyte = unit & 0xff;
	hibits = (unit << 8) & ~0xffff;
	return (lowbyte | hibits);
}
451
/*
 * Look up or install the cdev for (csw, minor y).  If a cdev with the
 * same si_drv0 already exists on the cdevsw's list, the caller's
 * freshly allocated 'si' is queued for release and the existing cdev
 * returned; otherwise 'si' is initialized and linked in.  Caller
 * holds devmtx.
 */
static struct cdev *
newdev(struct cdevsw *csw, int y, struct cdev *si)
{
	struct cdev *si2;
	dev_t	udev;

	mtx_assert(&devmtx, MA_OWNED);
	udev = y;
	LIST_FOREACH(si2, &csw->d_devs, si_list) {
		if (si2->si_drv0 == udev) {
			dev_free_devlocked(si);
			return (si2);
		}
	}
	si->si_drv0 = udev;
	si->si_devsw = csw;
	LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
	return (si);
}
471
472int
473uminor(dev_t dev)
474{
475	return (dev & MAXMINOR);
476}
477
478int
479umajor(dev_t dev)
480{
481	return ((dev & ~MAXMINOR) >> 8);
482}
483
484static void
485fini_cdevsw(struct cdevsw *devsw)
486{
487	struct cdevsw *gt;
488
489	if (devsw->d_gianttrick != NULL) {
490		gt = devsw->d_gianttrick;
491		memcpy(devsw, gt, sizeof *devsw);
492		free(gt, M_DEVT);
493		devsw->d_gianttrick = NULL;
494	}
495	devsw->d_flags &= ~D_INIT;
496}
497
/*
 * One-time initialization of a driver's cdevsw, run before its first
 * cdev is created: disable drivers with a mismatched ABI version,
 * fill in tty defaults for D_TTY drivers, save a pristine copy in
 * d_gianttrick for D_NEEDGIANT drivers, and substitute default or
 * Giant-wrapped methods for missing/raw entries.  Sets D_INIT when
 * done; fini_cdevsw() reverses this.
 */
static void
prep_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *dsw2;

	/* Allocate the Giant trick copy before taking devmtx. */
	if (devsw->d_flags & D_NEEDGIANT)
		dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK);
	else
		dsw2 = NULL;
	dev_lock();

	if (devsw->d_version != D_VERSION_01) {
		/* ABI mismatch: neuter the driver with dead_* methods. */
		printf(
		    "WARNING: Device driver \"%s\" has wrong version %s\n",
		    devsw->d_name == NULL ? "???" : devsw->d_name,
		    "and is disabled.  Recompile KLD module.");
		devsw->d_open = dead_open;
		devsw->d_close = dead_close;
		devsw->d_read = dead_read;
		devsw->d_write = dead_write;
		devsw->d_ioctl = dead_ioctl;
		devsw->d_poll = dead_poll;
		devsw->d_mmap = dead_mmap;
		devsw->d_strategy = dead_strategy;
		devsw->d_dump = dead_dump;
		devsw->d_kqfilter = dead_kqfilter;
	}

	/* tty drivers get the generic tty methods for missing entries. */
	if (devsw->d_flags & D_TTY) {
		if (devsw->d_ioctl == NULL)	devsw->d_ioctl = ttyioctl;
		if (devsw->d_read == NULL)	devsw->d_read = ttyread;
		if (devsw->d_write == NULL)	devsw->d_write = ttywrite;
		if (devsw->d_kqfilter == NULL)	devsw->d_kqfilter = ttykqfilter;
		if (devsw->d_poll == NULL)	devsw->d_poll = ttypoll;
	}

	/*
	 * Save the unmodified cdevsw in d_gianttrick so the giant_*
	 * shims can call the driver's real methods.  If a previous
	 * prep already saved one, discard our allocation.
	 */
	if (devsw->d_flags & D_NEEDGIANT) {
		if (devsw->d_gianttrick == NULL) {
			memcpy(dsw2, devsw, sizeof *dsw2);
			devsw->d_gianttrick = dsw2;
		} else
			free(dsw2, M_DEVT);
	}

/*
 * Missing method -> default 'noop'; present method on a D_NEEDGIANT
 * driver -> giant_* shim (which finds the original via d_gianttrick).
 */
#define FIXUP(member, noop, giant) 				\
	do {							\
		if (devsw->member == NULL) {			\
			devsw->member = noop;			\
		} else if (devsw->d_flags & D_NEEDGIANT)	\
			devsw->member = giant;			\
		}						\
	while (0)

	FIXUP(d_open,		null_open,	giant_open);
	FIXUP(d_fdopen,		NULL,		giant_fdopen);
	FIXUP(d_close,		null_close,	giant_close);
	FIXUP(d_read,		no_read,	giant_read);
	FIXUP(d_write,		no_write,	giant_write);
	FIXUP(d_ioctl,		no_ioctl,	giant_ioctl);
	FIXUP(d_poll,		no_poll,	giant_poll);
	FIXUP(d_mmap,		no_mmap,	giant_mmap);
	FIXUP(d_strategy,	no_strategy,	giant_strategy);
	FIXUP(d_kqfilter,	no_kqfilter,	giant_kqfilter);

	if (devsw->d_dump == NULL)	devsw->d_dump = no_dump;

	LIST_INIT(&devsw->d_devs);

	devsw->d_flags |= D_INIT;

	dev_unlock();
}
570
571struct cdev *
572make_dev_credv(int flags, struct cdevsw *devsw, int minornr,
573    struct ucred *cr, uid_t uid,
574    gid_t gid, int mode, const char *fmt, va_list ap)
575{
576	struct cdev *dev;
577	int i;
578
579	KASSERT((minornr & ~MAXMINOR) == 0,
580	    ("Invalid minor (0x%x) in make_dev", minornr));
581
582	if (!(devsw->d_flags & D_INIT))
583		prep_cdevsw(devsw);
584	dev = devfs_alloc();
585	dev_lock();
586	dev = newdev(devsw, minornr, dev);
587	if (flags & MAKEDEV_REF)
588		dev_refl(dev);
589	if (dev->si_flags & SI_CHEAPCLONE &&
590	    dev->si_flags & SI_NAMED) {
591		/*
592		 * This is allowed as it removes races and generally
593		 * simplifies cloning devices.
594		 * XXX: still ??
595		 */
596		dev_unlock_and_free();
597		return (dev);
598	}
599	KASSERT(!(dev->si_flags & SI_NAMED),
600	    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
601	    devsw->d_name, minor(dev), devtoname(dev)));
602
603	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
604	if (i > (sizeof dev->__si_namebuf - 1)) {
605		printf("WARNING: Device name truncated! (%s)\n",
606		    dev->__si_namebuf);
607	}
608
609	dev->si_flags |= SI_NAMED;
610	if (cr != NULL)
611		dev->si_cred = crhold(cr);
612	else
613		dev->si_cred = NULL;
614	dev->si_uid = uid;
615	dev->si_gid = gid;
616	dev->si_mode = mode;
617
618	devfs_create(dev);
619	dev_unlock();
620	return (dev);
621}
622
623struct cdev *
624make_dev(struct cdevsw *devsw, int minornr, uid_t uid, gid_t gid, int mode,
625    const char *fmt, ...)
626{
627	struct cdev *dev;
628	va_list ap;
629
630	va_start(ap, fmt);
631	dev = make_dev_credv(0, devsw, minornr, NULL, uid, gid, mode, fmt, ap);
632	va_end(ap);
633	return (dev);
634}
635
636struct cdev *
637make_dev_cred(struct cdevsw *devsw, int minornr, struct ucred *cr, uid_t uid,
638    gid_t gid, int mode, const char *fmt, ...)
639{
640	struct cdev *dev;
641	va_list ap;
642
643	va_start(ap, fmt);
644	dev = make_dev_credv(0, devsw, minornr, cr, uid, gid, mode, fmt, ap);
645	va_end(ap);
646
647	return (dev);
648}
649
650struct cdev *
651make_dev_credf(int flags, struct cdevsw *devsw, int minornr,
652    struct ucred *cr, uid_t uid,
653    gid_t gid, int mode, const char *fmt, ...)
654{
655	struct cdev *dev;
656	va_list ap;
657
658	va_start(ap, fmt);
659	dev = make_dev_credv(flags, devsw, minornr, cr, uid, gid, mode,
660	    fmt, ap);
661	va_end(ap);
662
663	return (dev);
664}
665
/*
 * Record cdev as a child of pdev (locked variant, caller holds
 * devmtx).  destroy_devl() on the parent destroys all children.
 */
static void
dev_dependsl(struct cdev *pdev, struct cdev *cdev)
{

	cdev->si_parent = pdev;
	cdev->si_flags |= SI_CHILD;
	LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
}
674
675
/*
 * Public wrapper for dev_dependsl(): make cdev's lifetime depend on
 * pdev's.
 */
void
dev_depends(struct cdev *pdev, struct cdev *cdev)
{

	dev_lock();
	dev_dependsl(pdev, cdev);
	dev_unlock();
}
684
/*
 * Create an alias (second name) for an existing device.  The alias
 * cdev carries SI_ALIAS, never gets a devsw of its own, and is made
 * a child of pdev so it is destroyed along with it.
 */
struct cdev *
make_dev_alias(struct cdev *pdev, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int i;

	dev = devfs_alloc();
	dev_lock();
	dev->si_flags |= SI_ALIAS;
	dev->si_flags |= SI_NAMED;
	va_start(ap, fmt);
	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}
	va_end(ap);

	devfs_create(dev);
	dev_unlock();
	dev_depends(pdev, dev);
	return (dev);
}
709
/*
 * Core device destruction, called with devmtx held: unregister from
 * devfs, detach from parent, recursively destroy children, detach
 * the driver and drain all threads out of it (may sleep with devmtx
 * held via msleep), then free the cdev or, if references remain,
 * park it on dead_cdevsw's list until dev_rel() drops the last one.
 */
static void
destroy_devl(struct cdev *dev)
{
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_OWNED);
	KASSERT(dev->si_flags & SI_NAMED,
	    ("WARNING: Driver mistake: destroy_dev on %d\n", minor(dev)));

	devfs_destroy(dev);

	/* Remove name marking */
	dev->si_flags &= ~SI_NAMED;

	/* If we are a child, remove us from the parents list */
	if (dev->si_flags & SI_CHILD) {
		LIST_REMOVE(dev, si_siblings);
		dev->si_flags &= ~SI_CHILD;
	}

	/* Kill our children */
	while (!LIST_EMPTY(&dev->si_children))
		destroy_devl(LIST_FIRST(&dev->si_children));

	/* Remove from clone list */
	if (dev->si_flags & SI_CLONELIST) {
		LIST_REMOVE(dev, si_clone);
		dev->si_flags &= ~SI_CLONELIST;
	}

	dev->si_refcount++;	/* Avoid race with dev_rel() */
	csw = dev->si_devsw;
	dev->si_devsw = NULL;	/* already NULL for SI_ALIAS */
	/*
	 * Ask the driver to kick out in-flight threads (d_purge), then
	 * wait for si_threadcount (dev_refthread pins) to drain.
	 */
	while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
		csw->d_purge(dev);
		msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
		if (dev->si_threadcount)
			printf("Still %lu threads in %s\n",
			    dev->si_threadcount, devtoname(dev));
	}
	while (dev->si_threadcount != 0) {
		/* Use unique dummy wait ident */
		msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10);
	}

	dev->si_drv1 = 0;
	dev->si_drv2 = 0;
	bzero(&dev->__si_u, sizeof(dev->__si_u));

	if (!(dev->si_flags & SI_ALIAS)) {
		/* Remove from cdevsw list */
		LIST_REMOVE(dev, si_list);

		/* If cdevsw has no more struct cdev *'s, clean it */
		if (LIST_EMPTY(&csw->d_devs)) {
			fini_cdevsw(csw);
			/* Wake up sleepers in destroy_dev_drain(). */
			wakeup(&csw->d_devs);
		}
	}
	dev->si_flags &= ~SI_ALIAS;
	dev->si_refcount--;	/* Avoid race with dev_rel() */

	if (dev->si_refcount > 0) {
		LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
	} else {
		dev_free_devlocked(dev);
	}
}
778
779void
780destroy_dev(struct cdev *dev)
781{
782	struct cdevsw *csw;
783
784	dev_lock();
785	csw = dev->si_devsw;
786	if ((csw != NULL && csw->d_purge != NULL) ||
787	    dev->si_threadcount == 0) {
788		destroy_devl(dev);
789		dev_unlock_and_free();
790	} else
791		destroy_dev_sched(dev);
792}
793
/*
 * Return the device's name, synthesizing a "(driver)/minor" form into
 * si_name for anonymous devices (name empty or starting with '#').
 * NOTE(review): the sprintf() calls write into the fixed-size si_name
 * buffer with no explicit bounds check here — presumably d_name is
 * short enough by convention; confirm against struct cdev's layout.
 */
const char *
devtoname(struct cdev *dev)
{
	char *p;
	struct cdevsw *csw;
	int mynor;

	if (dev->si_name[0] == '#' || dev->si_name[0] == '\0') {
		p = dev->si_name;
		csw = dev_refthread(dev);
		if (csw != NULL) {
			sprintf(p, "(%s)", csw->d_name);
			dev_relthread(dev);
		}
		p += strlen(p);
		mynor = minor(dev);
		if (mynor < 0 || mynor > 255)
			sprintf(p, "/%#x", (u_int)mynor);
		else
			sprintf(p, "/%d", mynor);
	}
	return (dev->si_name);
}
817
/*
 * Parse a clone device name of the form "<stem><unit>[suffix]".
 * Returns 0 when the name does not match the stem, has no digits
 * after it, has a leading zero, or the unit exceeds 0xffffff.
 * Otherwise stores the unit in *unit, points *namep (if non-NULL) at
 * the first character after the digits, and returns 1 for an exact
 * match or 2 when trailing characters remain.
 */
int
dev_stdclone(char *name, char **namep, const char *stem, int *unit)
{
	int val, pos;

	pos = strlen(stem);
	if (bcmp(stem, name, pos) != 0)
		return (0);
	if (!isdigit(name[pos]))
		return (0);
	/* Leading zeroes are rejected to keep names canonical. */
	if (name[pos] == '0' && isdigit(name[pos + 1]))
		return (0);
	val = 0;
	while (isdigit(name[pos])) {
		val = val * 10 + (name[pos] - '0');
		pos++;
	}
	if (val > 0xffffff)
		return (0);
	*unit = val;
	if (namep != NULL)
		*namep = &name[pos];
	return (name[pos] != '\0' ? 2 : 1);
}
844
845/*
846 * Helper functions for cloning device drivers.
847 *
848 * The objective here is to make it unnecessary for the device drivers to
849 * use rman or similar to manage their unit number space.  Due to the way
850 * we do "on-demand" devices, using rman or other "private" methods
851 * will be very tricky to lock down properly once we lock down this file.
852 *
853 * Instead we give the drivers these routines which puts the struct cdev *'s
854 * that are to be managed on their own list, and gives the driver the ability
855 * to ask for the first free unit number or a given specified unit number.
856 *
857 * In addition these routines support paired devices (pty, nmdm and similar)
858 * by respecting a number of "flag" bits in the minor number.
859 *
860 */
861
/* Per-driver list of clone cdevs, kept sorted by unit number. */
struct clonedevs {
	LIST_HEAD(,cdev)	head;
};
865
/*
 * Allocate and initialize a driver's clonedevs list head; must be
 * called before clone_create().  Undone by clone_cleanup().
 */
void
clone_setup(struct clonedevs **cdp)
{

	*cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
	LIST_INIT(&(*cdp)->head);
}
873
/*
 * Find or create the clone cdev for unit *up (or the lowest free unit
 * when *up == -1), with 'extra' flag bits folded into the unit space.
 * Returns 0 and sets *dp when the device already existed; returns 1
 * for a newly created device, with the chosen unit written back to
 * *up.  The per-driver clone list is kept sorted by unit so the
 * lowest-free-unit scan and the ordered insert share one pass.
 */
int
clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra)
{
	struct clonedevs *cd;
	struct cdev *dev, *ndev, *dl, *de;
	int unit, low, u;

	KASSERT(*cdp != NULL,
	    ("clone_setup() not called in driver \"%s\"", csw->d_name));
	KASSERT(!(extra & CLONE_UNITMASK),
	    ("Illegal extra bits (0x%x) in clone_create", extra));
	KASSERT(*up <= CLONE_UNITMASK,
	    ("Too high unit (0x%x) in clone_create", *up));

	if (!(csw->d_flags & D_INIT))
		prep_cdevsw(csw);

	/*
	 * Search the list for a lot of things in one go:
	 *   A preexisting match is returned immediately.
	 *   The lowest free unit number if we are passed -1, and the place
	 *	 in the list where we should insert that new element.
	 *   The place to insert a specified unit number, if applicable
	 *       the end of the list.
	 */
	unit = *up;
	ndev = devfs_alloc();
	dev_lock();
	low = extra;
	de = dl = NULL;
	cd = *cdp;
	LIST_FOREACH(dev, &cd->head, si_clone) {
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		u = dev2unit(dev);
		if (u == (unit | extra)) {
			/* Exact match: reuse it, free our allocation. */
			*dp = dev;
			dev_unlock();
			devfs_free(ndev);
			return (0);
		}
		if (unit == -1 && u == low) {
			/* This unit is taken; keep probing upward. */
			low++;
			de = dev;
			continue;
		} else if (u < (unit | extra)) {
			de = dev;	/* insert-after candidate */
			continue;
		} else if (u > (unit | extra)) {
			dl = dev;	/* insert-before candidate */
			break;
		}
	}
	if (unit == -1)
		unit = low & CLONE_UNITMASK;
	dev = newdev(csw, unit2minor(unit | extra), ndev);
	if (dev->si_flags & SI_CLONELIST) {
		/* Should be impossible: dump state for post-mortem. */
		printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
		printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
		LIST_FOREACH(dev, &cd->head, si_clone) {
			printf("\t%p %s\n", dev, dev->si_name);
		}
		panic("foo");
	}
	KASSERT(!(dev->si_flags & SI_CLONELIST),
	    ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
	if (dl != NULL)
		LIST_INSERT_BEFORE(dl, dev, si_clone);
	else if (de != NULL)
		LIST_INSERT_AFTER(de, dev, si_clone);
	else
		LIST_INSERT_HEAD(&cd->head, dev, si_clone);
	dev->si_flags |= SI_CLONELIST;
	*up = unit;
	dev_unlock_and_free();
	return (1);
}
951
952/*
953 * Kill everything still on the list.  The driver should already have
954 * disposed of any softc hung of the struct cdev *'s at this time.
955 */
/*
 * Kill everything still on the list.  The driver should already have
 * disposed of any softc hung of the struct cdev *'s at this time.
 * Devices already scheduled for destruction (CDP_SCHED_DTR) are only
 * unlinked here; the taskqueue will finish them.
 */
void
clone_cleanup(struct clonedevs **cdp)
{
	struct cdev *dev;
	struct cdev_priv *cp;
	struct clonedevs *cd;

	cd = *cdp;
	if (cd == NULL)
		return;
	dev_lock();
	while (!LIST_EMPTY(&cd->head)) {
		dev = LIST_FIRST(&cd->head);
		LIST_REMOVE(dev, si_clone);
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		dev->si_flags &= ~SI_CLONELIST;
		cp = dev->si_priv;
		if (!(cp->cdp_flags & CDP_SCHED_DTR)) {
			/* Mark so a racing destroy_dev_sched() backs off. */
			cp->cdp_flags |= CDP_SCHED_DTR;
			KASSERT(dev->si_flags & SI_NAMED,
				("Driver has goofed in cloning underways udev %x", dev->si_drv0));
			destroy_devl(dev);
		}
	}
	dev_unlock();
	free(cd, M_DEVBUF);
	*cdp = NULL;
}
985
986static TAILQ_HEAD(, cdev_priv) dev_ddtr =
987	TAILQ_HEAD_INITIALIZER(dev_ddtr);
988static struct task dev_dtr_task;
989
/*
 * Taskqueue handler for deferred destruction: drain dev_ddtr, calling
 * destroy_devl() on each queued cdev, dropping the reference taken by
 * destroy_dev_sched_cb() and invoking its callback (outside devmtx).
 */
static void
destroy_dev_tq(void *ctx, int pending)
{
	struct cdev_priv *cp;
	struct cdev *dev;
	void (*cb)(void *);
	void *cb_arg;

	dev_lock();
	while (!TAILQ_EMPTY(&dev_ddtr)) {
		cp = TAILQ_FIRST(&dev_ddtr);
		dev = &cp->cdp_c;
		KASSERT(cp->cdp_flags & CDP_SCHED_DTR,
		    ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp));
		TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list);
		/* Snapshot callback info before the cdev can go away. */
		cb = cp->cdp_dtr_cb;
		cb_arg = cp->cdp_dtr_cb_arg;
		destroy_devl(dev);
		dev_unlock();
		dev_rel(dev);
		if (cb != NULL)
			cb(cb_arg);
		dev_lock();
	}
	dev_unlock();
}
1016
/*
 * Schedule asynchronous destruction of a cdev, with an optional
 * callback invoked after it is destroyed.  Returns 0 if destruction
 * was already scheduled, 1 otherwise.  Takes a reference that
 * destroy_dev_tq() drops.  Must be called without devmtx held.
 */
int
destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg)
{
	struct cdev_priv *cp;

	cp = dev->si_priv;
	dev_lock();
	if (cp->cdp_flags & CDP_SCHED_DTR) {
		dev_unlock();
		return (0);
	}
	dev_refl(dev);
	cp->cdp_flags |= CDP_SCHED_DTR;
	cp->cdp_dtr_cb = cb;
	cp->cdp_dtr_cb_arg = arg;
	TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list);
	dev_unlock();
	taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task);
	return (1);
}
1037
/*
 * Schedule asynchronous destruction with no completion callback; see
 * destroy_dev_sched_cb() for the return value and locking contract.
 */
int
destroy_dev_sched(struct cdev *dev)
{
	return (destroy_dev_sched_cb(dev, NULL, NULL));
}
1043
/*
 * Sleep until all cdevs of the given cdevsw are gone; destroy_devl()
 * does a wakeup(&csw->d_devs) when the last one is destroyed (the
 * timeout makes the wait resilient to missed wakeups).
 */
void
destroy_dev_drain(struct cdevsw *csw)
{

	dev_lock();
	while (!LIST_EMPTY(&csw->d_devs)) {
		msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10);
	}
	dev_unlock();
}
1054
/*
 * Barrier on clone_drain_lock (declared elsewhere): acquiring and
 * immediately releasing it exclusively waits out any code holding it
 * shared — presumably in-flight clone event handlers; confirm at the
 * lock's other use sites.
 */
void
drain_dev_clone_events(void)
{

	sx_xlock(&clone_drain_lock);
	sx_xunlock(&clone_drain_lock);
}
1062
/*
 * SYSINIT hook: set up the task that runs destroy_dev_tq() for
 * deferred device destruction.
 */
static void
devdtr_init(void *dummy __unused)
{

	TASK_INIT(&dev_dtr_task, 0, destroy_dev_tq, NULL);
}
1069
1070SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL);
1071