/*-
 * Copyright (c) 2000,2004
 *	Poul-Henning Kamp.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <sys/kdb.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

#include <security/mac/mac_framework.h>

/*
 * The one true (but secret) list of active devices in the system.
 * Locked by dev_lock()/devmtx
 */
struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);

struct unrhdr *devfs_inos;

static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");

static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "DEVFS filesystem");

static unsigned devfs_generation;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
	&devfs_generation, 0, "DEVFS generation number");

unsigned devfs_rule_depth = 1;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
	&devfs_rule_depth, 0, "Max depth of ruleset include");

/*
 * Helper sysctl for devname(3).  We're given a dev_t and return the
 * name, if any, registered by the device driver.
 */
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
	struct cdev_priv *cdp;
	struct cdev *dev;

	error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
	dev = NULL;
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
		if (cdp->cdp_inode == ud) {
			dev = &cdp->cdp_c;
			dev_refl(dev);
			break;
		}
	dev_unlock();
	if (dev == NULL)
		return (ENOENT);
	error = SYSCTL_OUT(req, dev->si_name, strlen(dev->si_name) + 1);
	dev_rel(dev);
	return (error);
}

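/*
 * Illustrative sketch (not the actual libc implementation) of how a
 * userland consumer such as devname(3) is expected to query this node:
 *
 *	dev_t ud = st.st_rdev;			-- from a stat(2) of the node
 *	char buf[SPECNAMELEN + 1];
 *	size_t len = sizeof(buf);
 *	if (sysctlbyname("kern.devname", buf, &len, &ud, sizeof(ud)) == 0)
 *		printf("%s\n", buf);
 */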
SYSCTL_PROC(_kern, OID_AUTO, devname,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_devname, "", "devname(3) handler");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
    0, sizeof(struct cdev), "sizeof(struct cdev)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
    0, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");

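/*
 * Allocate a struct cdev, wrapped in its devfs-private struct cdev_priv
 * companion; the two are later recovered from each other with cdev2priv().
 * With MAKEDEV_NOWAIT the allocation may fail, in which case NULL is
 * returned.
 */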
struct cdev *
devfs_alloc(int flags)
{
	struct cdev_priv *cdp;
	struct cdev *cdev;
	struct timespec ts;

	cdp = malloc(sizeof *cdp, M_CDEVP, M_USE_RESERVE | M_ZERO |
	    ((flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK));
	if (cdp == NULL)
		return (NULL);

	cdp->cdp_dirents = &cdp->cdp_dirent0;
	cdp->cdp_dirent0 = NULL;
	cdp->cdp_maxdirent = 0;
	cdp->cdp_inode = 0;

	cdev = &cdp->cdp_c;

	cdev->si_name = cdev->__si_namebuf;
	LIST_INIT(&cdev->si_children);
	vfs_timestamp(&ts);
	cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;
	cdev->si_cred = NULL;

	return (cdev);
}

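/*
 * Check whether a new device name would collide with an existing, active
 * device: devfs_pathpath() is consulted in both directions so that names
 * overlapping path-wise (one nested under the other) are caught, and
 * devfs_dir_find() catches collisions with user-created directories.
 * Returns 1 on a collision, 0 otherwise; the devmtx must be held.
 */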
int
devfs_dev_exists(const char *name)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);

	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
		if ((cdp->cdp_flags & CDP_ACTIVE) == 0)
			continue;
		if (devfs_pathpath(cdp->cdp_c.si_name, name) != 0)
			return (1);
		if (devfs_pathpath(name, cdp->cdp_c.si_name) != 0)
			return (1);
	}
	if (devfs_dir_find(name) != 0)
		return (1);

	return (0);
}

void
devfs_free(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	cdp = cdev2priv(cdev);
	if (cdev->si_cred != NULL)
		crfree(cdev->si_cred);
	devfs_free_cdp_inode(cdp->cdp_inode);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	free(cdp, M_CDEVP);
}

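/*
 * Look up the entry named name[namelen] in directory dd; if type is
 * nonzero the dirent type must match as well.  Returns NULL when no
 * entry is found.  Doomed entries are never returned.
 */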
struct devfs_dirent *
devfs_find(struct devfs_dirent *dd, const char *name, int namelen, int type)
{
	struct devfs_dirent *de;

	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (namelen != de->de_dirent->d_namlen)
			continue;
		if (type != 0 && type != de->de_dirent->d_type)
			continue;
		if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
			continue;
		break;
	}
	KASSERT(de == NULL || (de->de_flags & DE_DOOMED) == 0,
	    ("devfs_find: returning a doomed entry"));
	return (de);
}

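/*
 * Allocate and initialize a new directory entry.  The struct dirent is
 * carved out of the same allocation, immediately after the devfs_dirent,
 * and the timestamps are set to "now".
 */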
struct devfs_dirent *
devfs_newdirent(char *name, int namelen)
{
	int i;
	struct devfs_dirent *de;
	struct dirent d;

	d.d_namlen = namelen;
	i = sizeof (*de) + GENERIC_DIRSIZ(&d);
	de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
	de->de_dirent = (struct dirent *)(de + 1);
	de->de_dirent->d_namlen = namelen;
	de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
	bcopy(name, de->de_dirent->d_name, namelen);
	de->de_dirent->d_name[namelen] = '\0';
	vfs_timestamp(&de->de_ctime);
	de->de_mtime = de->de_atime = de->de_ctime;
	de->de_links = 1;
	de->de_holdcnt = 1;
#ifdef MAC
	mac_devfs_init(de);
#endif
	return (de);
}

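/*
 * Return the parent directory entry of de.  For non-directories this is
 * the de_dir pointer; for directories it is recovered through the ".."
 * entry.  The "." and ".." entries themselves have no usable parent and
 * yield NULL.
 */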
struct devfs_dirent *
devfs_parent_dirent(struct devfs_dirent *de)
{

	if (de->de_dirent->d_type != DT_DIR)
		return (de->de_dir);

	if (de->de_flags & (DE_DOT | DE_DOTDOT))
		return (NULL);

	de = TAILQ_FIRST(&de->de_dlist);	/* "." */
	if (de == NULL)
		return (NULL);
	de = TAILQ_NEXT(de, de_list);		/* ".." */
	if (de == NULL)
		return (NULL);

	return (de->de_dir);
}

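/*
 * Create a new directory named name[namelen], complete with its "." and
 * ".." entries.  If dotdot is NULL the directory becomes its own parent
 * (as for a root directory); otherwise it is linked into dotdot and the
 * mount's rules are applied to it.
 */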
struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen, struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/*
	 * "." and ".." are always the two first entries in the
	 * de_dlist list.
	 *
	 * Create the "." entry in the new directory.
	 */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory. */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		sx_assert(&dmp->dm_lock, SX_XLOCKED);
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
		devfs_rules_apply(dmp, dd);
	}

#ifdef MAC
	mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}

void
devfs_dirent_free(struct devfs_dirent *de)
{
	free(de, M_DEVFS3);
}

/*
 * Remove a directory if it is empty; empty parent directories are then
 * removed recursively as well.
 */
static void
devfs_rmdir_empty(struct devfs_mount *dm, struct devfs_dirent *de)
{
	struct devfs_dirent *dd, *de_dot, *de_dotdot;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	for (;;) {
		KASSERT(de->de_dirent->d_type == DT_DIR,
		    ("devfs_rmdir_empty: de is not a directory"));

		if ((de->de_flags & DE_DOOMED) != 0 || de == dm->dm_rootdir)
			return;

		de_dot = TAILQ_FIRST(&de->de_dlist);
		KASSERT(de_dot != NULL, ("devfs_rmdir_empty: . missing"));
		de_dotdot = TAILQ_NEXT(de_dot, de_list);
		KASSERT(de_dotdot != NULL, ("devfs_rmdir_empty: .. missing"));
		/* Return if the directory is not empty. */
		if (TAILQ_NEXT(de_dotdot, de_list) != NULL)
			return;

		dd = devfs_parent_dirent(de);
		KASSERT(dd != NULL, ("devfs_rmdir_empty: NULL dd"));
		TAILQ_REMOVE(&de->de_dlist, de_dot, de_list);
		TAILQ_REMOVE(&de->de_dlist, de_dotdot, de_list);
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		DEVFS_DE_HOLD(dd);
		devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dot, DEVFS_DEL_NORECURSE);
		devfs_delete(dm, de_dotdot, DEVFS_DEL_NORECURSE);
		if (DEVFS_DE_DROP(dd)) {
			devfs_dirent_free(dd);
			return;
		}

		de = dd;
	}
}

/*
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporarily dropped.
 */
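/*
 * A note on the flags, as used below: DEVFS_DEL_NORECURSE suppresses the
 * cleanup of the (possibly now empty) parent directory, and
 * DEVFS_DEL_VNLOCKED means the caller already holds the vnode lock, so
 * only the vnode interlock is taken here.
 */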
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int flags)
{
	struct devfs_dirent *dd;
	struct vnode *vp;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
		("devfs_delete doomed dirent"));
	de->de_flags |= DE_DOOMED;

	if ((flags & DEVFS_DEL_NORECURSE) == 0) {
		dd = devfs_parent_dirent(de);
		if (dd != NULL)
			DEVFS_DE_HOLD(dd);
		if (de->de_flags & DE_USER) {
			KASSERT(dd != NULL, ("devfs_delete: NULL dd"));
			devfs_dir_unref_de(dm, dd);
		}
	} else
		dd = NULL;

	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(vp);
		sx_unlock(&dm->dm_lock);
		if ((flags & DEVFS_DEL_VNLOCKED) == 0)
			vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
		else
			VI_UNLOCK(vp);
		vgone(vp);
		if ((flags & DEVFS_DEL_VNLOCKED) == 0)
			VOP_UNLOCK(vp, 0);
		vdrop(vp);
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_devfs_destroy(de);
#endif
	if (de->de_inode > DEVFS_ROOTINO) {
		devfs_free_cdp_inode(de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);

	if (dd != NULL) {
		if (DEVFS_DE_DROP(dd))
			devfs_dirent_free(dd);
		else
			devfs_rmdir_empty(dm, dd);
	}
}

/*
 * Called on unmount.
 * Recursively removes the entire tree.
 * The caller needs to hold the dm for the duration of the call.
 */
static void
devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
{
	struct devfs_dirent *de;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	DEVFS_DE_HOLD(dd);
	for (;;) {
		/*
		 * Use TAILQ_LAST() to remove "." and ".." last.
		 * We might need ".." to resolve a path in
		 * devfs_dir_unref_de().
		 */
		de = TAILQ_LAST(&dd->de_dlist, devfs_dlist_head);
		if (de == NULL)
			break;
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_flags & DE_USER)
			devfs_dir_unref_de(dm, dd);
		if (de->de_flags & (DE_DOT | DE_DOTDOT))
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
		else if (de->de_dirent->d_type == DT_DIR)
			devfs_purge(dm, de);
		else
			devfs_delete(dm, de, DEVFS_DEL_NORECURSE);
	}
	if (DEVFS_DE_DROP(dd))
		devfs_dirent_free(dd);
	else if ((dd->de_flags & DE_DOOMED) == 0)
		devfs_delete(dm, dd, DEVFS_DEL_NORECURSE);
}

/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount point's dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is one element long and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep;
	int siz;

	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
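/*
 * Returns 1 after one unit of work has been done (a dirent created or
 * torn down, or a dead cdev reclaimed), in which case locks may have
 * been dropped and the caller must iterate again; returns 0 once there
 * is nothing left to do.
 */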
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd;
	struct cdev *pdev;
	int de_flags, j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {

		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			if (cdp->cdp_inuse > 0)
				continue;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}

		cdp->cdp_inuse++;
		dev_unlock();

		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s, 0);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			else if (de->de_dirent->d_type == DT_LNK) {
				de = devfs_find(dd, s, q - s, DT_DIR);
				if (de == NULL)
					de = devfs_vmkdir(dm, s, q - s, dd, 0);
				de->de_flags |= DE_COVERED;
			}
			s = q + 1;
			dd = de;
			KASSERT(dd->de_dirent->d_type == DT_DIR &&
			    (dd->de_flags & (DE_DOT | DE_DOTDOT)) == 0,
			    ("%s: invalid directory (si_name=%s)",
			    __func__, cdp->cdp_c.si_name));
		}
		de_flags = 0;
		de = devfs_find(dd, s, q - s, DT_LNK);
		if (de != NULL)
			de_flags |= DE_COVERED;

		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			j = strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			bcopy(pdev->si_name, de->de_symlink, j);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_flags |= de_flags;
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_populate(struct devfs_mount *dm)
{
	unsigned gen;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	gen = devfs_generation;
	if (dm->dm_generation == gen)
		return;
	while (devfs_populate_loop(dm, 0))
		continue;
	dm->dm_generation = gen;
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_cleanup(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	while (devfs_populate_loop(dm, 1))
		continue;
	devfs_purge(dm, dm->dm_rootdir);
}

/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the devlock() mutex is held, so no further locking
 * is necessary and no sleeping is allowed.
 */

void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	cdp->cdp_flags |= CDP_ACTIVE;
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}

void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}

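/*
 * Inode numbers for devfs entries come from the devfs_inos unit number
 * allocator set up in devfs_devs_init() below; an inode number of 0
 * means "none allocated" and is ignored by the free routine.
 */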
ino_t
devfs_alloc_cdp_inode(void)
{

	return (alloc_unr(devfs_inos));
}

void
devfs_free_cdp_inode(ino_t ino)
{

	if (ino > 0)
		free_unr(devfs_inos, ino);
}

static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}

SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);