1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2007 Ariff Abdullah <ariff@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD$
29 */
30
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/conf.h>
34#include <sys/kernel.h>
35#include <sys/malloc.h>
36#include <sys/proc.h>
37
38#ifdef HAVE_KERNEL_OPTION_HEADERS
39#include "opt_snd.h"
40#endif
41
42#if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
43#include <dev/sound/pcm/sound.h>
44#endif
45
46#include <dev/sound/clone.h>
47
48/*
49 * So here we go again, another clonedevs manager. Unlike default clonedevs,
50 * this clone manager is designed to withstand various abusive behavior
51 * (such as 'while : ; do ls /dev/whatever ; done', etc.), reusable object
52 * after reaching certain expiration threshold, aggressive garbage collector,
53 * transparent device allocator and concurrency handling across multiple
54 * thread/proc. Due to limited information given by dev_clone EVENTHANDLER,
 * we don't have many clues as to whether the caller wants a real open() or is
56 * making fun of us with things like stat(), mtime() etc. Assuming that:
57 * 1) Time window between dev_clone EH <-> real open() should be small
58 * enough and 2) mtime()/stat() etc. always looks like a half way / stalled
59 * operation, we can decide whether a new cdev must be created, old
60 * (expired) cdev can be reused or an existing cdev can be shared.
61 *
62 * Most of the operations and logics are generic enough and can be applied
63 * on other places (such as if_tap, snp, etc).  Perhaps this can be
64 * rearranged to complement clone_*(). However, due to this still being
65 * specific to the sound driver (and as a proof of concept on how it can be
66 * done), si_drv2 is used to keep the pointer of the clone list entry to
67 * avoid expensive lookup.
68 */
69
/* clone entry : one per cloned cdev managed by a snd_clone */
struct snd_clone_entry {
	TAILQ_ENTRY(snd_clone_entry) link;	/* linkage on parent's list */
	struct snd_clone *parent;		/* owning clone manager */
	struct cdev *devt;			/* backing cdev (NULL until registered) */
	struct timespec tsp;			/* last reference timestamp */
	uint32_t flags;				/* SND_CLONE_* entry flags */
	pid_t pid;				/* pid that last claimed this entry, -1 if none */
	int unit;				/* unit number (type bits | alloc unit) */
};
80
/* clone manager : per-device list of clone entries plus GC policy */
struct snd_clone {
	TAILQ_HEAD(link_head, snd_clone_entry) head;	/* entries, kept sorted by unit */
	struct timespec tsp;	/* timestamp of last clone handler activity */
	int refcount;		/* open reference count (snd_clone_ref/unref) */
	int size;		/* number of entries on the list */
	int typemask;		/* unit bits reserved for encoding device type */
	int maxunit;		/* highest allocatable unit number */
	int deadline;		/* entry expiration threshold, in milliseconds */
	uint32_t flags;		/* SND_CLONE_* manager flags */
};
92
/*
 * SND_CLONE_ASSERT() : With SND_DIAGNOSTIC, always panic on failure;
 * otherwise fall back to KASSERT (active only in INVARIANTS kernels).
 */
#ifdef SND_DIAGNOSTIC
#define SND_CLONE_ASSERT(x, y)		do {			\
	if (!(x))						\
		panic y;					\
} while (0)
#else
#define SND_CLONE_ASSERT(...)		KASSERT(__VA_ARGS__)
#endif
101
/*
 * Shamelessly ripped off from vfs_subr.c
 * We need at least 1/HZ precision as default timestamping.
 * Selector values for hw.snd.timestamp_precision (see snd_timestamp()).
 */
enum { SND_TSP_SEC, SND_TSP_HZ, SND_TSP_USEC, SND_TSP_NSEC };

/* Current precision selector; defaults to 1/HZ, tunable at boot. */
static int snd_timestamp_precision = SND_TSP_HZ;
TUNABLE_INT("hw.snd.timestamp_precision", &snd_timestamp_precision);
110
111void
112snd_timestamp(struct timespec *tsp)
113{
114	struct timeval tv;
115
116	switch (snd_timestamp_precision) {
117	case SND_TSP_SEC:
118		tsp->tv_sec = time_second;
119		tsp->tv_nsec = 0;
120		break;
121	case SND_TSP_HZ:
122		getnanouptime(tsp);
123		break;
124	case SND_TSP_USEC:
125		microuptime(&tv);
126		TIMEVAL_TO_TIMESPEC(&tv, tsp);
127		break;
128	case SND_TSP_NSEC:
129		nanouptime(tsp);
130		break;
131	default:
132		snd_timestamp_precision = SND_TSP_HZ;
133		getnanouptime(tsp);
134		break;
135	}
136}
137
#if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
/*
 * Sysctl handler for hw.snd.timestamp_precision.  Only the known
 * selector values are accepted; anything else is silently ignored.
 */
static int
sysctl_hw_snd_timestamp_precision(SYSCTL_HANDLER_ARGS)
{
	int err, val;

	val = snd_timestamp_precision;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	/* Selectors are contiguous: SND_TSP_SEC .. SND_TSP_NSEC. */
	if (val >= SND_TSP_SEC && val <= SND_TSP_NSEC)
		snd_timestamp_precision = val;

	return (err);
}
SYSCTL_PROC(_hw_snd, OID_AUTO, timestamp_precision, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof(int), sysctl_hw_snd_timestamp_precision, "I",
    "timestamp precision (0=s 1=hz 2=us 3=ns)");
#endif
165
166/*
167 * snd_clone_create() : Return opaque allocated clone manager.
168 */
169struct snd_clone *
170snd_clone_create(int typemask, int maxunit, int deadline, uint32_t flags)
171{
172	struct snd_clone *c;
173
174	SND_CLONE_ASSERT(!(typemask & ~SND_CLONE_MAXUNIT),
175	    ("invalid typemask: 0x%08x", typemask));
176	SND_CLONE_ASSERT(maxunit == -1 ||
177	    !(maxunit & ~(~typemask & SND_CLONE_MAXUNIT)),
178	    ("maxunit overflow: typemask=0x%08x maxunit=%d",
179	    typemask, maxunit));
180	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
181	    ("invalid clone flags=0x%08x", flags));
182
183	c = malloc(sizeof(*c), M_DEVBUF, M_WAITOK | M_ZERO);
184	c->refcount = 0;
185	c->size = 0;
186	c->typemask = typemask;
187	c->maxunit = (maxunit == -1) ? (~typemask & SND_CLONE_MAXUNIT) :
188	    maxunit;
189	c->deadline = deadline;
190	c->flags = flags;
191	snd_timestamp(&c->tsp);
192	TAILQ_INIT(&c->head);
193
194	return (c);
195}
196
197int
198snd_clone_busy(struct snd_clone *c)
199{
200	struct snd_clone_entry *ce;
201
202	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
203
204	if (c->size == 0)
205		return (0);
206
207	TAILQ_FOREACH(ce, &c->head, link) {
208		if ((ce->flags & SND_CLONE_BUSY) ||
209		    (ce->devt != NULL && ce->devt->si_threadcount != 0))
210			return (EBUSY);
211	}
212
213	return (0);
214}
215
216/*
217 * snd_clone_enable()/disable() : Suspend/resume clone allocation through
218 * snd_clone_alloc(). Everything else will not be affected by this.
219 */
220int
221snd_clone_enable(struct snd_clone *c)
222{
223	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
224
225	if (c->flags & SND_CLONE_ENABLE)
226		return (EINVAL);
227
228	c->flags |= SND_CLONE_ENABLE;
229
230	return (0);
231}
232
233int
234snd_clone_disable(struct snd_clone *c)
235{
236	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
237
238	if (!(c->flags & SND_CLONE_ENABLE))
239		return (EINVAL);
240
241	c->flags &= ~SND_CLONE_ENABLE;
242
243	return (0);
244}
245
/*
 * Getters / Setters. Not worth explaining :)
 */
/* Return the number of entries currently on the manager's list. */
int
snd_clone_getsize(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->size);
}
256
/* Return the highest allocatable unit number. */
int
snd_clone_getmaxunit(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->maxunit);
}
264
265int
266snd_clone_setmaxunit(struct snd_clone *c, int maxunit)
267{
268	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
269	SND_CLONE_ASSERT(maxunit == -1 ||
270	    !(maxunit & ~(~c->typemask & SND_CLONE_MAXUNIT)),
271	    ("maxunit overflow: typemask=0x%08x maxunit=%d",
272	    c->typemask, maxunit));
273
274	c->maxunit = (maxunit == -1) ? (~c->typemask & SND_CLONE_MAXUNIT) :
275	    maxunit;
276
277	return (c->maxunit);
278}
279
/* Return the entry expiration threshold (milliseconds). */
int
snd_clone_getdeadline(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->deadline);
}
287
288int
289snd_clone_setdeadline(struct snd_clone *c, int deadline)
290{
291	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
292
293	c->deadline = deadline;
294
295	return (c->deadline);
296}
297
298int
299snd_clone_gettime(struct snd_clone *c, struct timespec *tsp)
300{
301	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
302	SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));
303
304	*tsp = c->tsp;
305
306	return (0);
307}
308
/* Return the manager's SND_CLONE_* flags. */
uint32_t
snd_clone_getflags(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->flags);
}
316
317uint32_t
318snd_clone_setflags(struct snd_clone *c, uint32_t flags)
319{
320	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
321	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
322	    ("invalid clone flags=0x%08x", flags));
323
324	c->flags = flags;
325
326	return (c->flags);
327}
328
329int
330snd_clone_getdevtime(struct cdev *dev, struct timespec *tsp)
331{
332	struct snd_clone_entry *ce;
333
334	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
335	SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));
336
337	ce = dev->si_drv2;
338	if (ce == NULL)
339		return (ENODEV);
340
341	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
342
343	*tsp = ce->tsp;
344
345	return (0);
346}
347
348uint32_t
349snd_clone_getdevflags(struct cdev *dev)
350{
351	struct snd_clone_entry *ce;
352
353	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
354
355	ce = dev->si_drv2;
356	if (ce == NULL)
357		return (0xffffffff);
358
359	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
360
361	return (ce->flags);
362}
363
364uint32_t
365snd_clone_setdevflags(struct cdev *dev, uint32_t flags)
366{
367	struct snd_clone_entry *ce;
368
369	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
370	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_DEVMASK),
371	    ("invalid clone dev flags=0x%08x", flags));
372
373	ce = dev->si_drv2;
374	if (ce == NULL)
375		return (0xffffffff);
376
377	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
378
379	ce->flags = flags;
380
381	return (ce->flags);
382}
383
/*
 * SND_CLONE_ELAPSED() : Elapsed time between timespecs *x (later) and
 * *y (earlier) converted to milliseconds, with a borrow from the
 * seconds field when y's nanoseconds exceed x's.
 */
#define SND_CLONE_ELAPSED(x, y)						\
	((((x)->tv_sec - (y)->tv_sec) * 1000) +				\
	(((y)->tv_nsec > (x)->tv_nsec) ?				\
	(((1000000000L + (x)->tv_nsec -					\
	(y)->tv_nsec) / 1000000) - 1000) :				\
	(((x)->tv_nsec - (y)->tv_nsec) / 1000000)))

/*
 * SND_CLONE_EXPIRED() : True when manager x considers the span between
 * *y (now) and *z (last reference) past its deadline, or when the
 * deadline is disabled (< 1).  The tv_sec comparison short-circuits
 * the millisecond arithmetic for clearly-old entries.
 */
#define SND_CLONE_EXPIRED(x, y, z)					\
	((x)->deadline < 1 ||						\
	((y)->tv_sec - (z)->tv_sec) > ((x)->deadline / 1000) ||		\
	SND_CLONE_ELAPSED(y, z) > (x)->deadline)
396
397/*
398 * snd_clone_gc() : Garbage collector for stalled, expired objects. Refer to
399 * clone.h for explanations on GC settings.
400 */
401int
402snd_clone_gc(struct snd_clone *c)
403{
404	struct snd_clone_entry *ce, *tce;
405	struct timespec now;
406	int pruned;
407
408	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
409
410	if (!(c->flags & SND_CLONE_GC_ENABLE) || c->size == 0)
411		return (0);
412
413	snd_timestamp(&now);
414
415	/*
416	 * Bail out if the last clone handler was invoked below the deadline
417	 * threshold.
418	 */
419	if ((c->flags & SND_CLONE_GC_EXPIRED) &&
420	    !SND_CLONE_EXPIRED(c, &now, &c->tsp))
421		return (0);
422
423	pruned = 0;
424
425	/*
426	 * Visit each object in reverse order. If the object is still being
427	 * referenced by a valid open(), skip it. Look for expired objects
428	 * and either revoke its clone invocation status or mercilessly
429	 * throw it away.
430	 */
431	TAILQ_FOREACH_REVERSE_SAFE(ce, &c->head, link_head, link, tce) {
432		if (!(ce->flags & SND_CLONE_BUSY) &&
433		    (!(ce->flags & SND_CLONE_INVOKE) ||
434		    SND_CLONE_EXPIRED(c, &now, &ce->tsp))) {
435			if ((c->flags & SND_CLONE_GC_REVOKE) ||
436			    ce->devt->si_threadcount != 0) {
437				ce->flags &= ~SND_CLONE_INVOKE;
438				ce->pid = -1;
439			} else {
440				TAILQ_REMOVE(&c->head, ce, link);
441				destroy_dev(ce->devt);
442				free(ce, M_DEVBUF);
443				c->size--;
444			}
445			pruned++;
446		}
447	}
448
449	/* return total pruned objects */
450	return (pruned);
451}
452
453void
454snd_clone_destroy(struct snd_clone *c)
455{
456	struct snd_clone_entry *ce, *tmp;
457
458	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
459
460	ce = TAILQ_FIRST(&c->head);
461	while (ce != NULL) {
462		tmp = TAILQ_NEXT(ce, link);
463		if (ce->devt != NULL)
464			destroy_dev(ce->devt);
465		free(ce, M_DEVBUF);
466		ce = tmp;
467	}
468
469	free(c, M_DEVBUF);
470}
471
472/*
473 * snd_clone_acquire() : The vital part of concurrency management. Must be
474 * called somewhere at the beginning of open() handler. ENODEV is not really
475 * fatal since it just tell the caller that this is not cloned stuff.
476 * EBUSY is *real*, don't forget that!
477 */
478int
479snd_clone_acquire(struct cdev *dev)
480{
481	struct snd_clone_entry *ce;
482
483	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
484
485	ce = dev->si_drv2;
486	if (ce == NULL)
487		return (ENODEV);
488
489	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
490
491	ce->flags &= ~SND_CLONE_INVOKE;
492
493	if (ce->flags & SND_CLONE_BUSY)
494		return (EBUSY);
495
496	ce->flags |= SND_CLONE_BUSY;
497
498	return (0);
499}
500
501/*
502 * snd_clone_release() : Release busy status. Must be called somewhere at
503 * the end of close() handler, or somewhere after fail open().
504 */
505int
506snd_clone_release(struct cdev *dev)
507{
508	struct snd_clone_entry *ce;
509
510	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
511
512	ce = dev->si_drv2;
513	if (ce == NULL)
514		return (ENODEV);
515
516	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
517
518	ce->flags &= ~SND_CLONE_INVOKE;
519
520	if (!(ce->flags & SND_CLONE_BUSY))
521		return (EBADF);
522
523	ce->flags &= ~SND_CLONE_BUSY;
524	ce->pid = -1;
525
526	return (0);
527}
528
529/*
530 * snd_clone_ref/unref() : Garbage collector reference counter. To make
531 * garbage collector run automatically, the sequence must be something like
532 * this (both in open() and close() handlers):
533 *
534 *  open() - 1) snd_clone_acquire()
535 *           2) .... check check ... if failed, snd_clone_release()
536 *           3) Success. Call snd_clone_ref()
537 *
538 * close() - 1) .... check check check ....
539 *           2) Success. snd_clone_release()
540 *           3) snd_clone_unref() . Garbage collector will run at this point
541 *              if this is the last referenced object.
542 */
543int
544snd_clone_ref(struct cdev *dev)
545{
546	struct snd_clone_entry *ce;
547	struct snd_clone *c;
548
549	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
550
551	ce = dev->si_drv2;
552	if (ce == NULL)
553		return (0);
554
555	c = ce->parent;
556	SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
557	SND_CLONE_ASSERT(c->refcount >= 0, ("refcount < 0"));
558
559	return (++c->refcount);
560}
561
562int
563snd_clone_unref(struct cdev *dev)
564{
565	struct snd_clone_entry *ce;
566	struct snd_clone *c;
567
568	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
569
570	ce = dev->si_drv2;
571	if (ce == NULL)
572		return (0);
573
574	c = ce->parent;
575	SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
576	SND_CLONE_ASSERT(c->refcount > 0, ("refcount <= 0"));
577
578	c->refcount--;
579
580	/*
581	 * Run automatic garbage collector, if needed.
582	 */
583	if ((c->flags & SND_CLONE_GC_UNREF) &&
584	    (!(c->flags & SND_CLONE_GC_LASTREF) ||
585	    (c->refcount == 0 && (c->flags & SND_CLONE_GC_LASTREF))))
586		(void)snd_clone_gc(c);
587
588	return (c->refcount);
589}
590
/*
 * snd_clone_register() : Pair a freshly created cdev with the entry
 * previously handed out by snd_clone_alloc().  Clears the ALLOC
 * status and marks the entry as clone-invoked.
 */
void
snd_clone_register(struct snd_clone_entry *ce, struct cdev *dev)
{
	SND_CLONE_ASSERT(ce != NULL, ("NULL snd_clone_entry"));
	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
	SND_CLONE_ASSERT(dev->si_drv2 == NULL, ("dev->si_drv2 not NULL"));
	SND_CLONE_ASSERT((ce->flags & SND_CLONE_ALLOC) == SND_CLONE_ALLOC,
	    ("invalid clone alloc flags=0x%08x", ce->flags));
	SND_CLONE_ASSERT(ce->devt == NULL, ("ce->devt not NULL"));
	/* The unit chosen at alloc time must match the cdev's unit. */
	SND_CLONE_ASSERT(ce->unit == dev2unit(dev),
	    ("invalid unit ce->unit=0x%08x dev2unit=0x%08x",
	    ce->unit, dev2unit(dev)));

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	/* Link cdev <-> entry both ways (si_drv2 avoids list lookups). */
	dev->si_drv2 = ce;
	ce->devt = dev;
	ce->flags &= ~SND_CLONE_ALLOC;
	ce->flags |= SND_CLONE_INVOKE;
}
611
/*
 * snd_clone_alloc() : Find or create a clone entry for device type
 * 'tmask'.  Two outcomes:
 *   - Returns a new SND_CLONE_ALLOC-flagged entry with *unit set for
 *     the caller's make_dev(); presumably completed afterwards via
 *     snd_clone_register() (which clears SND_CLONE_ALLOC).
 *   - Returns NULL with *dev pointing at an existing cdev when an
 *     entry can be shared or reused (or NULL with *dev untouched on
 *     outright failure / disabled manager).
 */
struct snd_clone_entry *
snd_clone_alloc(struct snd_clone *c, struct cdev **dev, int *unit, int tmask)
{
	struct snd_clone_entry *ce, *after, *bce, *cce, *nce, *tce;
	struct timespec now;
	int cunit, allocunit;
	pid_t curpid;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(dev != NULL, ("NULL dev pointer"));
	SND_CLONE_ASSERT((c->typemask & tmask) == tmask,
	    ("invalid tmask: typemask=0x%08x tmask=0x%08x",
	    c->typemask, tmask));
	SND_CLONE_ASSERT(unit != NULL, ("NULL unit pointer"));
	SND_CLONE_ASSERT(*unit == -1 || !(*unit & (c->typemask | tmask)),
	    ("typemask collision: typemask=0x%08x tmask=0x%08x *unit=%d",
	    c->typemask, tmask, *unit));

	/* Refuse when allocation is disabled or the unit is out of range. */
	if (!(c->flags & SND_CLONE_ENABLE) ||
	    (*unit != -1 && *unit > c->maxunit))
		return (NULL);

	ce = NULL;
	after = NULL;
	bce = NULL;	/* "b"usy candidate */
	cce = NULL;	/* "c"urthread/proc candidate */
	nce = NULL;	/* "n"ull, totally unbusy candidate */
	tce = NULL;	/* Last "t"ry candidate */
	cunit = 0;
	allocunit = (*unit == -1) ? 0 : *unit;
	curpid = curthread->td_proc->p_pid;

	snd_timestamp(&now);

	TAILQ_FOREACH(ce, &c->head, link) {
		/*
		 * Sort incrementally according to device type.
		 */
		if (tmask > (ce->unit & c->typemask)) {
			if (cunit == 0)
				after = ce;
			continue;
		} else if (tmask < (ce->unit & c->typemask))
			break;

		/*
		 * Shoot.. this is where the grumpiness begin. Just
		 * return immediately.
		 */
		if (*unit != -1 && *unit == (ce->unit & ~tmask))
			goto snd_clone_alloc_out;

		cunit++;
		/*
		 * Similar device type. Sort incrementally according
		 * to allocation unit. While here, look for free slot
		 * and possible collision for new / future allocation.
		 */
		if (*unit == -1 && (ce->unit & ~tmask) == allocunit)
			allocunit++;
		if ((ce->unit & ~tmask) < allocunit)
			after = ce;
		/*
		 * Clone logic:
		 *   1. Look for non busy, but keep track of the best
		 *      possible busy cdev.
		 *   2. Look for the best (oldest referenced) entry that is
		 *      in a same process / thread.
		 *   3. Look for the best (oldest referenced), absolute free
		 *      entry.
		 *   4. Lastly, look for the best (oldest referenced)
		 *      any entries that doesn't fit with anything above.
		 */
		if (ce->flags & SND_CLONE_BUSY) {
			if (ce->devt != NULL && (bce == NULL ||
			    timespeccmp(&ce->tsp, &bce->tsp, <)))
				bce = ce;
			continue;
		}
		if (ce->pid == curpid &&
		    (cce == NULL || timespeccmp(&ce->tsp, &cce->tsp, <)))
			cce = ce;
		else if (!(ce->flags & SND_CLONE_INVOKE) &&
		    (nce == NULL || timespeccmp(&ce->tsp, &nce->tsp, <)))
			nce = ce;
		else if (tce == NULL || timespeccmp(&ce->tsp, &tce->tsp, <))
			tce = ce;
	}
	if (*unit != -1)
		goto snd_clone_alloc_new;
	else if (cce != NULL) {
		/* Same proc entry found, go for it */
		ce = cce;
		goto snd_clone_alloc_out;
	} else if (nce != NULL) {
		/*
		 * Next, try absolute free entry. If the calculated
		 * allocunit is smaller, create new entry instead.
		 */
		if (allocunit < (nce->unit & ~tmask))
			goto snd_clone_alloc_new;
		ce = nce;
		goto snd_clone_alloc_out;
	} else if (allocunit > c->maxunit) {
		/*
		 * Maximum allowable unit reached. Try returning any
		 * available cdev and hope for the best. If the lookup is
		 * done for things like stat(), mtime() etc. , things should
		 * be ok. Otherwise, open() handler should do further checks
		 * and decide whether to return correct error code or not.
		 */
		if (tce != NULL) {
			ce = tce;
			goto snd_clone_alloc_out;
		} else if (bce != NULL) {
			ce = bce;
			goto snd_clone_alloc_out;
		}
		return (NULL);
	}

snd_clone_alloc_new:
	/*
	 * No free entries found, and we still haven't reached maximum
	 * allowable units. Allocate, setup a minimal unique entry with busy
	 * status so nobody will monkey on this new entry. Unit magic is set
	 * right here to avoid collision with other contesting handler.
	 * The caller must be careful here to maintain its own
	 * synchronization, as long as it will not conflict with malloc(9)
	 * operations.
	 *
	 * That said, go figure.
	 */
	ce = malloc(sizeof(*ce), M_DEVBUF,
	    ((c->flags & SND_CLONE_WAITOK) ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ce == NULL) {
		if (*unit != -1)
			return (NULL);
		/*
		 * We're being dense, ignorance is bliss,
		 * Super Regulatory Measure (TM).. TRY AGAIN!
		 * Fall back to reusing the best existing candidate.
		 */
		if (nce != NULL) {
			ce = nce;
			goto snd_clone_alloc_out;
		} else if (tce != NULL) {
			ce = tce;
			goto snd_clone_alloc_out;
		} else if (bce != NULL) {
			ce = bce;
			goto snd_clone_alloc_out;
		}
		return (NULL);
	}
	/* Setup new entry */
	ce->parent = c;
	ce->unit = tmask | allocunit;
	ce->pid = curpid;
	ce->tsp = now;
	ce->flags |= SND_CLONE_ALLOC;
	if (after != NULL) {
		TAILQ_INSERT_AFTER(&c->head, after, ce, link);
	} else {
		TAILQ_INSERT_HEAD(&c->head, ce, link);
	}
	c->size++;
	c->tsp = now;
	/*
	 * Save new allocation unit for caller which will be used
	 * by make_dev().
	 */
	*unit = allocunit;

	return (ce);

snd_clone_alloc_out:
	/*
	 * Set, mark, timestamp the entry if this is a truly free entry.
	 * Leave busy entry alone.
	 */
	if (!(ce->flags & SND_CLONE_BUSY)) {
		ce->pid = curpid;
		ce->tsp = now;
		ce->flags |= SND_CLONE_INVOKE;
	}
	c->tsp = now;
	*dev = ce->devt;

	return (NULL);
}
802