/*-
 * Copyright (c) 2007 Ariff Abdullah <ariff@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_snd.h"
#endif

#if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
#include <dev/sound/pcm/sound.h>
#endif

#include <dev/sound/clone.h>

/*
 * So here we go again, another clonedevs manager. Unlike the default
 * clonedevs, this clone manager is designed to withstand various abusive
 * behavior (such as 'while : ; do ls /dev/whatever ; done', etc.), to reuse
 * objects once they reach a certain expiration threshold, and to provide an
 * aggressive garbage collector, a transparent device allocator and
 * concurrency handling across multiple threads/procs. Due to the limited
 * information given by the dev_clone EVENTHANDLER, we don't have many clues
 * whether the caller wants a real open() or is simply making fun of us with
 * things like stat(), mtime(), etc. Assuming that: 1) the time window
 * between the dev_clone EH and the real open() should be small enough, and
 * 2) mtime()/stat() etc. always look like halfway / stalled operations, we
 * can decide whether a new cdev must be created, an old (expired) cdev can
 * be reused or an existing cdev can be shared.
 *
 * Most of the operations and logic are generic enough and can be applied
 * elsewhere (such as if_tap, snp, etc.).  Perhaps this can be rearranged to
 * complement clone_*(). However, since this is still specific to the sound
 * driver (and serves as a proof of concept on how it can be done), si_drv2
 * is used to keep a pointer to the clone list entry, to avoid an expensive
 * lookup.
 */

/* clone entry */
struct snd_clone_entry {
	TAILQ_ENTRY(snd_clone_entry) link;
	struct snd_clone *parent;
	struct cdev *devt;
	struct timespec tsp;
	uint32_t flags;
	pid_t pid;
	int unit;
};

/* clone manager */
struct snd_clone {
	TAILQ_HEAD(link_head, snd_clone_entry) head;
	struct timespec tsp;
	int refcount;
	int size;
	int typemask;
	int maxunit;
	int deadline;
	uint32_t flags;
};

#ifdef SND_DIAGNOSTIC
#define SND_CLONE_ASSERT(x, y)		do {			\
	if (!(x))						\
		panic y;					\
} while (0)
#else
#define SND_CLONE_ASSERT(...)		KASSERT(__VA_ARGS__)
#endif

/*
 * Shamelessly ripped off from vfs_subr.c
 * We need at least 1/HZ precision for the default timestamping.
 */
enum { SND_TSP_SEC, SND_TSP_HZ, SND_TSP_USEC, SND_TSP_NSEC };

static int snd_timestamp_precision = SND_TSP_HZ;
TUNABLE_INT("hw.snd.timestamp_precision", &snd_timestamp_precision);
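
/*
 * Usage note (annotation, not from the original source): since this is a
 * tunable, the precision can be set from loader.conf(5), e.g.
 * hw.snd.timestamp_precision="3" for nanosecond resolution, or changed at
 * runtime through the read/write sysctl below when the kernel is built
 * with SND_DIAGNOSTIC or SND_DEBUG.
 */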

void
snd_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (snd_timestamp_precision) {
	case SND_TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case SND_TSP_HZ:
		getnanouptime(tsp);
		break;
	case SND_TSP_USEC:
		microuptime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case SND_TSP_NSEC:
		nanouptime(tsp);
		break;
	default:
		snd_timestamp_precision = SND_TSP_HZ;
		getnanouptime(tsp);
		break;
	}
}

#if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
static int
sysctl_hw_snd_timestamp_precision(SYSCTL_HANDLER_ARGS)
{
	int err, val;

	val = snd_timestamp_precision;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err == 0 && req->newptr != NULL) {
		switch (val) {
		case SND_TSP_SEC:
		case SND_TSP_HZ:
		case SND_TSP_USEC:
		case SND_TSP_NSEC:
			snd_timestamp_precision = val;
			break;
		default:
			break;
		}
	}

	return (err);
}
SYSCTL_PROC(_hw_snd, OID_AUTO, timestamp_precision, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof(int), sysctl_hw_snd_timestamp_precision, "I",
    "timestamp precision (0=s 1=hz 2=us 3=ns)");
#endif

/*
 * snd_clone_create() : Return an opaque, newly allocated clone manager.
 */
struct snd_clone *
snd_clone_create(int typemask, int maxunit, int deadline, uint32_t flags)
{
	struct snd_clone *c;

	SND_CLONE_ASSERT(!(typemask & ~SND_CLONE_MAXUNIT),
	    ("invalid typemask: 0x%08x", typemask));
	SND_CLONE_ASSERT(maxunit == -1 ||
	    !(maxunit & ~(~typemask & SND_CLONE_MAXUNIT)),
	    ("maxunit overflow: typemask=0x%08x maxunit=%d",
	    typemask, maxunit));
	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
	    ("invalid clone flags=0x%08x", flags));

	c = malloc(sizeof(*c), M_DEVBUF, M_WAITOK | M_ZERO);
	c->refcount = 0;
	c->size = 0;
	c->typemask = typemask;
	c->maxunit = (maxunit == -1) ? (~typemask & SND_CLONE_MAXUNIT) :
	    maxunit;
	c->deadline = deadline;
	c->flags = flags;
	snd_timestamp(&c->tsp);
	TAILQ_INIT(&c->head);

	return (c);
}
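
/*
 * A minimal usage sketch (illustrative annotation, not from the original
 * source; the typemask/maxunit/deadline/flags values below are assumptions,
 * see dev/sound/clone.h for the authoritative definitions):
 *
 *	struct snd_clone *c;
 *
 *	c = snd_clone_create(SND_U_MASK | SND_D_MASK, PCMMAXCLONE,
 *	    SND_CLONE_DEADLINE_DEFAULT, SND_CLONE_WAITOK);
 *	snd_clone_enable(c);
 */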

int
snd_clone_busy(struct snd_clone *c)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	if (c->size == 0)
		return (0);

	TAILQ_FOREACH(ce, &c->head, link) {
		if ((ce->flags & SND_CLONE_BUSY) ||
		    (ce->devt != NULL && ce->devt->si_threadcount != 0))
			return (EBUSY);
	}

	return (0);
}

/*
 * snd_clone_enable()/disable() : Suspend/resume clone allocation through
 * snd_clone_alloc(). Nothing else is affected by this.
 */
int
snd_clone_enable(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	if (c->flags & SND_CLONE_ENABLE)
		return (EINVAL);

	c->flags |= SND_CLONE_ENABLE;

	return (0);
}

int
snd_clone_disable(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	if (!(c->flags & SND_CLONE_ENABLE))
		return (EINVAL);

	c->flags &= ~SND_CLONE_ENABLE;

	return (0);
}

/*
 * Getters / Setters. Not worth explaining :)
 */
int
snd_clone_getsize(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->size);
}

int
snd_clone_getmaxunit(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->maxunit);
}

int
snd_clone_setmaxunit(struct snd_clone *c, int maxunit)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(maxunit == -1 ||
	    !(maxunit & ~(~c->typemask & SND_CLONE_MAXUNIT)),
	    ("maxunit overflow: typemask=0x%08x maxunit=%d",
	    c->typemask, maxunit));

	c->maxunit = (maxunit == -1) ? (~c->typemask & SND_CLONE_MAXUNIT) :
	    maxunit;

	return (c->maxunit);
}

int
snd_clone_getdeadline(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->deadline);
}

int
snd_clone_setdeadline(struct snd_clone *c, int deadline)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	c->deadline = deadline;

	return (c->deadline);
}

int
snd_clone_gettime(struct snd_clone *c, struct timespec *tsp)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));

	*tsp = c->tsp;

	return (0);
}

uint32_t
snd_clone_getflags(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->flags);
}

uint32_t
snd_clone_setflags(struct snd_clone *c, uint32_t flags)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
	    ("invalid clone flags=0x%08x", flags));

	c->flags = flags;

	return (c->flags);
}

int
snd_clone_getdevtime(struct cdev *dev, struct timespec *tsp)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
	SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (ENODEV);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	*tsp = ce->tsp;

	return (0);
}

uint32_t
snd_clone_getdevflags(struct cdev *dev)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0xffffffff);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	return (ce->flags);
}

uint32_t
snd_clone_setdevflags(struct cdev *dev, uint32_t flags)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_DEVMASK),
	    ("invalid clone dev flags=0x%08x", flags));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0xffffffff);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	ce->flags = flags;

	return (ce->flags);
}

/* Elapsed time conversion to ms */
#define SND_CLONE_ELAPSED(x, y)						\
	((((x)->tv_sec - (y)->tv_sec) * 1000) +				\
	(((y)->tv_nsec > (x)->tv_nsec) ?				\
	(((1000000000L + (x)->tv_nsec -					\
	(y)->tv_nsec) / 1000000) - 1000) :				\
	(((x)->tv_nsec - (y)->tv_nsec) / 1000000)))

#define SND_CLONE_EXPIRED(x, y, z)					\
	((x)->deadline < 1 ||						\
	((y)->tv_sec - (z)->tv_sec) > ((x)->deadline / 1000) ||		\
	SND_CLONE_ELAPSED(y, z) > (x)->deadline)
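
/*
 * A worked example (annotation, not in the original): for
 * x = { 5, 200000000 } and y = { 3, 700000000 },
 *
 *	SND_CLONE_ELAPSED(x, y) = (5 - 3) * 1000 +
 *	    ((1000000000 + 200000000 - 700000000) / 1000000 - 1000)
 *	                        = 2000 + (500 - 1000) = 1500 ms,
 *
 * i.e. 5.2s - 3.7s. The conditional borrows one second (-1000 ms) whenever
 * (y)->tv_nsec > (x)->tv_nsec.
 */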

/*
 * snd_clone_gc() : Garbage collector for stalled, expired objects. Refer to
 * clone.h for explanations on GC settings.
 */
int
snd_clone_gc(struct snd_clone *c)
{
	struct snd_clone_entry *ce, *tce;
	struct timespec now;
	int pruned;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	if (!(c->flags & SND_CLONE_GC_ENABLE) || c->size == 0)
		return (0);

	snd_timestamp(&now);

	/*
	 * Bail out if the last clone invocation is still within the
	 * deadline threshold.
	 */
	if ((c->flags & SND_CLONE_GC_EXPIRED) &&
	    !SND_CLONE_EXPIRED(c, &now, &c->tsp))
		return (0);

	pruned = 0;

	/*
	 * Visit each object in reverse order. If an object is still being
	 * referenced by a valid open(), skip it. Look for expired objects
	 * and either revoke their clone invocation status or mercilessly
	 * throw them away.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(ce, &c->head, link_head, link, tce) {
		if (!(ce->flags & SND_CLONE_BUSY) &&
		    (!(ce->flags & SND_CLONE_INVOKE) ||
		    SND_CLONE_EXPIRED(c, &now, &ce->tsp))) {
			if ((c->flags & SND_CLONE_GC_REVOKE) ||
			    ce->devt->si_threadcount != 0) {
				ce->flags &= ~SND_CLONE_INVOKE;
				ce->pid = -1;
			} else {
				TAILQ_REMOVE(&c->head, ce, link);
				destroy_dev(ce->devt);
				free(ce, M_DEVBUF);
				c->size--;
			}
			pruned++;
		}
	}

	/* return total pruned objects */
	return (pruned);
}
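
/*
 * Summary of the GC flags as exercised above (annotation; clone.h remains
 * the authoritative reference): SND_CLONE_GC_ENABLE gates the collector as
 * a whole, SND_CLONE_GC_EXPIRED makes it bail out early unless the manager
 * itself has passed the deadline, SND_CLONE_GC_REVOKE revokes the clone
 * invocation status instead of destroying the cdev, and SND_CLONE_GC_UNREF
 * (optionally narrowed by SND_CLONE_GC_LASTREF) triggers a GC pass from
 * snd_clone_unref().
 */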

void
snd_clone_destroy(struct snd_clone *c)
{
	struct snd_clone_entry *ce, *tmp;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	ce = TAILQ_FIRST(&c->head);
	while (ce != NULL) {
		tmp = TAILQ_NEXT(ce, link);
		if (ce->devt != NULL)
			destroy_dev(ce->devt);
		free(ce, M_DEVBUF);
		ce = tmp;
	}

	free(c, M_DEVBUF);
}

/*
 * snd_clone_acquire() : The vital part of concurrency management. Must be
 * called somewhere at the beginning of the open() handler. ENODEV is not
 * really fatal since it just tells the caller that this is not a cloned
 * device. EBUSY is *real*, don't forget that!
 */
int
snd_clone_acquire(struct cdev *dev)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (ENODEV);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	ce->flags &= ~SND_CLONE_INVOKE;

	if (ce->flags & SND_CLONE_BUSY)
		return (EBUSY);

	ce->flags |= SND_CLONE_BUSY;

	return (0);
}

/*
 * snd_clone_release() : Release busy status. Must be called somewhere at
 * the end of the close() handler, or after a failed open().
 */
int
snd_clone_release(struct cdev *dev)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (ENODEV);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	ce->flags &= ~SND_CLONE_INVOKE;

	if (!(ce->flags & SND_CLONE_BUSY))
		return (EBADF);

	ce->flags &= ~SND_CLONE_BUSY;
	ce->pid = -1;

	return (0);
}

/*
 * snd_clone_ref/unref() : Garbage collector reference counter. To make the
 * garbage collector run automatically, the sequence must be something like
 * this (in both the open() and close() handlers):
 *
 *  open() - 1) snd_clone_acquire()
 *           2) .... check check ... if failed, snd_clone_release()
 *           3) Success. Call snd_clone_ref()
 *
 * close() - 1) .... check check check ....
 *           2) Success. snd_clone_release()
 *           3) snd_clone_unref(). The garbage collector will run at this
 *              point if this is the last referenced object.
 */
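/*
 * A minimal open()/close() sketch following the sequence above (purely
 * illustrative annotation; mydev_* and mydev_check() are hypothetical):
 *
 *	static int
 *	mydev_open(struct cdev *dev, int oflags, int devtype,
 *	    struct thread *td)
 *	{
 *		int err;
 *
 *		err = snd_clone_acquire(dev);
 *		if (err == EBUSY)
 *			return (EBUSY);	(ENODEV just means "not cloned")
 *		err = mydev_check(dev);	(driver specific checks)
 *		if (err != 0) {
 *			(void)snd_clone_release(dev);
 *			return (err);
 *		}
 *		(void)snd_clone_ref(dev);
 *		return (0);
 *	}
 *
 *	static int
 *	mydev_close(struct cdev *dev, int fflag, int devtype,
 *	    struct thread *td)
 *	{
 *		(void)snd_clone_release(dev);
 *		(void)snd_clone_unref(dev);	(may trigger the GC)
 *		return (0);
 *	}
 */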
int
snd_clone_ref(struct cdev *dev)
{
	struct snd_clone_entry *ce;
	struct snd_clone *c;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0);

	c = ce->parent;
	SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
	SND_CLONE_ASSERT(c->refcount >= 0, ("refcount < 0"));

	return (++c->refcount);
}

int
snd_clone_unref(struct cdev *dev)
{
	struct snd_clone_entry *ce;
	struct snd_clone *c;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0);

	c = ce->parent;
	SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
	SND_CLONE_ASSERT(c->refcount > 0, ("refcount <= 0"));

	c->refcount--;

	/*
	 * Run the automatic garbage collector, if needed.
	 */
	if ((c->flags & SND_CLONE_GC_UNREF) &&
	    (!(c->flags & SND_CLONE_GC_LASTREF) ||
	    (c->refcount == 0 && (c->flags & SND_CLONE_GC_LASTREF))))
		(void)snd_clone_gc(c);

	return (c->refcount);
}

void
snd_clone_register(struct snd_clone_entry *ce, struct cdev *dev)
{
	SND_CLONE_ASSERT(ce != NULL, ("NULL snd_clone_entry"));
	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
	SND_CLONE_ASSERT(dev->si_drv2 == NULL, ("dev->si_drv2 not NULL"));
	SND_CLONE_ASSERT((ce->flags & SND_CLONE_ALLOC) == SND_CLONE_ALLOC,
	    ("invalid clone alloc flags=0x%08x", ce->flags));
	SND_CLONE_ASSERT(ce->devt == NULL, ("ce->devt not NULL"));
	SND_CLONE_ASSERT(ce->unit == dev2unit(dev),
	    ("invalid unit ce->unit=0x%08x dev2unit=0x%08x",
	    ce->unit, dev2unit(dev)));

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	dev->si_drv2 = ce;
	ce->devt = dev;
	ce->flags &= ~SND_CLONE_ALLOC;
	ce->flags |= SND_CLONE_INVOKE;
}

struct snd_clone_entry *
snd_clone_alloc(struct snd_clone *c, struct cdev **dev, int *unit, int tmask)
{
	struct snd_clone_entry *ce, *after, *bce, *cce, *nce, *tce;
	struct timespec now;
	int cunit, allocunit;
	pid_t curpid;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(dev != NULL, ("NULL dev pointer"));
	SND_CLONE_ASSERT((c->typemask & tmask) == tmask,
	    ("invalid tmask: typemask=0x%08x tmask=0x%08x",
	    c->typemask, tmask));
	SND_CLONE_ASSERT(unit != NULL, ("NULL unit pointer"));
	SND_CLONE_ASSERT(*unit == -1 || !(*unit & (c->typemask | tmask)),
	    ("typemask collision: typemask=0x%08x tmask=0x%08x *unit=%d",
	    c->typemask, tmask, *unit));

	if (!(c->flags & SND_CLONE_ENABLE) ||
	    (*unit != -1 && *unit > c->maxunit))
		return (NULL);

	ce = NULL;
	after = NULL;
	bce = NULL;	/* "b"usy candidate */
	cce = NULL;	/* "c"urthread/proc candidate */
	nce = NULL;	/* "n"ull, totally unbusy candidate */
	tce = NULL;	/* Last "t"ry candidate */
	cunit = 0;
	allocunit = (*unit == -1) ? 0 : *unit;
	curpid = curthread->td_proc->p_pid;

	snd_timestamp(&now);

	TAILQ_FOREACH(ce, &c->head, link) {
		/*
		 * Sort incrementally according to device type.
		 */
		if (tmask > (ce->unit & c->typemask)) {
			if (cunit == 0)
				after = ce;
			continue;
		} else if (tmask < (ce->unit & c->typemask))
			break;

		/*
		 * Shoot.. this is where the grumpiness begins. Just
		 * return immediately.
		 */
		if (*unit != -1 && *unit == (ce->unit & ~tmask))
			goto snd_clone_alloc_out;

		cunit++;
		/*
		 * Similar device type. Sort incrementally according
		 * to allocation unit. While here, look for a free slot
		 * and possible collisions for new / future allocations.
		 */
		if (*unit == -1 && (ce->unit & ~tmask) == allocunit)
			allocunit++;
		if ((ce->unit & ~tmask) < allocunit)
			after = ce;
		/*
		 * Clone logic:
		 *   1. Look for a non-busy entry, but keep track of the
		 *      best possible busy cdev.
		 *   2. Look for the best (oldest referenced) entry that is
		 *      in the same process / thread.
		 *   3. Look for the best (oldest referenced), absolutely
		 *      free entry.
		 *   4. Lastly, look for the best (oldest referenced) of any
		 *      entries that don't fit the criteria above.
		 */
		if (ce->flags & SND_CLONE_BUSY) {
			if (ce->devt != NULL && (bce == NULL ||
			    timespeccmp(&ce->tsp, &bce->tsp, <)))
				bce = ce;
			continue;
		}
		if (ce->pid == curpid &&
		    (cce == NULL || timespeccmp(&ce->tsp, &cce->tsp, <)))
			cce = ce;
		else if (!(ce->flags & SND_CLONE_INVOKE) &&
		    (nce == NULL || timespeccmp(&ce->tsp, &nce->tsp, <)))
			nce = ce;
		else if (tce == NULL || timespeccmp(&ce->tsp, &tce->tsp, <))
			tce = ce;
	}
	if (*unit != -1)
		goto snd_clone_alloc_new;
	else if (cce != NULL) {
		/* Same proc entry found, go for it */
		ce = cce;
		goto snd_clone_alloc_out;
	} else if (nce != NULL) {
		/*
		 * Next, try an absolutely free entry. If the calculated
		 * allocunit is smaller, create a new entry instead.
		 */
		if (allocunit < (nce->unit & ~tmask))
			goto snd_clone_alloc_new;
		ce = nce;
		goto snd_clone_alloc_out;
	} else if (allocunit > c->maxunit) {
		/*
		 * Maximum allowable unit reached. Try returning any
		 * available cdev and hope for the best. If the lookup was
		 * done for things like stat(), mtime(), etc., things should
		 * be ok. Otherwise, the open() handler should do further
		 * checks and decide whether to return the correct error
		 * code or not.
		 */
		if (tce != NULL) {
			ce = tce;
			goto snd_clone_alloc_out;
		} else if (bce != NULL) {
			ce = bce;
			goto snd_clone_alloc_out;
		}
		return (NULL);
	}

snd_clone_alloc_new:
	/*
	 * No free entries found, and we still haven't reached the maximum
	 * allowable units. Allocate and set up a minimal, unique entry with
	 * busy status so nobody will monkey with this new entry. The unit
	 * magic is set right here to avoid collisions with other contesting
	 * handlers. The caller must be careful here to maintain its own
	 * synchronization, as long as it does not conflict with malloc(9)
	 * operations.
	 *
	 * That said, go figure.
	 */
	ce = malloc(sizeof(*ce), M_DEVBUF,
	    ((c->flags & SND_CLONE_WAITOK) ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ce == NULL) {
		if (*unit != -1)
			return (NULL);
		/*
		 * We're being dense, ignorance is bliss,
		 * Super Regulatory Measure (TM).. TRY AGAIN!
		 */
		if (nce != NULL) {
			ce = nce;
			goto snd_clone_alloc_out;
		} else if (tce != NULL) {
			ce = tce;
			goto snd_clone_alloc_out;
		} else if (bce != NULL) {
			ce = bce;
			goto snd_clone_alloc_out;
		}
		return (NULL);
	}
	/* Setup new entry */
	ce->parent = c;
	ce->unit = tmask | allocunit;
	ce->pid = curpid;
	ce->tsp = now;
	ce->flags |= SND_CLONE_ALLOC;
	if (after != NULL) {
		TAILQ_INSERT_AFTER(&c->head, after, ce, link);
	} else {
		TAILQ_INSERT_HEAD(&c->head, ce, link);
	}
	c->size++;
	c->tsp = now;
	/*
	 * Save the new allocation unit for the caller, which will be used
	 * by make_dev().
	 */
	*unit = allocunit;

	return (ce);

snd_clone_alloc_out:
	/*
	 * Set, mark and timestamp the entry if this is a truly free entry.
	 * Leave busy entries alone.
	 */
	if (!(ce->flags & SND_CLONE_BUSY)) {
		ce->pid = curpid;
		ce->tsp = now;
		ce->flags |= SND_CLONE_INVOKE;
	}
	c->tsp = now;
	*dev = ce->devt;

	return (NULL);
}
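
/*
 * A usage sketch of the allocator (illustrative annotation; my_cdevsw, the
 * device name and the permissions are hypothetical, loosely modelled on how
 * a dev_clone event handler would drive it):
 *
 *	struct snd_clone_entry *ce;
 *	struct cdev *dev = NULL;
 *	int unit = -1;
 *
 *	ce = snd_clone_alloc(c, &dev, &unit, tmask);
 *	if (ce != NULL) {
 *		(New entry: create the cdev and attach it to the entry.)
 *		dev = make_dev(&my_cdevsw, unit, UID_ROOT, GID_WHEEL,
 *		    0666, "mydev%d", unit);
 *		snd_clone_register(ce, dev);
 *	}
 *
 * Otherwise, a non-NULL *dev means an existing cdev was reused or shared,
 * while NULL for both means the allocation was refused.
 */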