subr_unit.c revision 143283
/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/subr_unit.c 143283 2005-03-08 10:40:48Z phk $
 *
 *
 * Unit number allocation functions.
 *
 * These functions implement a mixed run-length/bitmap management of unit
 * number spaces in a very memory efficient manner.
 *
 * Allocation policy is always lowest free number first.
 *
 * A return value of -1 signals that no more unit numbers are available.
 *
 * There is no cost associated with the range of unit numbers, so unless
 * the resource really is finite, specify INT_MAX to new_unrhdr() and
 * forget about checking the return value.
 *
 * If a mutex is not provided when the unit number space is created, a
 * default global mutex is used.  The advantage of passing a mutex in is
 * that the alloc_unrl() function can be called with the mutex already
 * held (it will not be released by alloc_unrl()).
 *
 * The allocation function alloc_unr{l}() never sleeps (but it may block on
 * the mutex, of course).
 *
 * Freeing a unit number may require allocating memory, and can therefore
 * sleep, so the free_unr() function does not come in a pre-locked variant.
 *
 * A userland test program is included.
 *
 * Memory usage is a very complex function of the exact allocation
 * pattern, but always very compact:
 *    * For the very typical case where a single unbroken run of unit
 *      numbers is allocated, 44 bytes are used on i386.
 *    * For a unit number space of 1000 units and the random pattern
 *      in the usermode test program included, the worst case usage
 *	was 252 bytes on i386 for 500 allocated and 500 free units.
 *    * For a unit number space of 10000 units and the random pattern
 *      in the usermode test program included, the worst case usage
 *	was 798 bytes on i386 for 5000 allocated and 5000 free units.
 *    * The worst case is where every other unit number is allocated and
 *	the rest are free.  In that case 44 + N/4 bytes are used where
 *	N is the number of the highest unit allocated.
 */
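
/*
 * Usage sketch (editor's addition, not part of the original code): how a
 * kernel consumer would typically use this API.  The foo_* names and the
 * error handling are hypothetical; new_unrhdr(), alloc_unr()/alloc_unrl(),
 * free_unr() and delete_unrhdr() are the entry points defined in this file.
 * The block is wrapped in #if 0 so it is never compiled.
 */
#if 0
static struct unrhdr *foo_unrhdr;	/* hypothetical per-driver state */

static void
foo_init(void)
{
	/* Unit space 0..INT_MAX; NULL selects the default global mutex. */
	foo_unrhdr = new_unrhdr(0, INT_MAX, NULL);
}

static int
foo_create(void)
{
	int unit;

	unit = alloc_unr(foo_unrhdr);	/* never sleeps, may block on mutex */
	if (unit == -1)
		return (ENOSPC);	/* unit number space exhausted */
	/* ... create "foo<unit>" using the number just obtained ... */
	return (0);
}

static void
foo_destroy(int unit)
{
	free_unr(foo_unrhdr, unit);	/* may sleep: allocates memory */
}

static void
foo_fini(void)
{
	delete_unrhdr(foo_unrhdr);	/* every unit must have been freed */
}
#endif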

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/bitstring.h>

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * In theory it would be smarter to allocate the individual blocks
 * with the zone allocator, but at this time the expectation is that
 * there will typically not even be enough allocations to fill a single
 * page, so we stick with malloc for now.
 */
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");

#define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
#define Free(foo) free(foo, M_UNIT)

static struct mtx unitmtx;

MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);

#else /* ...USERLAND */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			abort(); \
		} \
	} while (0)

static int no_alloc;
#define Malloc(foo) _Malloc(foo, __LINE__)
static void *
_Malloc(size_t foo, int line)
{

	KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
	return (calloc(foo, 1));
}
#define Free(foo) free(foo)

struct unrhdr;


struct mtx {
	int	state;
} unitmtx;

static void
mtx_lock(struct mtx *mp)
{
	KASSERT(mp->state == 0, ("mutex already locked"));
	mp->state = 1;
}

static void
mtx_unlock(struct mtx *mp)
{
	KASSERT(mp->state == 1, ("mutex not locked"));
	mp->state = 0;
}

#define MA_OWNED	9

static void
mtx_assert(struct mtx *mp, int flag)
{
	if (flag == MA_OWNED) {
		KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
	}
}

#define CTASSERT(foo)

#endif /* USERLAND */

/*
 * This is our basic building block.
 *
 * It can be used in three different ways depending on the value of the ptr
 * element:
 *     If ptr is NULL, it represents a run of free items.
 *     If ptr points to the unrhdr it represents a run of allocated items.
 *     Otherwise it points to a bitstring of allocated items.
 *
 * For runs the len field is the length of the run.
 * For bitmaps the len field is the number of items covered by the bitmap;
 * the bitmap's busy field counts how many of those items are allocated.
 *
 * The bitmap is the same size as struct unr to optimize memory management.
 */
struct unr {
	TAILQ_ENTRY(unr)	list;
	u_int			len;
	void			*ptr;
};

struct unrb {
	u_char			busy;
	bitstr_t		map[sizeof(struct unr) - 1];
};

CTASSERT(sizeof(struct unr) == sizeof(struct unrb));

/* Number of bits in the bitmap */
#define NBITS	((int)sizeof(((struct unrb *)NULL)->map) * 8)
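
/*
 * Illustration (editor's note, not in the original source): with 32-bit
 * pointers struct unr is 16 bytes, so one bitmap chunk tracks the state of
 * well over a hundred units in the space of a single list element.  A list
 * such as
 *
 *	[ptr == uh, len 20] -> [ptr == NULL, len 5] -> [bitmap, len 40]
 *
 * describes 20 allocated units, then 5 free units, then 40 units whose
 * individual state is carried by the bitmap's bits; the bitmap's busy field
 * counts how many of those 40 are allocated.
 */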

/* Header element for a unr number space. */

struct unrhdr {
	TAILQ_HEAD(unrhd,unr)	head;
	u_int			low;	/* Lowest item */
	u_int			high;	/* Highest item */
	u_int			busy;	/* Count of allocated items */
	u_int			alloc;	/* Count of memory allocations */
	u_int			first;	/* items allocated from start */
	u_int			last;	/* items free at end */
	struct mtx		*mtx;
};
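
/*
 * Editor's note (illustrative): "first" and "last" handle the common case
 * without any list elements.  For a space [low..high], first counts units
 * allocated from the low end and last counts units still free at the high
 * end; only the possibly fragmented region in between needs struct unr
 * chunks.  A freshly created space has first == 0 and last == high - low + 1,
 * so a purely sequential allocate-only workload never allocates any chunks
 * at all.
 */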

static void print_unrhdr(struct unrhdr *uh);

#if defined(DIAGNOSTIC) || !defined(_KERNEL)
/*
 * Consistency check function.
 *
 * Checks the internal consistency as well as we can.
 *
 * Called at all boundaries of this API.
 */
static void
check_unrhdr(struct unrhdr *uh, int line)
{
	struct unr *up;
	struct unrb *ub;
	u_int x, y, z, w;

	y = uh->first;
	z = 0;
	TAILQ_FOREACH(up, &uh->head, list) {
		z++;
		if (up->ptr != uh && up->ptr != NULL) {
			ub = up->ptr;
			KASSERT (up->len <= NBITS,
			    ("UNR inconsistency: len %u max %d (line %d)\n",
			    up->len, NBITS, line));
			z++;
			w = 0;
			for (x = 0; x < up->len; x++)
				if (bit_test(ub->map, x))
					w++;
			KASSERT (w == ub->busy,
			    ("UNR inconsistency: busy %u found %u (line %d)\n",
			    ub->busy, w, line));
			y += w;
		} else if (up->ptr != NULL)
			y += up->len;
	}
	KASSERT (y == uh->busy,
	    ("UNR inconsistency: items %u found %u (line %d)\n",
	    uh->busy, y, line));
	KASSERT (z == uh->alloc,
	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
	    uh->alloc, z, line));
}

#else

static __inline void
check_unrhdr(struct unrhdr *uh, int line)
{

}

#endif


/*
 * Memory management for list elements.  new_unr() hands out one of the
 * buffers pre-allocated by the caller (allocation must happen before the
 * mutex is taken); delete_unr() releases an element.  Both maintain the
 * allocation count used by check_unrhdr().
 */

static __inline void *
new_unr(struct unrhdr *uh, void **p1, void **p2)
{
	void *p;

	uh->alloc++;
	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
	if (*p1 != NULL) {
		p = *p1;
		*p1 = NULL;
		return (p);
	} else {
		p = *p2;
		*p2 = NULL;
		return (p);
	}
}

static __inline void
delete_unr(struct unrhdr *uh, void *ptr)
{

	uh->alloc--;
	Free(ptr);
}

/*
 * Allocate a new unrheader set.
 *
 * Highest and lowest valid values given as parameters.
 */

struct unrhdr *
new_unrhdr(int low, int high, struct mtx *mutex)
{
	struct unrhdr *uh;

	KASSERT(low <= high,
	    ("UNR: use error: new_unrhdr(%u, %u)", low, high));
	uh = Malloc(sizeof *uh);
	if (mutex != NULL)
		uh->mtx = mutex;
	else
		uh->mtx = &unitmtx;
	TAILQ_INIT(&uh->head);
	uh->low = low;
	uh->high = high;
	uh->first = 0;
	uh->last = 1 + (high - low);
	check_unrhdr(uh, __LINE__);
printf("NEW_UNRHDR %x-%x -> %p\n", low, high, uh);
	return (uh);
}

void
delete_unrhdr(struct unrhdr *uh)
{

	check_unrhdr(uh, __LINE__);
	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	Free(uh);
}

static __inline int
is_bitmap(struct unrhdr *uh, struct unr *up)
{
	return (up->ptr != uh && up->ptr != NULL);
}

/*
 * Look for a sequence of items which can be combined into a bitmap; if
 * multiple candidates are present, take the one which saves the most memory.
 *
 * Return (1) if a sequence was found to indicate that another call
 * might be able to do more.  Return (0) if we found no suitable sequence.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
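/*
 * Worked example (editor's addition): given three adjacent chunks
 *
 *	[allocated, len 3] -> [free, len 2] -> [allocated, len 4]
 *
 * the scan below finds a nine-unit candidate spanning three list elements
 * (ba == 3), reuses the first element's memory as the bitmap, folds the
 * following two chunks into it, and ends with one list element of len 9 and
 * busy 7 backed by that bitmap, returning one struct unr to the allocator.
 */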
static int
optimize_unr(struct unrhdr *uh)
{
	struct unr *up, *uf, *us;
	struct unrb *ub, *ubf;
	u_int a, l, ba;

	/*
	 * Look for the run of items (if any) which, when collapsed into
	 * a bitmap, would save the most memory.
	 */
	us = NULL;
	ba = 0;
	TAILQ_FOREACH(uf, &uh->head, list) {
		if (uf->len >= NBITS)
			continue;
		a = 1;
		if (is_bitmap(uh, uf))
			a++;
		l = uf->len;
		up = uf;
		while (1) {
			up = TAILQ_NEXT(up, list);
			if (up == NULL)
				break;
			if ((up->len + l) > NBITS)
				break;
			a++;
			if (is_bitmap(uh, up))
				a++;
			l += up->len;
		}
		if (a > ba) {
			ba = a;
			us = uf;
		}
	}
	if (ba < 3)
		return (0);

	/*
	 * If the first element is not a bitmap, make it one.
	 * Trying to do so without allocating more memory complicates things
	 * a bit.
	 */
	if (!is_bitmap(uh, us)) {
		uf = TAILQ_NEXT(us, list);
		TAILQ_REMOVE(&uh->head, us, list);
		a = us->len;
		l = us->ptr == uh ? 1 : 0;
		ub = (void *)us;
		ub->busy = 0;
		if (l) {
			bit_nset(ub->map, 0, a);
			ub->busy += a;
		} else {
			bit_nclear(ub->map, 0, a);
		}
		if (!is_bitmap(uh, uf)) {
			if (uf->ptr == NULL) {
				bit_nclear(ub->map, a, a + uf->len - 1);
			} else {
				bit_nset(ub->map, a, a + uf->len - 1);
				ub->busy += uf->len;
			}
			uf->ptr = ub;
			uf->len += a;
			us = uf;
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, a++) {
				if (bit_test(ubf->map, l)) {
					bit_set(ub->map, a);
					ub->busy++;
				} else {
					bit_clear(ub->map, a);
				}
			}
			uf->len = a;
			delete_unr(uh, uf->ptr);
			uf->ptr = ub;
			us = uf;
		}
	}
	ub = us->ptr;
	while (1) {
		uf = TAILQ_NEXT(us, list);
		if (uf == NULL)
			return (1);
		if (uf->len + us->len > NBITS)
			return (1);
		if (uf->ptr == NULL) {
			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else if (uf->ptr == uh) {
			bit_nset(ub->map, us->len, us->len + uf->len - 1);
			ub->busy += uf->len;
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, us->len++) {
				if (bit_test(ubf->map, l)) {
					bit_set(ub->map, us->len);
					ub->busy++;
				} else {
					bit_clear(ub->map, us->len);
				}
			}
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, ubf);
			delete_unr(uh, uf);
		}
	}
}

/*
 * See if a given unr should be collapsed with a neighbor.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;
	struct unrb *ub;

	/* If bitmap is all set or clear, change it to runlength */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (ub->busy == up->len) {
			delete_unr(uh, up->ptr);
			up->ptr = uh;
		} else if (ub->busy == 0) {
			delete_unr(uh, up->ptr);
			up->ptr = NULL;
		}
	}

	/* If nothing left in runlength, delete it */
	if (up->len == 0) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp == NULL)
			upp = TAILQ_NEXT(up, list);
		TAILQ_REMOVE(&uh->head, up, list);
		delete_unr(uh, up);
		up = upp;
	}

	/* If we still have a "hot spot", merge it with a neighbor if possible */
	if (up != NULL) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
		upp = TAILQ_NEXT(up, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
	}

	/* Merge into ->first if possible */
	upp = TAILQ_FIRST(&uh->head);
	if (upp != NULL && upp->ptr == uh) {
		uh->first += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Merge into ->last if possible */
	upp = TAILQ_LAST(&uh->head, unrhd);
	if (upp != NULL && upp->ptr == NULL) {
		uh->last += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Try to make bitmaps */
	while (optimize_unr(uh))
		continue;
}

/*
 * Allocate a free unr.
 */
int
alloc_unrl(struct unrhdr *uh)
{
	struct unr *up;
	struct unrb *ub;
	u_int x;
	int y;

	mtx_assert(uh->mtx, MA_OWNED);
	check_unrhdr(uh, __LINE__);
	x = uh->low + uh->first;

	up = TAILQ_FIRST(&uh->head);

	/*
	 * If we have an ideal split, just adjust the first+last
	 */
	if (up == NULL && uh->last > 0) {
		uh->first++;
		uh->last--;
		uh->busy++;
		return (x);
	}

	/*
	 * We can always allocate from the first list element, so if we have
	 * nothing on the list, we must have run out of unit numbers.
	 */
	if (up == NULL) {
printf("Out of units %p\n", uh);
print_unrhdr(uh);
		return (-1);
	}

	KASSERT(up->ptr != uh, ("UNR first element is allocated"));

	if (up->ptr == NULL) {	/* free run */
		uh->first++;
		up->len--;
	} else {		/* bitmap */
		ub = up->ptr;
		KASSERT(ub->busy < up->len, ("UNR bitmap confusion"));
		bit_ffc(ub->map, up->len, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set(ub->map, y);
		ub->busy++;
		x += y;
	}
	uh->busy++;
	collapse_unr(uh, up);
	return (x);
}

int
alloc_unr(struct unrhdr *uh)
{
	int i;

	mtx_lock(uh->mtx);
	i = alloc_unrl(uh);
	mtx_unlock(uh->mtx);
	return (i);
}

/*
 * Free a unr.
 *
 * If we can save unrs by using a bitmap, do so.
 */
static void
free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upp, *upn;
	struct unrb *ub;
	u_int pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	     item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	item -= uh->low;
	upp = TAILQ_FIRST(&uh->head);
	/*
	 * Freeing in the ideal split case
	 */
	if (item + 1 == uh->first && upp == NULL) {
		uh->last++;
		uh->first--;
		uh->busy--;
		check_unrhdr(uh, __LINE__);
		return;
	}
	/*
	 * Freeing in the ->first section.  Create a run starting at the
	 * freed item.  The code below will subdivide it.
	 */
	if (item < uh->first) {
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = uh->first - item;
		TAILQ_INSERT_HEAD(&uh->head, up, list);
		uh->first -= up->len;
	}

	item -= uh->first;

	/* Find the item which contains the unit we want to free */
	TAILQ_FOREACH(up, &uh->head, list) {
		if (up->len > item)
			break;
		item -= up->len;
	}

	/* Handle bitmap items */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;

		KASSERT(bit_test(ub->map, item) != 0,
		    ("UNR: Freeing free item %d (bitmap)\n", item));
		bit_clear(ub->map, item);
		uh->busy--;
		ub->busy--;
		collapse_unr(uh, up);
		return;
	}

	KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run)\n", item));

	/* Just this one left, reap it */
	if (up->len == 1) {
		up->ptr = NULL;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item into the previous 'free' run */
	upp = TAILQ_PREV(up, unrhd, list);
	if (item == 0 && upp != NULL && upp->ptr == NULL) {
		upp->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item to the next 'free' run */
	upn = TAILQ_NEXT(up, list);
	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
		upn->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Split off the tail end, if any. */
	pl = up->len - (1 + item);
	if (pl > 0) {
		upp = new_unr(uh, p1, p2);
		upp->ptr = uh;
		upp->len = pl;
		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
	}

	/* Split off head end, if any */
	if (item > 0) {
		upp = new_unr(uh, p1, p2);
		upp->len = item;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	up->len = 1;
	up->ptr = NULL;
	uh->busy--;
	collapse_unr(uh, up);
}

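/*
 * Editor's note: free_unr() pre-allocates two struct unr buffers before
 * taking the mutex because free_unrl() may need up to two new list elements
 * (splitting a run can create two), and Malloc() could sleep if called with
 * the lock held.  Whatever free_unrl() does not consume via new_unr() is
 * released again after the mutex has been dropped.
 */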
void
free_unr(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;

	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));
	mtx_lock(uh->mtx);
	free_unrl(uh, item, &p1, &p2);
	mtx_unlock(uh->mtx);
	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);
}

/*
 * Simple stochastic test driver for the above functions
 */

static void
print_unr(struct unrhdr *uh, struct unr *up)
{
	u_int x;
	struct unrb *ub;

	printf("  %p len = %5u ", up, up->len);
	if (up->ptr == NULL)
		printf("free\n");
	else if (up->ptr == uh)
		printf("alloc\n");
	else {
		ub = up->ptr;
		printf("bitmap(%d) [", ub->busy);
		for (x = 0; x < up->len; x++) {
			if (bit_test(ub->map, x))
				printf("#");
			else
				printf(" ");
		}
		printf("]\n");
	}
}

static void
print_unrhdr(struct unrhdr *uh)
{
	struct unr *up;
	u_int x;

	printf(
	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
	x = uh->low + uh->first;
	TAILQ_FOREACH(up, &uh->head, list) {
		printf("  from = %5u", x);
		print_unr(uh, up);
		x += up->len;
	}
}

#ifndef _KERNEL	/* USERLAND test driver */

/* Number of unrs to test */
#define NN	10000
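
/*
 * Editor's note: a rough way to build and run this test on a FreeBSD
 * system (command and flags are indicative only, not from the original):
 *
 *	cc -o unrtest subr_unit.c
 *	./unrtest > /dev/null
 *
 * The output is large; check_unrhdr() aborts the program if any
 * inconsistency is detected.
 */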

int
main(int argc __unused, const char **argv __unused)
{
	struct unrhdr *uh;
	u_int i, x, m, j;
	char a[NN];

	setbuf(stdout, NULL);
	uh = new_unrhdr(0, NN - 1, NULL);
	print_unrhdr(uh);

	memset(a, 0, sizeof a);

	fprintf(stderr, "sizeof(struct unr) %zu\n", sizeof (struct unr));
	fprintf(stderr, "sizeof(struct unrb) %zu\n", sizeof (struct unrb));
	fprintf(stderr, "sizeof(struct unrhdr) %zu\n", sizeof (struct unrhdr));
	fprintf(stderr, "NBITS %d\n", NBITS);
	x = 1;
	for (m = 0; m < NN * 100; m++) {
		j = random();
		i = (j >> 1) % NN;
#if 0
		if (a[i] && (j & 1))
			continue;
#endif
		if (a[i]) {
			printf("F %u\n", i);
			free_unr(uh, i);
			a[i] = 0;
		} else {
			no_alloc = 1;
			i = alloc_unr(uh);
			if (i != -1) {
				a[i] = 1;
				printf("A %u\n", i);
			}
			no_alloc = 0;
		}
		if (0)	/* XXX: change this for detailed debug printout */
			print_unrhdr(uh);
		check_unrhdr(uh, __LINE__);
	}
	for (i = 0; i < NN; i++) {
		if (a[i]) {
			printf("C %u\n", i);
			free_unr(uh, i);
			print_unrhdr(uh);
		}
	}
	print_unrhdr(uh);
	delete_unrhdr(uh);
	return (0);
}
#endif
