/* subr_extent.c revision 1.2 */
1/*	$OpenBSD: subr_extent.c,v 1.2 1996/12/09 09:27:04 niklas Exp $	*/
2/*	$NetBSD: subr_extent.c,v 1.7 1996/11/21 18:46:34 cgd Exp $	*/
3
4/*-
5 * Copyright (c) 1996 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe and Matthias Drochner.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *        This product includes software developed by the NetBSD
22 *        Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 *    contributors may be used to endorse or promote products derived
25 *    from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
31 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40/*
41 * General purpose extent manager.
42 */
43
44#ifdef _KERNEL
45#include <sys/param.h>
46#include <sys/extent.h>
47#include <sys/malloc.h>
48#include <sys/time.h>
49#include <sys/systm.h>
50#include <sys/proc.h>
51#else
52/*
53 * user-land definitions, so it can fit into a testing harness.
54 */
55#include <sys/param.h>
56#include <sys/extent.h>
57#include <errno.h>
58#include <stdlib.h>
59#include <stdio.h>
60
61#define	malloc(s, t, flags)		malloc(s)
62#define	free(p, t)			free(p)
63#define	tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
64#define	wakeup(chan)			((void)0)
65#endif
66
67static	void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
68	    int, struct extent_region *, struct extent_region *));
69static	struct extent_region *extent_alloc_region_descriptor
70	    __P((struct extent *, int));
71static	void extent_free_region_descriptor __P((struct extent *,
72	    struct extent_region *));
73
74/*
75 * Macro to align to an arbitrary power-of-two boundary.
76 */
77#define EXTENT_ALIGN(_start, _align)			\
78	(((_start) + ((_align) - 1)) & (-(_align)))
79
80/*
81 * Allocate and initialize an extent map.
82 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	/*
	 * A non-NULL "storage" marks this as a fixed extent: the extent
	 * descriptor and all region descriptors are carved out of the
	 * caller-supplied buffer instead of being malloc'd.
	 */
	int fixed_extent = (storage != NULL);

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%x",
		    storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		bzero(storage, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		/* The fixed-extent header itself consumes the front of
		 * the buffer; cp/sz track what remains for regions. */
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
161
162/*
163 * Destroy an extent map.
164 */
165void
166extent_destroy(ex)
167	struct extent *ex;
168{
169	struct extent_region *rp, *orp;
170
171#ifdef DIAGNOSTIC
172	/* Check arguments. */
173	if (ex == NULL)
174		panic("extent_destroy: NULL extent");
175#endif
176
177	/* Free all region descriptors in extent. */
178	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
179		orp = rp;
180		rp = rp->er_link.le_next;
181		LIST_REMOVE(orp, er_link);
182		extent_free_region_descriptor(ex, orp);
183	}
184
185	/* If we're not a fixed extent, free the extent descriptor itself. */
186	if ((ex->ex_flags & EXF_FIXED) == 0)
187		free(ex, ex->ex_mtype);
188}
189
190/*
191 * Insert a region descriptor into the sorted region list after the
192 * entry "after" or at the head of the list (if "after" is NULL).
193 * The region descriptor we insert is passed in "rp".  We must
194 * allocate the region descriptor before calling this function!
195 * If we don't need the region descriptor, it will be freed here.
196 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;	/* set if we merged into "after" */

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * in, and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.  The three regions
			 * (after, us, next) collapse into one.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		/* Our own descriptor is unneeded in either case. */
		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
301
302/*
303 * Allocate a specific region in an extent map.
304 */
305int
306extent_alloc_region(ex, start, size, flags)
307	struct extent *ex;
308	u_long start, size;
309	int flags;
310{
311	struct extent_region *rp, *last, *myrp;
312	u_long end = start + (size - 1);
313	int error;
314
315#ifdef DIAGNOSTIC
316	/* Check arguments. */
317	if (ex == NULL)
318		panic("extent_alloc_region: NULL extent");
319	if (size < 1) {
320		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
321		    ex->ex_name, size);
322		panic("extent_alloc_region: bad size");
323	}
324	if (end < start) {
325		printf(
326		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
327		 ex->ex_name, start, size);
328		panic("extent_alloc_region: overflow");
329	}
330#endif
331
332	/*
333	 * Make sure the requested region lies within the
334	 * extent.
335	 */
336	if ((start < ex->ex_start) || (end > ex->ex_end)) {
337#ifdef DIAGNOSTIC
338		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
339		    ex->ex_name, ex->ex_start, ex->ex_end);
340		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
341		    start, end);
342		panic("extent_alloc_region: region lies outside extent");
343#else
344		return (EINVAL);
345#endif
346	}
347
348	/*
349	 * Allocate the region descriptor.  It will be freed later
350	 * if we can coalesce with another region.
351	 */
352	myrp = extent_alloc_region_descriptor(ex, flags);
353	if (myrp == NULL) {
354#ifdef DIAGNOSTIC
355		printf(
356		    "extent_alloc_region: can't allocate region descriptor\n");
357#endif
358		return (ENOMEM);
359	}
360
361 alloc_start:
362	/*
363	 * Attempt to place ourselves in the desired area of the
364	 * extent.  We save ourselves some work by keeping the list sorted.
365	 * In other words, if the start of the current region is greater
366	 * than the end of our region, we don't have to search any further.
367	 */
368
369	/*
370	 * Keep a pointer to the last region we looked at so
371	 * that we don't have to traverse the list again when
372	 * we insert ourselves.  If "last" is NULL when we
373	 * finally insert ourselves, we go at the head of the
374	 * list.  See extent_insert_and_optimize() for details.
375	 */
376	last = NULL;
377
378	for (rp = ex->ex_regions.lh_first; rp != NULL;
379	    rp = rp->er_link.le_next) {
380		if (rp->er_start > end) {
381			/*
382			 * We lie before this region and don't
383			 * conflict.
384			 */
385			 break;
386		}
387
388		/*
389		 * The current region begins before we end.
390		 * Check for a conflict.
391		 */
392		if (rp->er_end >= start) {
393			/*
394			 * We conflict.  If we can (and want to) wait,
395			 * do so.
396			 */
397			if (flags & EX_WAITSPACE) {
398				ex->ex_flags |= EXF_WANTED;
399				error = tsleep(ex,
400				    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
401				    "extnt", 0);
402				if (error)
403					return (error);
404				goto alloc_start;
405			}
406			extent_free_region_descriptor(ex, myrp);
407			return (EAGAIN);
408		}
409		/*
410		 * We don't conflict, but this region lies before
411		 * us.  Keep a pointer to this region, and keep
412		 * trying.
413		 */
414		last = rp;
415	}
416
417	/*
418	 * We don't conflict with any regions.  "last" points
419	 * to the region we fall after, or is NULL if we belong
420	 * at the beginning of the region list.  Insert ourselves.
421	 */
422	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
423	return (0);
424}
425
426/*
427 * Macro to check (x + y) <= z.  This check is designed to fail
428 * if an overflow occurs.
429 */
430#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
431
432/*
433 * Allocate a region in an extent map subregion.
434 *
435 * If EX_FAST is specified, we return the first fit in the map.
436 * Otherwise, we try to minimize fragmentation by finding the
437 * smallest gap that will hold the request.
438 *
439 * The allocated region is aligned to "alignment", which must be
440 * a power of 2.
441 */
442int
443extent_alloc_subregion(ex, substart, subend, size, alignment, boundary,
444    flags, result)
445	struct extent *ex;
446	u_long substart, subend, size, alignment, boundary;
447	int flags;
448	u_long *result;
449{
450	struct extent_region *rp, *myrp, *last, *bestlast;
451	u_long newstart, newend, beststart, bestovh, ovh;
452	u_long dontcross, odontcross;
453	int error;
454
455#ifdef DIAGNOSTIC
456	/* Check arguments. */
457	if (ex == NULL)
458		panic("extent_alloc_subregion: NULL extent");
459	if (result == NULL)
460		panic("extent_alloc_subregion: NULL result pointer");
461	if ((substart < ex->ex_start) || (substart >= ex->ex_end) ||
462	    (subend > ex->ex_end) || (subend <= ex->ex_start)) {
463  printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
464		    ex->ex_name, ex->ex_start, ex->ex_end);
465		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
466		    substart, subend);
467		panic("extent_alloc_subregion: bad subregion");
468	}
469	if ((size < 1) || (size > ((subend - substart) + 1))) {
470		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
471		    ex->ex_name, size);
472		panic("extent_alloc_subregion: bad size");
473	}
474	if (alignment == 0)
475		panic("extent_alloc_subregion: bad alignment");
476	if (boundary && (boundary < size)) {
477		printf(
478		    "extent_alloc_subregion: extent `%s', size 0x%lx,
479		    boundary 0x%lx\n", ex->ex_name, size, boundary);
480		panic("extent_alloc_subregion: bad boundary");
481	}
482#endif
483
484	/*
485	 * Allocate the region descriptor.  It will be freed later
486	 * if we can coalesce with another region.
487	 */
488	myrp = extent_alloc_region_descriptor(ex, flags);
489	if (myrp == NULL) {
490#ifdef DIAGNOSTIC
491		printf(
492		 "extent_alloc_subregion: can't allocate region descriptor\n");
493#endif
494		return (ENOMEM);
495	}
496
497 alloc_start:
498	/*
499	 * Keep a pointer to the last region we looked at so
500	 * that we don't have to traverse the list again when
501	 * we insert ourselves.  If "last" is NULL when we
502	 * finally insert ourselves, we go at the head of the
503	 * list.  See extent_insert_and_optimize() for deatails.
504	 */
505	last = NULL;
506
507	/*
508	 * Initialize the "don't cross" boundary, a.k.a a line
509	 * that a region should not cross.  If the boundary lies
510	 * before the region starts, we add the "boundary" argument
511	 * until we get a meaningful comparison.
512	 *
513	 * Start the boundary lines at 0 if the caller requests it.
514	 */
515	dontcross = 0;
516	if (boundary) {
517		dontcross =
518		    ((flags & EX_BOUNDZERO) ? 0 : ex->ex_start) + boundary;
519		while (dontcross < substart)
520			dontcross += boundary;
521	}
522
523	/*
524	 * Keep track of size and location of the smallest
525	 * chunk we fit in.
526	 *
527	 * Since the extent can be as large as the numeric range
528	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
529	 * best overhead value can be the maximum unsigned integer.
530	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
531	 * into the region list immediately on an exact match (which
532	 * is the only case where "bestovh" would be set to 0).
533	 */
534	bestovh = 0;
535	beststart = 0;
536	bestlast = NULL;
537
538	/*
539	 * For N allocated regions, we must make (N + 1)
540	 * checks for unallocated space.  The first chunk we
541	 * check is the area from the beginning of the subregion
542	 * to the first allocated region.
543	 */
544	newstart = EXTENT_ALIGN(substart, alignment);
545	if (newstart < ex->ex_start) {
546#ifdef DIAGNOSTIC
547		printf(
548      "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
549		 ex->ex_name, ex->ex_start, ex->ex_end, alignment);
550		panic("extent_alloc_subregion: overflow after alignment");
551#else
552		extent_free_region_descriptor(ex, myrp);
553		return (EINVAL);
554#endif
555	}
556
557	for (rp = ex->ex_regions.lh_first; rp != NULL;
558	    rp = rp->er_link.le_next) {
559		/*
560		 * Check the chunk before "rp".  Note that our
561		 * comparison is safe from overflow conditions.
562		 */
563		if (LE_OV(newstart, size, rp->er_start)) {
564			/*
565			 * Do a boundary check, if necessary.  Note
566			 * that a region may *begin* on the boundary,
567			 * but it must end before the boundary.
568			 */
569			if (boundary) {
570				newend = newstart + (size - 1);
571
572				/*
573				 * Adjust boundary for a meaningful
574				 * comparison.
575				 */
576				while (dontcross <= newstart) {
577					odontcross = dontcross;
578					dontcross += boundary;
579
580					/*
581					 * If we run past the end of
582					 * the extent or the boundary
583					 * overflows, then the request
584					 * can't fit.
585					 */
586					if ((dontcross > ex->ex_end) ||
587					    (dontcross < odontcross))
588						goto fail;
589				}
590
591				/* Do the boundary check. */
592				if (newend >= dontcross) {
593					/*
594					 * Candidate region crosses
595					 * boundary.  Try again.
596					 */
597					continue;
598				}
599			}
600
601			/*
602			 * We would fit into this space.  Calculate
603			 * the overhead (wasted space).  If we exactly
604			 * fit, or we're taking the first fit, insert
605			 * ourselves into the region list.
606			 */
607			ovh = rp->er_start - newstart - size;
608			if ((flags & EX_FAST) || (ovh == 0))
609				goto found;
610
611			/*
612			 * Don't exactly fit, but check to see
613			 * if we're better than any current choice.
614			 */
615			if ((bestovh == 0) || (ovh < bestovh)) {
616				bestovh = ovh;
617				beststart = newstart;
618				bestlast = last;
619			}
620		}
621
622		/*
623		 * Skip past the current region and check again.
624		 */
625		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment);
626		if (newstart < rp->er_end) {
627			/*
628			 * Overflow condition.  Don't error out, since
629			 * we might have a chunk of space that we can
630			 * use.
631			 */
632			goto fail;
633		}
634
635		last = rp;
636	}
637
638	/*
639	 * The final check is from the current starting point to the
640	 * end of the subregion.  If there were no allocated regions,
641	 * "newstart" is set to the beginning of the subregion, or
642	 * just past the end of the last allocated region, adjusted
643	 * for alignment in either case.
644	 */
645	if (LE_OV(newstart, (size - 1), subend)) {
646		/*
647		 * We would fit into this space.  Calculate
648		 * the overhead (wasted space).  If we exactly
649		 * fit, or we're taking the first fit, insert
650		 * ourselves into the region list.
651		 */
652		ovh = ex->ex_end - newstart - (size - 1);
653		if ((flags & EX_FAST) || (ovh == 0))
654			goto found;
655
656		/*
657		 * Don't exactly fit, but check to see
658		 * if we're better than any current choice.
659		 */
660		if ((bestovh == 0) || (ovh < bestovh)) {
661			bestovh = ovh;
662			beststart = newstart;
663			bestlast = last;
664		}
665	}
666
667 fail:
668	/*
669	 * One of the following two conditions have
670	 * occurred:
671	 *
672	 *	There is no chunk large enough to hold the request.
673	 *
674	 *	If EX_FAST was not specified, there is not an
675	 *	exact match for the request.
676	 *
677	 * Note that if we reach this point and EX_FAST is
678	 * set, then we know there is no space in the extent for
679	 * the request.
680	 */
681	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
682		/*
683		 * We have a match that's "good enough".
684		 */
685		newstart = beststart;
686		last = bestlast;
687		goto found;
688	}
689
690	/*
691	 * No space currently available.  Wait for it to free up,
692	 * if possible.
693	 */
694	if (flags & EX_WAITSPACE) {
695		ex->ex_flags |= EXF_WANTED;
696		error = tsleep(ex,
697		    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
698		if (error)
699			return (error);
700		goto alloc_start;
701	}
702
703	extent_free_region_descriptor(ex, myrp);
704	return (EAGAIN);
705
706 found:
707	/*
708	 * Insert ourselves into the region list.
709	 */
710	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
711	*result = newstart;
712	return (0);
713}
714
/*
 * Free the region [start, start + size - 1] previously allocated in "ex".
 * Returns 0 on success or ENOMEM if splitting the region (case 4 below)
 * requires a descriptor that cannot be allocated; panics if the region
 * is not found.
 */
int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp;
	u_long end = start + (size - 1);

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1: exact match, release the whole descriptor. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2: trim the front of the region. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3: trim the tail of the region. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4: punch a hole in the middle; split into two. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			struct extent_region *nrp;

			/* Allocate a region descriptor. */
			nrp = extent_alloc_region_descriptor(ex, flags);
			if (nrp == NULL)
				return (ENOMEM);

			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	/* Wake anyone sleeping in the allocators for space to free up. */
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	return (0);
}
841
/*
 * Obtain a region descriptor for "ex": from the pre-allocated freelist
 * for fixed extents (optionally sleeping, or falling through to malloc
 * if EX_MALLOCOK), otherwise via malloc.  Returns NULL on failure.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		while (fex->fex_freelist.lh_first == NULL) {
			/* Freelist exhausted; caller allows malloc. */
			if (flags & EX_MALLOCOK)
				goto alloc;

			if ((flags & EX_WAITOK) == 0)
				return (NULL);
			/* Sleep until extent_free_region_descriptor()
			 * returns a descriptor to the freelist. */
			ex->ex_flags |= EXF_FLWANTED;
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
		rp = fex->fex_freelist.lh_first;
		LIST_REMOVE(rp, er_link);

		/*
		 * Don't muck with flags after pulling it off the
		 * freelist; it may be a dynamically allocated
		 * region pointer that was kindly given to us,
		 * and we need to preserve that information.
		 */

		return (rp);
	}

 alloc:
	rp = (struct extent_region *)
	    malloc(sizeof(struct extent_region), ex->ex_mtype,
	    (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);

	/* Mark malloc'd descriptors so the free path knows to free() them. */
	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}
887
/*
 * Return a region descriptor to "ex": to the freelist for fixed extents
 * (handing malloc'd descriptors to waiters when possible), otherwise
 * back to the system via free().
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				/* Jump into the wakeup below; the waiter
				 * will pull this descriptor off the list. */
				goto wake_em_up;
			} else {
				free(rp, ex->ex_mtype);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	free(rp, ex->ex_mtype);
}
931
932void
933extent_print(ex)
934	struct extent *ex;
935{
936	struct extent_region *rp;
937
938	if (ex == NULL)
939		panic("extent_print: NULL extent");
940
941	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
942	    ex->ex_start, ex->ex_end, ex->ex_flags);
943
944	for (rp = ex->ex_regions.lh_first; rp != NULL;
945	    rp = rp->er_link.le_next)
946		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);
947}
948