/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
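
/*
 * Illustrative sketch (not part of the original file): a bus driver
 * typically creates an rman, hands it the address ranges it decodes, and
 * later carves allocations out of it for its children.  The manager name
 * and the range below are hypothetical.
 *
 *	static struct rman mem_rman;
 *
 *	mem_rman.rm_type = RMAN_ARRAY;
 *	mem_rman.rm_descr = "example memory addresses";
 *	if (rman_init(&mem_rman) != 0 ||
 *	    rman_manage_region(&mem_rman, 0x10000000ul, 0x1ffffffful) != 0)
 *		panic("mem_rman setup failed");
 */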

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 265923 2014-05-12 17:56:52Z truckman $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int     rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

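/*
 * Note: DPRINTF must be invoked with doubled parentheses so that the whole
 * printf(9) argument list is passed as a single macro parameter, e.g.
 * DPRINTF(("start %#lx, end %#lx\n", start, end)).
 */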
#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

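/*
 * Allocate an internal resource descriptor and point the embedded public
 * `struct resource' back at it; the rman_get_*()/rman_set_*() accessors
 * below follow that __r_i back-pointer.
 */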
static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0ul;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}
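
/*
 * Example of the coalescing above (a sketch; the addresses are made up):
 * managing [0x0000, 0x3fff] and then [0x4000, 0x7fff] in an empty rman
 * leaves a single free region [0x0000, 0x7fff], since the second range
 * is exactly adjacent to the first and both are unallocated
 * (r_flags == 0).  Managing [0x2000, 0x5fff] after that would return
 * EBUSY, as it overlaps an existing region.
 */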

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
{
	struct	resource_i *r, *s, *t, *new;
	struct	rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & (RF_TIMESHARE | RF_SHAREABLE))
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
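
/*
 * Worked example for the adjustment above (hypothetical values): given a
 * free region [0x000, 0x0ff], an allocated r = [0x100, 0x1ff], and a
 * free region [0x200, 0x2ff], rman_adjust_resource(r, 0x080, 0x27f)
 * grows r into both neighbors, leaving free regions [0x000, 0x07f] and
 * [0x280, 0x2ff].  Growing past 0x2ff would instead fail with EBUSY.
 */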

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	if (start + amask < start) {
		DPRINTF(("start+amask wrapped around\n"));
		goto out;
	}

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start + amask < s->r_start) {
			DPRINTF(("s->r_start (%#lx) + amask (%#lx) wrapped\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
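
/*
 * Illustrative use of the reservation entry points (a sketch; the rman,
 * size, and device below are hypothetical):
 *
 *	struct resource *res;
 *
 *	res = rman_reserve_resource(&mem_rman, 0ul, ~0ul, 0x100,
 *	    RF_ACTIVE | rman_make_alignment_flags(0x100), dev);
 *
 * asks for any 0x100-entry range in the manager, aligned to 0x100 and
 * activated on success; a NULL return means no acceptable region was
 * found.  rman_reserve_resource_bound() additionally takes a `bound'
 * argument which the allocation must not cross.
 */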

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int	rv;
	struct	resource_i *r;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit is
	 * set.  We're effectively computing ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}
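
/*
 * For example, rman_make_alignment_flags(0x1000) yields
 * RF_ALIGNMENT_LOG2(12), since 0x1000 is exactly 1 << 12; a size of
 * 0x1800 rounds up to RF_ALIGNMENT_LOG2(13) because more than one bit
 * is set.
 */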

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
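/*
 * The node is exported below as hw.bus.rman; the three OID components
 * are {bus generation, rman index, resource index}, and a resource
 * index of -1 returns the manager itself (struct u_rman) rather than
 * one of its regions (struct u_resource).
 */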
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		}
		else if (res_idx-- == 0)
				goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%lx-0x%lx full range)\n",
	    rm, rm->rm_descr, rm->rm_start, rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif