subr_rman.c revision 266426
/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 266426 2014-05-19 04:44:27Z truckman $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int     rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0ul;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}

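/*
 * Illustrative sketch, not part of the original revision: how a
 * hypothetical bus driver might create a resource manager in its attach
 * path and hand it the address window the bus decodes.  The softc layout,
 * the helper name, and the example window [0xf0000000, 0xf7ffffff] are
 * invented for this example; rman_init(), rman_manage_region(), and
 * rman_fini() are the interfaces defined in this file.
 */
struct example_softc {
	struct rman	ex_mem_rman;	/* manages this bus's memory window */
};

static __unused int
example_rman_setup(struct example_softc *sc)
{
	int error;

	/* The softc is assumed zeroed, as device_get_softc() would return. */
	sc->ex_mem_rman.rm_type = RMAN_ARRAY;
	sc->ex_mem_rman.rm_descr = "example bus memory window";
	error = rman_init(&sc->ex_mem_rman);
	if (error != 0)
		return (error);

	/* Tell the manager which range of addresses it is responsible for. */
	error = rman_manage_region(&sc->ex_mem_rman, 0xf0000000ul,
	    0xf7fffffful);
	if (error != 0)
		rman_fini(&sc->ex_mem_rman);
	return (error);
}
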
int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
{
	struct	resource_i *r, *s, *t, *new;
	struct	rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & (RF_TIMESHARE | RF_SHAREABLE))
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}

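/*
 * Illustrative sketch (assumption, not in the original file): growing an
 * allocated, non-shared resource in place with rman_adjust_resource().
 * The helper name and the 4KB growth amount are hypothetical; the call
 * succeeds only if the adjacent region in the manager is free, per the
 * checks in the function above.
 */
static __unused int
example_grow_resource(struct resource *res)
{

	/* Keep the start, move the end up by one 4KB page. */
	return (rman_adjust_resource(res, rman_get_start(res),
	    rman_get_end(res) + 0x1000));
}
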
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	if (start > ULONG_MAX - amask) {
		DPRINTF(("start+amask would wrap around\n"));
		goto out;
	}

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) + count - 1> end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > ULONG_MAX - amask) {
			DPRINTF(("s->r_start (%#lx) + amask (%#lx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if ((s->r_flags & flags) == flags &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}

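/*
 * Illustrative sketch (assumption, not in the original file): reserving a
 * 256-byte region that is 256-byte aligned and does not cross a 4KB
 * boundary, anywhere in the managed range, and activating it in the same
 * call.  The helper name and the sizes are invented for this example;
 * rman_reserve_resource_bound() and rman_make_alignment_flags() are the
 * functions defined in this file.
 */
static __unused struct resource *
example_reserve_aligned(struct rman *rm, struct device *dev)
{
	u_int flags;

	/* Request 2^8 = 256-byte alignment, encoded via RF_ALIGNMENT_LOG2(). */
	flags = rman_make_alignment_flags(256) | RF_ACTIVE;
	return (rman_reserve_resource_bound(rm, 0ul, ~0ul, 256, 0x1000,
	    flags, dev));
}
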
static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int	rv;
	struct	resource_i *r;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

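/*
 * Illustrative sketch (assumption, not in the original file): the teardown
 * half of a resource's life cycle.  A driver that activated a reservation
 * deactivates it and then hands the range back to the manager; both calls
 * are the functions defined above, and the helper name is invented.
 */
static __unused int
example_teardown(struct resource *res)
{
	int error;

	error = rman_deactivate_resource(res);
	if (error != 0)
		return (error);
	return (rman_release_resource(res));
}
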
uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}

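/*
 * Worked example (illustrative, not in the original file): the function
 * above computes ceil(log2(size)).  A power of two maps to its exact
 * log2; any other size rounds up to the next power of two.
 */
static __unused void
example_alignment_flags(void)
{

	/* 0x1000 (4KB) is exactly 2^12. */
	KASSERT(rman_make_alignment_flags(0x1000) == RF_ALIGNMENT_LOG2(12),
	    ("4KB alignment should encode as log2 = 12"));
	/* 0x1800 (6KB) is not a power of two; it rounds up to 2^13 = 8KB. */
	KASSERT(rman_make_alignment_flags(0x1800) == RF_ALIGNMENT_LOG2(13),
	    ("6KB alignment should round up to log2 = 13"));
}
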
void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		}
		else if (res_idx-- == 0)
				goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%lx-0x%lx full range)\n",
	    rm, rm->rm_descr, rm->rm_start, rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif