/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
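
/*
 * A minimal usage sketch, excluded from compilation: how a hypothetical
 * bus driver might create an rman over a range of I/O ports and carve a
 * block out of it.  The rman "foo_rman", the function name, and the port
 * numbers are invented for illustration; the rman_*() calls themselves
 * are the API implemented below.
 */
#if 0
static struct rman foo_rman;

static int
foo_attach_example(struct device *dev)
{
	struct resource *res;

	foo_rman.rm_type = RMAN_ARRAY;
	foo_rman.rm_descr = "example I/O ports";
	if (rman_init(&foo_rman) != 0)
		return (ENXIO);
	/* Hand the range 0x100-0x1ff to the manager. */
	if (rman_manage_region(&foo_rman, 0x100, 0x1ff) != 0)
		return (ENXIO);

	/* Reserve any 16 contiguous ports and activate them. */
	res = rman_reserve_resource(&foo_rman, 0x100, 0x1ff, 16,
	    RF_ACTIVE, dev);
	if (res == NULL)
		return (ENXIO);
	printf("allocated ports at %#lx\n", rman_get_start(res));
	rman_release_resource(res);
	return (0);
}
#endif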

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 265363 2014-05-05 15:59:31Z truckman $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
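
/*
 * Each resource_i embeds the public `struct resource' (r_r) that is
 * handed out to clients; int_alloc_resource() points r_r.__r_i back at
 * the containing resource_i, so a public handle can always be mapped
 * back to this private bookkeeping structure.
 */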
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int     rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

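/*
 * Debug output can be enabled at boot via the debug.rman_debug loader
 * tunable, or at run time through the debug.rman_debug sysctl.
 */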
#define	DPRINTF(params)	do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0ul;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

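/*
 * Hand a range of index space to an rman for it to allocate from.
 * Adjacent unallocated regions are coalesced: for example, managing
 * [0, 0xff] and then [0x100, 0x1ff] leaves a single free region
 * [0, 0x1ff].  A request overlapping an existing region fails with
 * EBUSY.
 */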
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
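/*
 * For example, an allocation spanning [0x100, 0x1ff] can be grown to
 * [0x100, 0x2ff] only if [0x200, 0x2ff] is free space in the same rman;
 * shrinking always succeeds, with the vacated range returned to the
 * rman's free list.
 */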
int
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
{
	struct	resource_i *r, *s, *t, *new;
	struct	rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & (RF_TIMESHARE | RF_SHAREABLE))
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound, u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	if (start + amask < start) {
		DPRINTF(("start+amask wrapped around\n"));
		goto out;
	}

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start + amask < s->r_start) {
			DPRINTF(("s->r_start (%#lx) + amask (%#lx) wrapped\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
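		/*
		 * Worked example with illustrative numbers: for 4-byte
		 * alignment (amask = 0x3), bound = 0x100 (bmask = ~0xff),
		 * count = 0x10 and rstart = 0xf5, the first pass aligns
		 * rstart up to 0xf8; [0xf8, 0x107] would cross a 0x100
		 * boundary, so rstart is bumped to the next boundary,
		 * 0x100, which is also aligned, and the loop exits.
		 */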
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int	rv;
	struct	resource_i *r;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

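/*
 * For example, rman_make_alignment_flags(0x1000) encodes an alignment
 * of 2^12 (RF_ALIGNMENT_LOG2(12)); a non-power-of-two size such as
 * 0x1800 is rounded up to the next power of two, giving 2^13.
 */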
uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation count, the
 * index into the list of resource managers, and the resource offset
 * into that manager's list.
 */
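/*
 * Userland consumers (e.g. the devinfo(8) utility via libdevinfo)
 * are expected to walk these lists through the hw.bus.rman node
 * defined below.
 */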
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager.
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%lx-0x%lx full range)\n",
	    rm, rm->rm_descr, rm->rm_start, rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif