subr_rman.c revision 131414
/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */

58116182Sobrien#include <sys/cdefs.h>
59116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 131414 2004-07-01 16:22:10Z imp $");
60116182Sobrien
61131344Simp#define __RMAN_RESOURCE_VISIBLE
6240711Swollman#include <sys/param.h>
6340711Swollman#include <sys/systm.h>
6441304Sbde#include <sys/kernel.h>
6540711Swollman#include <sys/lock.h>
6640711Swollman#include <sys/malloc.h>
6771576Sjasone#include <sys/mutex.h>
6845720Speter#include <sys/bus.h>		/* XXX debugging */
6945720Speter#include <machine/bus.h>
7040711Swollman#include <sys/rman.h>
71102962Siwasaki#include <sys/sysctl.h>
7240711Swollman
73102962Siwasakiint     rman_debug = 0;
74102962SiwasakiTUNABLE_INT("debug.rman_debug", &rman_debug);
75102962SiwasakiSYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
76102962Siwasaki    &rman_debug, 0, "rman debug");
7759910Spaul
78102962Siwasaki#define DPRINTF(params) if (rman_debug) printf params
79102962Siwasaki
8045569Seivindstatic MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");
8140711Swollman
8240711Swollmanstruct	rman_head rman_head;
8371576Sjasonestatic	struct mtx rman_mtx; /* mutex to protect rman_head */
8440711Swollmanstatic	int int_rman_activate_resource(struct rman *rm, struct resource *r,
8540711Swollman				       struct resource **whohas);
8645720Speterstatic	int int_rman_deactivate_resource(struct resource *r);
8740711Swollmanstatic	int int_rman_release_resource(struct rman *rm, struct resource *r);
8840711Swollman
8940711Swollmanint
9040711Swollmanrman_init(struct rman *rm)
9140711Swollman{
9240711Swollman	static int once;
9340711Swollman
9440711Swollman	if (once == 0) {
9540711Swollman		once = 1;
9640711Swollman		TAILQ_INIT(&rman_head);
9793818Sjhb		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
9840711Swollman	}
9940711Swollman
10040711Swollman	if (rm->rm_type == RMAN_UNINIT)
10140711Swollman		panic("rman_init");
10240711Swollman	if (rm->rm_type == RMAN_GAUGE)
10340711Swollman		panic("implement RMAN_GAUGE");
10440711Swollman
10568727Smckusick	TAILQ_INIT(&rm->rm_list);
10684781Sjhb	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
10771576Sjasone	if (rm->rm_mtx == 0)
10840711Swollman		return ENOMEM;
10993818Sjhb	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);
11040711Swollman
11172200Sbmilekic	mtx_lock(&rman_mtx);
11240711Swollman	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
11372200Sbmilekic	mtx_unlock(&rman_mtx);
11440711Swollman	return 0;
11540711Swollman}
11640711Swollman
11740711Swollman/*
11840711Swollman * NB: this interface is not robust against programming errors which
11940711Swollman * add multiple copies of the same region.
12040711Swollman */
12140711Swollmanint
12240711Swollmanrman_manage_region(struct rman *rm, u_long start, u_long end)
12340711Swollman{
12440711Swollman	struct resource *r, *s;
12540711Swollman
12669781Sdwmalone	r = malloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
12740711Swollman	if (r == 0)
12840711Swollman		return ENOMEM;
12940711Swollman	r->r_start = start;
13040711Swollman	r->r_end = end;
13140711Swollman	r->r_rm = rm;
13240711Swollman
13372200Sbmilekic	mtx_lock(rm->rm_mtx);
13468727Smckusick	for (s = TAILQ_FIRST(&rm->rm_list);
13568727Smckusick	     s && s->r_end < r->r_start;
13668727Smckusick	     s = TAILQ_NEXT(s, r_link))
13740711Swollman		;
13840711Swollman
13968727Smckusick	if (s == NULL) {
14068727Smckusick		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
14140711Swollman	} else {
14268727Smckusick		TAILQ_INSERT_BEFORE(s, r, r_link);
14340711Swollman	}
14440711Swollman
14572200Sbmilekic	mtx_unlock(rm->rm_mtx);
14640711Swollman	return 0;
14740711Swollman}
14840711Swollman
14940711Swollmanint
15040711Swollmanrman_fini(struct rman *rm)
15140711Swollman{
15240711Swollman	struct resource *r;
15340711Swollman
15472200Sbmilekic	mtx_lock(rm->rm_mtx);
15568727Smckusick	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
15645720Speter		if (r->r_flags & RF_ALLOCATED) {
15772200Sbmilekic			mtx_unlock(rm->rm_mtx);
15840711Swollman			return EBUSY;
15945720Speter		}
16040711Swollman	}
16140711Swollman
16240711Swollman	/*
16340711Swollman	 * There really should only be one of these if we are in this
16440711Swollman	 * state and the code is working properly, but it can't hurt.
16540711Swollman	 */
16668727Smckusick	while (!TAILQ_EMPTY(&rm->rm_list)) {
16768727Smckusick		r = TAILQ_FIRST(&rm->rm_list);
16868727Smckusick		TAILQ_REMOVE(&rm->rm_list, r, r_link);
16940711Swollman		free(r, M_RMAN);
17040711Swollman	}
17172200Sbmilekic	mtx_unlock(rm->rm_mtx);
17272200Sbmilekic	mtx_lock(&rman_mtx);
17340711Swollman	TAILQ_REMOVE(&rman_head, rm, rm_link);
17472200Sbmilekic	mtx_unlock(&rman_mtx);
17571576Sjasone	mtx_destroy(rm->rm_mtx);
17671576Sjasone	free(rm->rm_mtx, M_RMAN);
17740711Swollman
17840711Swollman	return 0;
17940711Swollman}
18040711Swollman
/*
 * Reserve a sub-range of resource manager `rm'.
 *
 * The allocation must lie within [start, end], be `count' units long,
 * be aligned according to the RF_ALIGNMENT bits of `flags', and, when
 * `bound' is nonzero, not cross any `bound'-aligned boundary.  `dev'
 * is recorded as the owning device.  If RF_ACTIVE is set in `flags',
 * the resource is also activated atomically with the reservation; if
 * that activation fails, the reservation is released and NULL is
 * returned.
 *
 * Returns the new (or newly shared) resource, or NULL on failure.
 */
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = 0;

	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	       "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count,
	       flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
	/* Remember the activation request; strip it for the search below. */
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	/* Skip regions that end before the requested window begins. */
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start > end) {
			DPRINTF(("s->r_start (%#lx) > end (%#lx)\n", s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rend, rstart, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				/* Exact fit: hand out the region itself. */
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == 0)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = malloc(sizeof *r, M_RMAN, M_NOWAIT|M_ZERO);
				if (r == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				/* `r' becomes the free tail beyond rv. */
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == 0)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			/* First sharer: create the share list lazily. */
			if (s->r_sharehead == 0) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = 0;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv);
}
39340711Swollman
/*
 * Backward-compatible wrapper around rman_reserve_resource_bound():
 * reserve a range with no boundary restriction (bound == 0 disables
 * the boundary-crossing check).
 */
struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
40288372Stmm
40340711Swollmanstatic int
40440711Swollmanint_rman_activate_resource(struct rman *rm, struct resource *r,
40540711Swollman			   struct resource **whohas)
40640711Swollman{
40740711Swollman	struct resource *s;
40840711Swollman	int ok;
40940711Swollman
41040711Swollman	/*
41140711Swollman	 * If we are not timesharing, then there is nothing much to do.
41240711Swollman	 * If we already have the resource, then there is nothing at all to do.
41340711Swollman	 * If we are not on a sharing list with anybody else, then there is
41440711Swollman	 * little to do.
41540711Swollman	 */
41640711Swollman	if ((r->r_flags & RF_TIMESHARE) == 0
41740711Swollman	    || (r->r_flags & RF_ACTIVE) != 0
41840711Swollman	    || r->r_sharehead == 0) {
41940711Swollman		r->r_flags |= RF_ACTIVE;
42040711Swollman		return 0;
42140711Swollman	}
42240711Swollman
42340711Swollman	ok = 1;
42453225Sphk	for (s = LIST_FIRST(r->r_sharehead); s && ok;
42553225Sphk	     s = LIST_NEXT(s, r_sharelink)) {
42640711Swollman		if ((s->r_flags & RF_ACTIVE) != 0) {
42740711Swollman			ok = 0;
42840711Swollman			*whohas = s;
42940711Swollman		}
43040711Swollman	}
43140711Swollman	if (ok) {
43240711Swollman		r->r_flags |= RF_ACTIVE;
43340711Swollman		return 0;
43440711Swollman	}
43540711Swollman	return EBUSY;
43640711Swollman}
43740711Swollman
43840711Swollmanint
43940711Swollmanrman_activate_resource(struct resource *r)
44040711Swollman{
44140711Swollman	int rv;
44240711Swollman	struct resource *whohas;
44340711Swollman	struct rman *rm;
44440711Swollman
44540711Swollman	rm = r->r_rm;
44672200Sbmilekic	mtx_lock(rm->rm_mtx);
44740711Swollman	rv = int_rman_activate_resource(rm, r, &whohas);
44872200Sbmilekic	mtx_unlock(rm->rm_mtx);
44940711Swollman	return rv;
45040711Swollman}
45140711Swollman
/*
 * Sleep until the time-shared resource `r' can be activated, then
 * activate it.  `pri' and `timo' are passed through to msleep().
 *
 * Locking contract is asymmetric: on success (return 0) the owning
 * rman's mutex is left HELD and the caller must release it; on
 * msleep() failure the mutex is dropped before the error is returned.
 */
int
rman_await_resource(struct resource *r, int pri, int timo)
{
	int	rv;
	struct	resource *whohas;
	struct	rman *rm;

	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		/* Only shared (time-share) resources can be waited on. */
		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		/* Ask the active holder to wake us on deactivation. */
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}
47640711Swollman
47745720Speterstatic int
47845720Speterint_rman_deactivate_resource(struct resource *r)
47940711Swollman{
48040711Swollman
48140711Swollman	r->r_flags &= ~RF_ACTIVE;
48240711Swollman	if (r->r_flags & RF_WANTED) {
48340711Swollman		r->r_flags &= ~RF_WANTED;
48440711Swollman		wakeup(r->r_sharehead);
48540711Swollman	}
48645720Speter	return 0;
48745720Speter}
48845720Speter
48945720Speterint
49045720Speterrman_deactivate_resource(struct resource *r)
49145720Speter{
49245720Speter	struct	rman *rm;
49345720Speter
49445720Speter	rm = r->r_rm;
49572200Sbmilekic	mtx_lock(rm->rm_mtx);
49645720Speter	int_rman_deactivate_resource(r);
49772200Sbmilekic	mtx_unlock(rm->rm_mtx);
49840711Swollman	return 0;
49940711Swollman}
50040711Swollman
/*
 * Release the allocated resource `r' back to its manager; the caller
 * holds rm->rm_mtx.
 *
 * For a shared resource, only this client's share is dropped (the
 * list-ownership role moves to another sharer if needed).  For an
 * exclusive allocation the region is marked free and merged with any
 * adjacent free neighbors.  Always returns 0.
 */
static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct	resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			/* Hand the rm_list slot and ownership role to `s'. */
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == 0) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = 0;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);

	if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0
	    && t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
58640711Swollman
58740711Swollmanint
58840711Swollmanrman_release_resource(struct resource *r)
58940711Swollman{
59040711Swollman	int	rv;
59140711Swollman	struct	rman *rm = r->r_rm;
59240711Swollman
59372200Sbmilekic	mtx_lock(rm->rm_mtx);
59440711Swollman	rv = int_rman_release_resource(rm, r);
59572200Sbmilekic	mtx_unlock(rm->rm_mtx);
59640711Swollman	return (rv);
59740711Swollman}
59867261Simp
/*
 * Convert a requested alignment (in bytes) into the RF_ALIGNMENT
 * flag encoding consumed by rman_reserve_resource_bound(): we compute
 * ceil(log2(size)) and pack it with RF_ALIGNMENT_LOG2().
 *
 * Fix: shift an unsigned 1 so that probing bit 31 does not left-shift
 * into the sign bit of a signed int, which is undefined behavior.
 */
uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if (((uint32_t)1 << i) & size)
			break;
	if (~((uint32_t)1 << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}
616107296Simp
/*
 * Accessor: first index of the range backing `r'.
 */
u_long
rman_get_start(struct resource *r)
{
	return (r->r_start);
}
622107296Simp
/*
 * Accessor: last (inclusive) index of the range backing `r'.
 */
u_long
rman_get_end(struct resource *r)
{
	return (r->r_end);
}
628107296Simp
/*
 * Accessor: number of units spanned by `r' (end is inclusive).
 */
u_long
rman_get_size(struct resource *r)
{
	return (r->r_end - r->r_start + 1);
}
634107296Simp
/*
 * Accessor: current RF_* flag word of `r'.
 */
u_int
rman_get_flags(struct resource *r)
{
	return (r->r_flags);
}
640107296Simp
/*
 * Mutator: record the kernel virtual address mapped for `r'.
 */
void
rman_set_virtual(struct resource *r, void *v)
{
	r->r_virtual = v;
}
646107296Simp
/*
 * Accessor: kernel virtual address previously set with
 * rman_set_virtual().
 */
void *
rman_get_virtual(struct resource *r)
{
	return (r->r_virtual);
}
652107296Simp
/*
 * Mutator: record the bus space tag used to access `r'.
 */
void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}
658107296Simp
/*
 * Accessor: bus space tag previously set with rman_set_bustag().
 */
bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}
664107296Simp
/*
 * Mutator: record the bus space handle used to access `r'.
 */
void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}
670107296Simp
/*
 * Accessor: bus space handle previously set with rman_set_bushandle().
 */
bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}
676107296Simp
/*
 * Mutator: record the bus-specific resource ID of `r'.
 */
void
rman_set_rid(struct resource *r, int rid)
{
	r->r_rid = rid;
}
682107296Simp
/*
 * Mutator: overwrite the start index of `r'.  NOTE(review): this
 * bypasses the region bookkeeping above -- presumably only for bus
 * code adjusting an unactivated resource; verify against callers.
 */
void
rman_set_start(struct resource *r, u_long start)
{
	r->r_start = start;
}
688131414Simp
/*
 * Mutator: overwrite the end index of `r'.  NOTE(review): like
 * rman_set_start(), this bypasses the region bookkeeping above;
 * verify against callers.
 */
void
rman_set_end(struct resource *r, u_long end)
{
	r->r_end = end;
}
694131414Simp
/*
 * Accessor: bus-specific resource ID previously set with
 * rman_set_rid().
 */
int
rman_get_rid(struct resource *r)
{
	return (r->r_rid);
}
700110753Simp
/*
 * Accessor: device recorded as the owner of `r' at reservation time.
 */
struct device *
rman_get_device(struct resource *r)
{
	return (r->r_dev);
}
706