subr_rman.c revision 85519
/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/subr_rman.c 85519 2001-10-26 06:09:01Z jhb $
 */
3140711Swollman
/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
5940711Swollman
6040711Swollman#include <sys/param.h>
6140711Swollman#include <sys/systm.h>
6241304Sbde#include <sys/kernel.h>
6340711Swollman#include <sys/lock.h>
6440711Swollman#include <sys/malloc.h>
6571576Sjasone#include <sys/mutex.h>
6645720Speter#include <sys/bus.h>		/* XXX debugging */
6745720Speter#include <machine/bus.h>
6840711Swollman#include <sys/rman.h>
6940711Swollman
/* DPRINTF() expands to printf-style debug output only under RMAN_DEBUG. */
7059910Spaul#ifdef RMAN_DEBUG
7159910Spaul#define DPRINTF(params) printf##params
7259910Spaul#else
7359910Spaul#define DPRINTF(params)
7459910Spaul#endif
7559910Spaul
/* Private malloc type for all resource-manager allocations. */
7645569Seivindstatic MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");
7740711Swollman
/* Global list of all resource managers, protected by rman_mtx. */
7840711Swollmanstruct	rman_head rman_head;
7971576Sjasonestatic	struct mtx rman_mtx; /* mutex to protect rman_head */
/* Internal helpers; callers must hold the manager's rm_mtx. */
8040711Swollmanstatic	int int_rman_activate_resource(struct rman *rm, struct resource *r,
8140711Swollman				       struct resource **whohas);
8245720Speterstatic	int int_rman_deactivate_resource(struct resource *r);
8340711Swollmanstatic	int int_rman_release_resource(struct rman *rm, struct resource *r);
8440711Swollman
8540711Swollmanint
8640711Swollmanrman_init(struct rman *rm)
8740711Swollman{
8840711Swollman	static int once;
8940711Swollman
9040711Swollman	if (once == 0) {
9140711Swollman		once = 1;
9240711Swollman		TAILQ_INIT(&rman_head);
9371576Sjasone		mtx_init(&rman_mtx, "rman head", MTX_DEF);
9440711Swollman	}
9540711Swollman
9640711Swollman	if (rm->rm_type == RMAN_UNINIT)
9740711Swollman		panic("rman_init");
9840711Swollman	if (rm->rm_type == RMAN_GAUGE)
9940711Swollman		panic("implement RMAN_GAUGE");
10040711Swollman
10168727Smckusick	TAILQ_INIT(&rm->rm_list);
10284781Sjhb	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
10371576Sjasone	if (rm->rm_mtx == 0)
10440711Swollman		return ENOMEM;
10571576Sjasone	mtx_init(rm->rm_mtx, "rman", MTX_DEF);
10640711Swollman
10772200Sbmilekic	mtx_lock(&rman_mtx);
10840711Swollman	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
10972200Sbmilekic	mtx_unlock(&rman_mtx);
11040711Swollman	return 0;
11140711Swollman}
11240711Swollman
11340711Swollman/*
11440711Swollman * NB: this interface is not robust against programming errors which
11540711Swollman * add multiple copies of the same region.
11640711Swollman */
11740711Swollmanint
11840711Swollmanrman_manage_region(struct rman *rm, u_long start, u_long end)
11940711Swollman{
12040711Swollman	struct resource *r, *s;
12140711Swollman
12269781Sdwmalone	r = malloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
12340711Swollman	if (r == 0)
12440711Swollman		return ENOMEM;
12540711Swollman	r->r_sharehead = 0;
12640711Swollman	r->r_start = start;
12740711Swollman	r->r_end = end;
12840711Swollman	r->r_flags = 0;
12940711Swollman	r->r_dev = 0;
13040711Swollman	r->r_rm = rm;
13140711Swollman
13272200Sbmilekic	mtx_lock(rm->rm_mtx);
13368727Smckusick	for (s = TAILQ_FIRST(&rm->rm_list);
13468727Smckusick	     s && s->r_end < r->r_start;
13568727Smckusick	     s = TAILQ_NEXT(s, r_link))
13640711Swollman		;
13740711Swollman
13868727Smckusick	if (s == NULL) {
13968727Smckusick		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
14040711Swollman	} else {
14168727Smckusick		TAILQ_INSERT_BEFORE(s, r, r_link);
14240711Swollman	}
14340711Swollman
14472200Sbmilekic	mtx_unlock(rm->rm_mtx);
14540711Swollman	return 0;
14640711Swollman}
14740711Swollman
14840711Swollmanint
14940711Swollmanrman_fini(struct rman *rm)
15040711Swollman{
15140711Swollman	struct resource *r;
15240711Swollman
15372200Sbmilekic	mtx_lock(rm->rm_mtx);
15468727Smckusick	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
15545720Speter		if (r->r_flags & RF_ALLOCATED) {
15672200Sbmilekic			mtx_unlock(rm->rm_mtx);
15740711Swollman			return EBUSY;
15845720Speter		}
15940711Swollman	}
16040711Swollman
16140711Swollman	/*
16240711Swollman	 * There really should only be one of these if we are in this
16340711Swollman	 * state and the code is working properly, but it can't hurt.
16440711Swollman	 */
16568727Smckusick	while (!TAILQ_EMPTY(&rm->rm_list)) {
16668727Smckusick		r = TAILQ_FIRST(&rm->rm_list);
16768727Smckusick		TAILQ_REMOVE(&rm->rm_list, r, r_link);
16840711Swollman		free(r, M_RMAN);
16940711Swollman	}
17072200Sbmilekic	mtx_unlock(rm->rm_mtx);
17172200Sbmilekic	mtx_lock(&rman_mtx);
17240711Swollman	TAILQ_REMOVE(&rman_head, rm, rm_link);
17372200Sbmilekic	mtx_unlock(&rman_mtx);
17471576Sjasone	mtx_destroy(rm->rm_mtx);
17571576Sjasone	free(rm->rm_mtx, M_RMAN);
17640711Swollman
17740711Swollman	return 0;
17840711Swollman}
17940711Swollman
18040711Swollmanstruct resource *
18140711Swollmanrman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
18240711Swollman		      u_int flags, struct device *dev)
18340711Swollman{
18440711Swollman	u_int	want_activate;
18540711Swollman	struct	resource *r, *s, *rv;
18640711Swollman	u_long	rstart, rend;
18740711Swollman
18840711Swollman	rv = 0;
18940711Swollman
19059910Spaul	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
19177288Sbrian	       "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count,
19277288Sbrian	       flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
19340711Swollman	want_activate = (flags & RF_ACTIVE);
19440711Swollman	flags &= ~RF_ACTIVE;
19540711Swollman
19672200Sbmilekic	mtx_lock(rm->rm_mtx);
19740711Swollman
19868727Smckusick	for (r = TAILQ_FIRST(&rm->rm_list);
19968727Smckusick	     r && r->r_end < start;
20068727Smckusick	     r = TAILQ_NEXT(r, r_link))
20140711Swollman		;
20240711Swollman
20368727Smckusick	if (r == NULL) {
20459910Spaul		DPRINTF(("could not find a region\n"));
20540711Swollman		goto out;
20640711Swollman	}
20740711Swollman
20840711Swollman	/*
20940711Swollman	 * First try to find an acceptable totally-unshared region.
21040711Swollman	 */
21168727Smckusick	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
21259910Spaul		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
21340711Swollman		if (s->r_start > end) {
21459910Spaul			DPRINTF(("s->r_start (%#lx) > end (%#lx)\n", s->r_start, end));
21540711Swollman			break;
21640711Swollman		}
21740711Swollman		if (s->r_flags & RF_ALLOCATED) {
21859910Spaul			DPRINTF(("region is allocated\n"));
21940711Swollman			continue;
22040711Swollman		}
22140711Swollman		rstart = max(s->r_start, start);
22267261Simp		rstart = (rstart + ((1ul << RF_ALIGNMENT(flags))) - 1) &
22367261Simp		    ~((1ul << RF_ALIGNMENT(flags)) - 1);
22467261Simp		rend = min(s->r_end, max(rstart + count, end));
22559910Spaul		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
22659910Spaul		       rstart, rend, (rend - rstart + 1), count));
22740711Swollman
22840711Swollman		if ((rend - rstart + 1) >= count) {
22959910Spaul			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
23059910Spaul			       rend, rstart, (rend - rstart + 1)));
23140711Swollman			if ((s->r_end - s->r_start + 1) == count) {
23259910Spaul				DPRINTF(("candidate region is entire chunk\n"));
23340711Swollman				rv = s;
23448235Sdfr				rv->r_flags |= RF_ALLOCATED | flags;
23540711Swollman				rv->r_dev = dev;
23640711Swollman				goto out;
23740711Swollman			}
23840711Swollman
23940711Swollman			/*
24040711Swollman			 * If s->r_start < rstart and
24140711Swollman			 *    s->r_end > rstart + count - 1, then
24240711Swollman			 * we need to split the region into three pieces
24340711Swollman			 * (the middle one will get returned to the user).
24440711Swollman			 * Otherwise, we are allocating at either the
24540711Swollman			 * beginning or the end of s, so we only need to
24640711Swollman			 * split it in two.  The first case requires
24740711Swollman			 * two new allocations; the second requires but one.
24840711Swollman			 */
24969781Sdwmalone			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
25040711Swollman			if (rv == 0)
25140711Swollman				goto out;
25240711Swollman			rv->r_start = rstart;
25340711Swollman			rv->r_end = rstart + count - 1;
25440711Swollman			rv->r_flags = flags | RF_ALLOCATED;
25540711Swollman			rv->r_dev = dev;
25640711Swollman			rv->r_sharehead = 0;
25745720Speter			rv->r_rm = rm;
25840711Swollman
25940711Swollman			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
26059910Spaul				DPRINTF(("splitting region in three parts: "
26140711Swollman				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
26240711Swollman				       s->r_start, rv->r_start - 1,
26340711Swollman				       rv->r_start, rv->r_end,
26459910Spaul				       rv->r_end + 1, s->r_end));
26540711Swollman				/*
26640711Swollman				 * We are allocating in the middle.
26740711Swollman				 */
26869781Sdwmalone				r = malloc(sizeof *r, M_RMAN, M_NOWAIT|M_ZERO);
26940711Swollman				if (r == 0) {
27040711Swollman					free(rv, M_RMAN);
27140711Swollman					rv = 0;
27240711Swollman					goto out;
27340711Swollman				}
27440711Swollman				r->r_start = rv->r_end + 1;
27540711Swollman				r->r_end = s->r_end;
27640711Swollman				r->r_flags = s->r_flags;
27740711Swollman				r->r_dev = 0;
27840711Swollman				r->r_sharehead = 0;
27945720Speter				r->r_rm = rm;
28040711Swollman				s->r_end = rv->r_start - 1;
28168727Smckusick				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
28240711Swollman						     r_link);
28368727Smckusick				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
28440711Swollman						     r_link);
28540711Swollman			} else if (s->r_start == rv->r_start) {
28659910Spaul				DPRINTF(("allocating from the beginning\n"));
28740711Swollman				/*
28840711Swollman				 * We are allocating at the beginning.
28940711Swollman				 */
29040711Swollman				s->r_start = rv->r_end + 1;
29168727Smckusick				TAILQ_INSERT_BEFORE(s, rv, r_link);
29240711Swollman			} else {
29359910Spaul				DPRINTF(("allocating at the end\n"));
29440711Swollman				/*
29540711Swollman				 * We are allocating at the end.
29640711Swollman				 */
29740711Swollman				s->r_end = rv->r_start - 1;
29868727Smckusick				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
29940711Swollman						     r_link);
30040711Swollman			}
30140711Swollman			goto out;
30240711Swollman		}
30340711Swollman	}
30440711Swollman
30540711Swollman	/*
30640711Swollman	 * Now find an acceptable shared region, if the client's requirements
30740711Swollman	 * allow sharing.  By our implementation restriction, a candidate
30840711Swollman	 * region must match exactly by both size and sharing type in order
30940711Swollman	 * to be considered compatible with the client's request.  (The
31040711Swollman	 * former restriction could probably be lifted without too much
31140711Swollman	 * additional work, but this does not seem warranted.)
31240711Swollman	 */
31359910Spaul	DPRINTF(("no unshared regions found\n"));
31440711Swollman	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
31540711Swollman		goto out;
31640711Swollman
31768727Smckusick	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
31840711Swollman		if (s->r_start > end)
31940711Swollman			break;
32040711Swollman		if ((s->r_flags & flags) != flags)
32140711Swollman			continue;
32240711Swollman		rstart = max(s->r_start, start);
32340711Swollman		rend = min(s->r_end, max(start + count, end));
32440711Swollman		if (s->r_start >= start && s->r_end <= end
32540711Swollman		    && (s->r_end - s->r_start + 1) == count) {
32669781Sdwmalone			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
32740711Swollman			if (rv == 0)
32840711Swollman				goto out;
32940711Swollman			rv->r_start = s->r_start;
33040711Swollman			rv->r_end = s->r_end;
33140711Swollman			rv->r_flags = s->r_flags &
33240711Swollman				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
33340711Swollman			rv->r_dev = dev;
33440711Swollman			rv->r_rm = rm;
33540711Swollman			if (s->r_sharehead == 0) {
33640711Swollman				s->r_sharehead = malloc(sizeof *s->r_sharehead,
33769781Sdwmalone						M_RMAN, M_NOWAIT | M_ZERO);
33840711Swollman				if (s->r_sharehead == 0) {
33940711Swollman					free(rv, M_RMAN);
34040711Swollman					rv = 0;
34140711Swollman					goto out;
34240711Swollman				}
34340711Swollman				LIST_INIT(s->r_sharehead);
34440711Swollman				LIST_INSERT_HEAD(s->r_sharehead, s,
34540711Swollman						 r_sharelink);
34645106Sdfr				s->r_flags |= RF_FIRSTSHARE;
34740711Swollman			}
34840711Swollman			rv->r_sharehead = s->r_sharehead;
34940711Swollman			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
35040711Swollman			goto out;
35140711Swollman		}
35240711Swollman	}
35340711Swollman
35440711Swollman	/*
35540711Swollman	 * We couldn't find anything.
35640711Swollman	 */
35740711Swollmanout:
35840711Swollman	/*
35940711Swollman	 * If the user specified RF_ACTIVE in the initial flags,
36040711Swollman	 * which is reflected in `want_activate', we attempt to atomically
36140711Swollman	 * activate the resource.  If this fails, we release the resource
36240711Swollman	 * and indicate overall failure.  (This behavior probably doesn't
36340711Swollman	 * make sense for RF_TIMESHARE-type resources.)
36440711Swollman	 */
36540711Swollman	if (rv && want_activate) {
36640711Swollman		struct resource *whohas;
36740711Swollman		if (int_rman_activate_resource(rm, rv, &whohas)) {
36840711Swollman			int_rman_release_resource(rm, rv);
36940711Swollman			rv = 0;
37040711Swollman		}
37140711Swollman	}
37240711Swollman
37372200Sbmilekic	mtx_unlock(rm->rm_mtx);
37440711Swollman	return (rv);
37540711Swollman}
37640711Swollman
37740711Swollmanstatic int
37840711Swollmanint_rman_activate_resource(struct rman *rm, struct resource *r,
37940711Swollman			   struct resource **whohas)
38040711Swollman{
38140711Swollman	struct resource *s;
38240711Swollman	int ok;
38340711Swollman
38440711Swollman	/*
38540711Swollman	 * If we are not timesharing, then there is nothing much to do.
38640711Swollman	 * If we already have the resource, then there is nothing at all to do.
38740711Swollman	 * If we are not on a sharing list with anybody else, then there is
38840711Swollman	 * little to do.
38940711Swollman	 */
39040711Swollman	if ((r->r_flags & RF_TIMESHARE) == 0
39140711Swollman	    || (r->r_flags & RF_ACTIVE) != 0
39240711Swollman	    || r->r_sharehead == 0) {
39340711Swollman		r->r_flags |= RF_ACTIVE;
39440711Swollman		return 0;
39540711Swollman	}
39640711Swollman
39740711Swollman	ok = 1;
39853225Sphk	for (s = LIST_FIRST(r->r_sharehead); s && ok;
39953225Sphk	     s = LIST_NEXT(s, r_sharelink)) {
40040711Swollman		if ((s->r_flags & RF_ACTIVE) != 0) {
40140711Swollman			ok = 0;
40240711Swollman			*whohas = s;
40340711Swollman		}
40440711Swollman	}
40540711Swollman	if (ok) {
40640711Swollman		r->r_flags |= RF_ACTIVE;
40740711Swollman		return 0;
40840711Swollman	}
40940711Swollman	return EBUSY;
41040711Swollman}
41140711Swollman
41240711Swollmanint
41340711Swollmanrman_activate_resource(struct resource *r)
41440711Swollman{
41540711Swollman	int rv;
41640711Swollman	struct resource *whohas;
41740711Swollman	struct rman *rm;
41840711Swollman
41940711Swollman	rm = r->r_rm;
42072200Sbmilekic	mtx_lock(rm->rm_mtx);
42140711Swollman	rv = int_rman_activate_resource(rm, r, &whohas);
42272200Sbmilekic	mtx_unlock(rm->rm_mtx);
42340711Swollman	return rv;
42440711Swollman}
42540711Swollman
42640711Swollmanint
42740711Swollmanrman_await_resource(struct resource *r, int pri, int timo)
42840711Swollman{
42985519Sjhb	int	rv;
43040711Swollman	struct	resource *whohas;
43140711Swollman	struct	rman *rm;
43240711Swollman
43340711Swollman	rm = r->r_rm;
43485519Sjhb	mtx_lock(rm->rm_mtx);
43540711Swollman	for (;;) {
43640711Swollman		rv = int_rman_activate_resource(rm, r, &whohas);
43740711Swollman		if (rv != EBUSY)
43871576Sjasone			return (rv);	/* returns with mutex held */
43940711Swollman
44040711Swollman		if (r->r_sharehead == 0)
44140711Swollman			panic("rman_await_resource");
44240711Swollman		whohas->r_flags |= RF_WANTED;
44385519Sjhb		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
44440711Swollman		if (rv) {
44585519Sjhb			mtx_unlock(rm->rm_mtx);
44685519Sjhb			return (rv);
44740711Swollman		}
44840711Swollman	}
44940711Swollman}
45040711Swollman
45145720Speterstatic int
45245720Speterint_rman_deactivate_resource(struct resource *r)
45340711Swollman{
45440711Swollman	struct	rman *rm;
45540711Swollman
45640711Swollman	rm = r->r_rm;
45740711Swollman	r->r_flags &= ~RF_ACTIVE;
45840711Swollman	if (r->r_flags & RF_WANTED) {
45940711Swollman		r->r_flags &= ~RF_WANTED;
46040711Swollman		wakeup(r->r_sharehead);
46140711Swollman	}
46245720Speter	return 0;
46345720Speter}
46445720Speter
46545720Speterint
46645720Speterrman_deactivate_resource(struct resource *r)
46745720Speter{
46845720Speter	struct	rman *rm;
46945720Speter
47045720Speter	rm = r->r_rm;
47172200Sbmilekic	mtx_lock(rm->rm_mtx);
47245720Speter	int_rman_deactivate_resource(r);
47372200Sbmilekic	mtx_unlock(rm->rm_mtx);
47440711Swollman	return 0;
47540711Swollman}
47640711Swollman
47740711Swollmanstatic int
47840711Swollmanint_rman_release_resource(struct rman *rm, struct resource *r)
47940711Swollman{
48040711Swollman	struct	resource *s, *t;
48140711Swollman
48240711Swollman	if (r->r_flags & RF_ACTIVE)
48345720Speter		int_rman_deactivate_resource(r);
48440711Swollman
48540711Swollman	/*
48640711Swollman	 * Check for a sharing list first.  If there is one, then we don't
48740711Swollman	 * have to think as hard.
48840711Swollman	 */
48940711Swollman	if (r->r_sharehead) {
49040711Swollman		/*
49140711Swollman		 * If a sharing list exists, then we know there are at
49240711Swollman		 * least two sharers.
49340711Swollman		 *
49440711Swollman		 * If we are in the main circleq, appoint someone else.
49540711Swollman		 */
49640711Swollman		LIST_REMOVE(r, r_sharelink);
49753225Sphk		s = LIST_FIRST(r->r_sharehead);
49840711Swollman		if (r->r_flags & RF_FIRSTSHARE) {
49940711Swollman			s->r_flags |= RF_FIRSTSHARE;
50068727Smckusick			TAILQ_INSERT_BEFORE(r, s, r_link);
50168727Smckusick			TAILQ_REMOVE(&rm->rm_list, r, r_link);
50240711Swollman		}
50340711Swollman
50440711Swollman		/*
50540711Swollman		 * Make sure that the sharing list goes away completely
50640711Swollman		 * if the resource is no longer being shared at all.
50740711Swollman		 */
50853225Sphk		if (LIST_NEXT(s, r_sharelink) == 0) {
50940711Swollman			free(s->r_sharehead, M_RMAN);
51040711Swollman			s->r_sharehead = 0;
51140711Swollman			s->r_flags &= ~RF_FIRSTSHARE;
51240711Swollman		}
51340711Swollman		goto out;
51440711Swollman	}
51540711Swollman
51640711Swollman	/*
51740711Swollman	 * Look at the adjacent resources in the list and see if our
51840711Swollman	 * segment can be merged with any of them.
51940711Swollman	 */
52068727Smckusick	s = TAILQ_PREV(r, resource_head, r_link);
52168727Smckusick	t = TAILQ_NEXT(r, r_link);
52240711Swollman
52368764Smckusick	if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0
52468764Smckusick	    && t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
52540711Swollman		/*
52640711Swollman		 * Merge all three segments.
52740711Swollman		 */
52840711Swollman		s->r_end = t->r_end;
52968727Smckusick		TAILQ_REMOVE(&rm->rm_list, r, r_link);
53068727Smckusick		TAILQ_REMOVE(&rm->rm_list, t, r_link);
53140711Swollman		free(t, M_RMAN);
53268764Smckusick	} else if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0) {
53340711Swollman		/*
53440711Swollman		 * Merge previous segment with ours.
53540711Swollman		 */
53640711Swollman		s->r_end = r->r_end;
53768727Smckusick		TAILQ_REMOVE(&rm->rm_list, r, r_link);
53868764Smckusick	} else if (t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
53940711Swollman		/*
54040711Swollman		 * Merge next segment with ours.
54140711Swollman		 */
54240711Swollman		t->r_start = r->r_start;
54368727Smckusick		TAILQ_REMOVE(&rm->rm_list, r, r_link);
54440711Swollman	} else {
54540711Swollman		/*
54640711Swollman		 * At this point, we know there is nothing we
54740711Swollman		 * can potentially merge with, because on each
54840711Swollman		 * side, there is either nothing there or what is
54940711Swollman		 * there is still allocated.  In that case, we don't
55040711Swollman		 * want to remove r from the list; we simply want to
55140711Swollman		 * change it to an unallocated region and return
55240711Swollman		 * without freeing anything.
55340711Swollman		 */
55440711Swollman		r->r_flags &= ~RF_ALLOCATED;
55540711Swollman		return 0;
55640711Swollman	}
55740711Swollman
55840711Swollmanout:
55940711Swollman	free(r, M_RMAN);
56040711Swollman	return 0;
56140711Swollman}
56240711Swollman
56340711Swollmanint
56440711Swollmanrman_release_resource(struct resource *r)
56540711Swollman{
56640711Swollman	int	rv;
56740711Swollman	struct	rman *rm = r->r_rm;
56840711Swollman
56972200Sbmilekic	mtx_lock(rm->rm_mtx);
57040711Swollman	rv = int_rman_release_resource(rm, r);
57172200Sbmilekic	mtx_unlock(rm->rm_mtx);
57240711Swollman	return (rv);
57340711Swollman}
57467261Simp
57567261Simpuint32_t
57667261Simprman_make_alignment_flags(uint32_t size)
57767261Simp{
57867261Simp	int	i;
57967261Simp
58067425Simp	/*
58167425Simp	 * Find the hightest bit set, and add one if more than one bit
58267425Simp	 * set.  We're effectively computing the ceil(log2(size)) here.
58367425Simp	 */
58467425Simp	for (i = 32; i > 0; i--)
58567425Simp		if ((1 << i) & size)
58667425Simp			break;
58767425Simp	if (~(1 << i) & size)
58867425Simp		i++;
58967261Simp
59067261Simp	return(RF_ALIGNMENT_LOG2(i));
59167425Simp}
592