/* subr_rman.c, FreeBSD revision 150547 */
1139804Simp/*-
240711Swollman * Copyright 1998 Massachusetts Institute of Technology
340711Swollman *
440711Swollman * Permission to use, copy, modify, and distribute this software and
540711Swollman * its documentation for any purpose and without fee is hereby
640711Swollman * granted, provided that both the above copyright notice and this
740711Swollman * permission notice appear in all copies, that both the above
840711Swollman * copyright notice and this permission notice appear in all
940711Swollman * supporting documentation, and that the name of M.I.T. not be used
1040711Swollman * in advertising or publicity pertaining to distribution of the
1140711Swollman * software without specific, written prior permission.  M.I.T. makes
1240711Swollman * no representations about the suitability of this software for any
1340711Swollman * purpose.  It is provided "as is" without express or implied
1440711Swollman * warranty.
1540711Swollman *
1640711Swollman * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
1740711Swollman * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
1840711Swollman * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
1940711Swollman * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
2040711Swollman * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2140711Swollman * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2240711Swollman * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
2340711Swollman * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2440711Swollman * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
2540711Swollman * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
2640711Swollman * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2740711Swollman * SUCH DAMAGE.
2840711Swollman */
2940711Swollman
3040711Swollman/*
3140711Swollman * The kernel resource manager.  This code is responsible for keeping track
3240711Swollman * of hardware resources which are apportioned out to various drivers.
3340711Swollman * It does not actually assign those resources, and it is not expected
3440711Swollman * that end-device drivers will call into this code directly.  Rather,
3540711Swollman * the code which implements the buses that those devices are attached to,
3640711Swollman * and the code which manages CPU resources, will call this code, and the
3740711Swollman * end-device drivers will make upcalls to that code to actually perform
3840711Swollman * the allocation.
3940711Swollman *
4040711Swollman * There are two sorts of resources managed by this code.  The first is
4140711Swollman * the more familiar array (RMAN_ARRAY) type; resources in this class
4240711Swollman * consist of a sequence of individually-allocatable objects which have
4340711Swollman * been numbered in some well-defined order.  Most of the resources
4440711Swollman * are of this type, as it is the most familiar.  The second type is
4540711Swollman * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
4640711Swollman * resources in which each instance is indistinguishable from every
4740711Swollman * other instance).  The principal anticipated application of gauges
4840711Swollman * is in the context of power consumption, where a bus may have a specific
4940711Swollman * power budget which all attached devices share.  RMAN_GAUGE is not
5040711Swollman * implemented yet.
5140711Swollman *
5240711Swollman * For array resources, we make one simplifying assumption: two clients
5340711Swollman * sharing the same resource must use the same range of indices.  That
5440711Swollman * is to say, sharing of overlapping-but-not-identical regions is not
5540711Swollman * permitted.
5640711Swollman */
5740711Swollman
58116182Sobrien#include <sys/cdefs.h>
59116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 150547 2005-09-25 20:10:10Z phk $");
60116182Sobrien
61131344Simp#define __RMAN_RESOURCE_VISIBLE
6240711Swollman#include <sys/param.h>
6340711Swollman#include <sys/systm.h>
6441304Sbde#include <sys/kernel.h>
6540711Swollman#include <sys/lock.h>
6640711Swollman#include <sys/malloc.h>
6771576Sjasone#include <sys/mutex.h>
6845720Speter#include <sys/bus.h>		/* XXX debugging */
6945720Speter#include <machine/bus.h>
7040711Swollman#include <sys/rman.h>
71102962Siwasaki#include <sys/sysctl.h>
7240711Swollman
73102962Siwasakiint     rman_debug = 0;
74102962SiwasakiTUNABLE_INT("debug.rman_debug", &rman_debug);
75102962SiwasakiSYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
76102962Siwasaki    &rman_debug, 0, "rman debug");
7759910Spaul
78102962Siwasaki#define DPRINTF(params) if (rman_debug) printf params
79102962Siwasaki
8045569Seivindstatic MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");
8140711Swollman
8240711Swollmanstruct	rman_head rman_head;
8371576Sjasonestatic	struct mtx rman_mtx; /* mutex to protect rman_head */
84150523Sphkstatic	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
85150523Sphk				       struct resource_i **whohas);
86150523Sphkstatic	int int_rman_deactivate_resource(struct resource_i *r);
87150523Sphkstatic	int int_rman_release_resource(struct rman *rm, struct resource_i *r);
8840711Swollman
89150523Sphkstatic __inline struct resource_i *
90150523Sphkint_alloc_resource(int malloc_flag)
91150523Sphk{
92150523Sphk	struct resource_i *r;
93150523Sphk
94150523Sphk	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
95150523Sphk	if (r != NULL) {
96150523Sphk		r->r_r.__r_i = r;
97150523Sphk	}
98150523Sphk	return (r);
99150523Sphk}
100150523Sphk
10140711Swollmanint
10240711Swollmanrman_init(struct rman *rm)
10340711Swollman{
10440711Swollman	static int once;
10540711Swollman
10640711Swollman	if (once == 0) {
10740711Swollman		once = 1;
10840711Swollman		TAILQ_INIT(&rman_head);
10993818Sjhb		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
11040711Swollman	}
11140711Swollman
11240711Swollman	if (rm->rm_type == RMAN_UNINIT)
11340711Swollman		panic("rman_init");
11440711Swollman	if (rm->rm_type == RMAN_GAUGE)
11540711Swollman		panic("implement RMAN_GAUGE");
11640711Swollman
11768727Smckusick	TAILQ_INIT(&rm->rm_list);
11884781Sjhb	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
11971576Sjasone	if (rm->rm_mtx == 0)
12040711Swollman		return ENOMEM;
12193818Sjhb	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);
12240711Swollman
12372200Sbmilekic	mtx_lock(&rman_mtx);
12440711Swollman	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
12572200Sbmilekic	mtx_unlock(&rman_mtx);
12640711Swollman	return 0;
12740711Swollman}
12840711Swollman
12940711Swollman/*
13040711Swollman * NB: this interface is not robust against programming errors which
13140711Swollman * add multiple copies of the same region.
13240711Swollman */
13340711Swollmanint
13440711Swollmanrman_manage_region(struct rman *rm, u_long start, u_long end)
13540711Swollman{
136150523Sphk	struct resource_i *r, *s;
13740711Swollman
138134040Snjl	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
139134021Snjl	    rm->rm_descr, start, end));
140150523Sphk	r = int_alloc_resource(M_NOWAIT);
14140711Swollman	if (r == 0)
14240711Swollman		return ENOMEM;
14340711Swollman	r->r_start = start;
14440711Swollman	r->r_end = end;
14540711Swollman	r->r_rm = rm;
14640711Swollman
14772200Sbmilekic	mtx_lock(rm->rm_mtx);
14868727Smckusick	for (s = TAILQ_FIRST(&rm->rm_list);
14968727Smckusick	     s && s->r_end < r->r_start;
15068727Smckusick	     s = TAILQ_NEXT(s, r_link))
15140711Swollman		;
15240711Swollman
15368727Smckusick	if (s == NULL) {
15468727Smckusick		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
15540711Swollman	} else {
15668727Smckusick		TAILQ_INSERT_BEFORE(s, r, r_link);
15740711Swollman	}
15840711Swollman
15972200Sbmilekic	mtx_unlock(rm->rm_mtx);
16040711Swollman	return 0;
16140711Swollman}
16240711Swollman
16340711Swollmanint
16440711Swollmanrman_fini(struct rman *rm)
16540711Swollman{
166150523Sphk	struct resource_i *r;
16740711Swollman
16872200Sbmilekic	mtx_lock(rm->rm_mtx);
16968727Smckusick	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
17045720Speter		if (r->r_flags & RF_ALLOCATED) {
17172200Sbmilekic			mtx_unlock(rm->rm_mtx);
17240711Swollman			return EBUSY;
17345720Speter		}
17440711Swollman	}
17540711Swollman
17640711Swollman	/*
17740711Swollman	 * There really should only be one of these if we are in this
17840711Swollman	 * state and the code is working properly, but it can't hurt.
17940711Swollman	 */
18068727Smckusick	while (!TAILQ_EMPTY(&rm->rm_list)) {
18168727Smckusick		r = TAILQ_FIRST(&rm->rm_list);
18268727Smckusick		TAILQ_REMOVE(&rm->rm_list, r, r_link);
18340711Swollman		free(r, M_RMAN);
18440711Swollman	}
18572200Sbmilekic	mtx_unlock(rm->rm_mtx);
18672200Sbmilekic	mtx_lock(&rman_mtx);
18740711Swollman	TAILQ_REMOVE(&rman_head, rm, rm_link);
18872200Sbmilekic	mtx_unlock(&rman_mtx);
18971576Sjasone	mtx_destroy(rm->rm_mtx);
19071576Sjasone	free(rm->rm_mtx, M_RMAN);
19140711Swollman
19240711Swollman	return 0;
19340711Swollman}
19440711Swollman
/*
 * Allocate a resource of size `count' from manager `rm', constrained
 * to lie within [start, end], aligned according to RF_ALIGNMENT(flags)
 * and, if `bound' is nonzero, not crossing a `bound'-aligned address
 * boundary.  If no free range satisfies the request and the flags
 * permit sharing, an already-allocated region that matches the request
 * exactly (by range, size, alignment and sharing type) may be joined
 * instead.  If RF_ACTIVE was requested, the resource is also activated
 * before return, and the whole reservation is undone if activation
 * fails.  Returns the new resource, or NULL on failure.
 *
 * NOTE(review): the failure path returns &rv->r_r with rv == NULL,
 * which is only NULL if r_r is at offset 0 of struct resource_i —
 * confirm against the layout in <sys/rman.h>.
 */
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = 0;

	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	       "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count,
	       flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
	/* RF_ACTIVE is handled at the end; strip it for the search. */
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	/* Skip regions that end entirely before the requested window. */
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	/* amask: low bits that must be clear in an aligned start address. */
	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/* List is sorted: once a region starts too late, give up. */
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1> end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			/* Exact fit: claim the whole descriptor in place. */
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == 0)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				/* r becomes the free tail after our slice. */
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		/* NOTE(review): rstart/rend are computed but unused below. */
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == 0)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			/* First sharer: create the sharing list lazily. */
			if (s->r_sharehead == 0) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = 0;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (&rv->r_r);
}
40840711Swollman
/*
 * Convenience wrapper around rman_reserve_resource_bound() for the
 * common case of no boundary-crossing restriction (bound == 0).
 */
struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
41788372Stmm
41840711Swollmanstatic int
419150523Sphkint_rman_activate_resource(struct rman *rm, struct resource_i *r,
420150523Sphk			   struct resource_i **whohas)
42140711Swollman{
422150523Sphk	struct resource_i *s;
42340711Swollman	int ok;
42440711Swollman
42540711Swollman	/*
42640711Swollman	 * If we are not timesharing, then there is nothing much to do.
42740711Swollman	 * If we already have the resource, then there is nothing at all to do.
42840711Swollman	 * If we are not on a sharing list with anybody else, then there is
42940711Swollman	 * little to do.
43040711Swollman	 */
43140711Swollman	if ((r->r_flags & RF_TIMESHARE) == 0
43240711Swollman	    || (r->r_flags & RF_ACTIVE) != 0
43340711Swollman	    || r->r_sharehead == 0) {
43440711Swollman		r->r_flags |= RF_ACTIVE;
43540711Swollman		return 0;
43640711Swollman	}
43740711Swollman
43840711Swollman	ok = 1;
43953225Sphk	for (s = LIST_FIRST(r->r_sharehead); s && ok;
44053225Sphk	     s = LIST_NEXT(s, r_sharelink)) {
44140711Swollman		if ((s->r_flags & RF_ACTIVE) != 0) {
44240711Swollman			ok = 0;
44340711Swollman			*whohas = s;
44440711Swollman		}
44540711Swollman	}
44640711Swollman	if (ok) {
44740711Swollman		r->r_flags |= RF_ACTIVE;
44840711Swollman		return 0;
44940711Swollman	}
45040711Swollman	return EBUSY;
45140711Swollman}
45240711Swollman
45340711Swollmanint
454150523Sphkrman_activate_resource(struct resource *re)
45540711Swollman{
45640711Swollman	int rv;
457150523Sphk	struct resource_i *r, *whohas;
45840711Swollman	struct rman *rm;
45940711Swollman
460150523Sphk	r = re->__r_i;
46140711Swollman	rm = r->r_rm;
46272200Sbmilekic	mtx_lock(rm->rm_mtx);
46340711Swollman	rv = int_rman_activate_resource(rm, r, &whohas);
46472200Sbmilekic	mtx_unlock(rm->rm_mtx);
46540711Swollman	return rv;
46640711Swollman}
46740711Swollman
/*
 * Activate resource re, sleeping (at priority pri, with timeout timo)
 * until any active sharer deactivates it.
 *
 * Locking contract: when activation succeeds (or fails with anything
 * other than EBUSY) this returns with the rman mutex HELD.  Only when
 * msleep() itself fails (e.g. timeout) is the mutex dropped before
 * the error is returned.  Panics if EBUSY is reported for a resource
 * with no sharing list, which int_rman_activate_resource() cannot do.
 */
int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		/*
		 * Ask the active holder to wake us on deactivation; msleep()
		 * atomically releases and re-acquires rm->rm_mtx.
		 */
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}
49340711Swollman
49445720Speterstatic int
495150523Sphkint_rman_deactivate_resource(struct resource_i *r)
49640711Swollman{
49740711Swollman
49840711Swollman	r->r_flags &= ~RF_ACTIVE;
49940711Swollman	if (r->r_flags & RF_WANTED) {
50040711Swollman		r->r_flags &= ~RF_WANTED;
50140711Swollman		wakeup(r->r_sharehead);
50240711Swollman	}
50345720Speter	return 0;
50445720Speter}
50545720Speter
50645720Speterint
50745720Speterrman_deactivate_resource(struct resource *r)
50845720Speter{
50945720Speter	struct	rman *rm;
51045720Speter
511150523Sphk	rm = r->__r_i->r_rm;
51272200Sbmilekic	mtx_lock(rm->rm_mtx);
513150523Sphk	int_rman_deactivate_resource(r->__r_i);
51472200Sbmilekic	mtx_unlock(rm->rm_mtx);
51540711Swollman	return 0;
51640711Swollman}
51740711Swollman
/*
 * Return resource r to manager rm.  Deactivates r if still active,
 * detaches it from its sharing list (promoting another sharer into
 * the main region list if r was the representative), or — for an
 * unshared resource — merges the freed range back with adjacent,
 * unallocated, exactly-abutting segments.  Frees r's descriptor
 * except in the no-merge case, where r is simply marked unallocated.
 * Caller must hold rm's mutex.  Always returns 0.
 */
static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			/* r represented the group on rm_list; s takes over. */
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == 0) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = 0;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
61040711Swollman
61140711Swollmanint
612150523Sphkrman_release_resource(struct resource *re)
61340711Swollman{
61440711Swollman	int	rv;
615150523Sphk	struct	resource_i *r;
616150523Sphk	struct	rman *rm;
61740711Swollman
618150523Sphk	r = re->__r_i;
619150523Sphk	rm = r->r_rm;
62072200Sbmilekic	mtx_lock(rm->rm_mtx);
62140711Swollman	rv = int_rman_release_resource(rm, r);
62272200Sbmilekic	mtx_unlock(rm->rm_mtx);
62340711Swollman	return (rv);
62440711Swollman}
62567261Simp
62667261Simpuint32_t
62767261Simprman_make_alignment_flags(uint32_t size)
62867261Simp{
62967261Simp	int	i;
63067261Simp
63167425Simp	/*
63267425Simp	 * Find the hightest bit set, and add one if more than one bit
63367425Simp	 * set.  We're effectively computing the ceil(log2(size)) here.
63467425Simp	 */
63588372Stmm	for (i = 31; i > 0; i--)
63667425Simp		if ((1 << i) & size)
63767425Simp			break;
63867425Simp	if (~(1 << i) & size)
63967425Simp		i++;
64067261Simp
64167261Simp	return(RF_ALIGNMENT_LOG2(i));
64267425Simp}
643107296Simp
/* Return the first address/index of the resource's range. */
u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}
649107296Simp
/* Return the last address/index of the resource's range (inclusive). */
u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}
655107296Simp
/* Return the size of the resource's range (end - start + 1). */
u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}
661107296Simp
/* Return the resource's RF_* flag bits. */
u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}
667107296Simp
/* Record the kernel virtual address associated with this resource. */
void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}
673107296Simp
/* Return the virtual address previously set by rman_set_virtual(). */
void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}
679107296Simp
/* Set the bus space tag (stored in the public struct, not __r_i). */
void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}
685107296Simp
/* Return the bus space tag previously set by rman_set_bustag(). */
bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}
691107296Simp
/* Set the bus space handle (stored in the public struct, not __r_i). */
void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}
697107296Simp
/* Return the bus space handle previously set by rman_set_bushandle(). */
bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}
703107296Simp
/* Record the resource ID (rid) for this resource. */
void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}
709107296Simp
/* Overwrite the recorded start address; no region-list revalidation. */
void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}
715131414Simp
/* Overwrite the recorded end address; no region-list revalidation. */
void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}
721131414Simp
/* Return the resource ID previously set by rman_set_rid(). */
int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}
727110753Simp
/* Return the device this resource was reserved for (may be NULL). */
struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}
733144071Sphk
/* Reassign the device associated with this resource. */
void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}
739144932Simp
/* Return non-zero iff resource r belongs to resource manager rm. */
int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}
746150547Sphk
747144071Sphk/*
748144071Sphk * Sysctl interface for scanning the resource lists.
749144071Sphk *
750144071Sphk * We take two input parameters; the index into the list of resource
751144071Sphk * managers, and the resource offset into the list.
752144071Sphk */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	/* Expect exactly {generation, rman index, resource index}. */
	if (namelen != 3)
		return (EINVAL);

	/* Reject stale snapshots of the device tree. */
	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 *
	 * NOTE(review): rman_head is walked here without taking rman_mtx —
	 * confirm sysctl readers are safe against concurrent rman_init/fini.
	 */
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			/* Export "name+unit", "nomatch", or empty string. */
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	return (ENOENT);
}
831144071Sphk
832144071SphkSYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
833144071Sphk    "kernel resource manager");
834144071Sphk
835