subr_rman.c revision 160958
1139804Simp/*-
240711Swollman * Copyright 1998 Massachusetts Institute of Technology
340711Swollman *
440711Swollman * Permission to use, copy, modify, and distribute this software and
540711Swollman * its documentation for any purpose and without fee is hereby
640711Swollman * granted, provided that both the above copyright notice and this
740711Swollman * permission notice appear in all copies, that both the above
840711Swollman * copyright notice and this permission notice appear in all
940711Swollman * supporting documentation, and that the name of M.I.T. not be used
1040711Swollman * in advertising or publicity pertaining to distribution of the
1140711Swollman * software without specific, written prior permission.  M.I.T. makes
1240711Swollman * no representations about the suitability of this software for any
1340711Swollman * purpose.  It is provided "as is" without express or implied
1440711Swollman * warranty.
15152543Syongari *
1640711Swollman * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
1740711Swollman * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
1840711Swollman * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
1940711Swollman * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
2040711Swollman * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2140711Swollman * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2240711Swollman * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
2340711Swollman * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2440711Swollman * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
2540711Swollman * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
2640711Swollman * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2740711Swollman * SUCH DAMAGE.
2840711Swollman */
2940711Swollman
3040711Swollman/*
3140711Swollman * The kernel resource manager.  This code is responsible for keeping track
3240711Swollman * of hardware resources which are apportioned out to various drivers.
3340711Swollman * It does not actually assign those resources, and it is not expected
3440711Swollman * that end-device drivers will call into this code directly.  Rather,
3540711Swollman * the code which implements the buses that those devices are attached to,
3640711Swollman * and the code which manages CPU resources, will call this code, and the
3740711Swollman * end-device drivers will make upcalls to that code to actually perform
3840711Swollman * the allocation.
3940711Swollman *
4040711Swollman * There are two sorts of resources managed by this code.  The first is
4140711Swollman * the more familiar array (RMAN_ARRAY) type; resources in this class
4240711Swollman * consist of a sequence of individually-allocatable objects which have
4340711Swollman * been numbered in some well-defined order.  Most of the resources
4440711Swollman * are of this type, as it is the most familiar.  The second type is
4540711Swollman * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
4640711Swollman * resources in which each instance is indistinguishable from every
4740711Swollman * other instance).  The principal anticipated application of gauges
4840711Swollman * is in the context of power consumption, where a bus may have a specific
4940711Swollman * power budget which all attached devices share.  RMAN_GAUGE is not
5040711Swollman * implemented yet.
5140711Swollman *
5240711Swollman * For array resources, we make one simplifying assumption: two clients
5340711Swollman * sharing the same resource must use the same range of indices.  That
5440711Swollman * is to say, sharing of overlapping-but-not-identical regions is not
5540711Swollman * permitted.
5640711Swollman */
5740711Swollman
58116182Sobrien#include <sys/cdefs.h>
59116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 160958 2006-08-03 21:19:13Z jb $");
60116182Sobrien
6140711Swollman#include <sys/param.h>
6240711Swollman#include <sys/systm.h>
6341304Sbde#include <sys/kernel.h>
6440711Swollman#include <sys/lock.h>
6540711Swollman#include <sys/malloc.h>
6671576Sjasone#include <sys/mutex.h>
6745720Speter#include <sys/bus.h>		/* XXX debugging */
6845720Speter#include <machine/bus.h>
6940711Swollman#include <sys/rman.h>
70102962Siwasaki#include <sys/sysctl.h>
7140711Swollman
72151037Sphk/*
73151037Sphk * We use a linked list rather than a bitmap because we need to be able to
74151037Sphk * represent potentially huge objects (like all of a processor's physical
75151037Sphk * address space).  That is also why the indices are defined to have type
76151037Sphk * `unsigned long' -- that being the largest integral type in ISO C (1990).
77151037Sphk * The 1999 version of C allows `long long'; we may need to switch to that
78151037Sphk * at some point in the future, particularly if we want to support 36-bit
79151037Sphk * addresses on IA32 hardware.
80151037Sphk */
struct resource_i {
	struct resource		r_r;	/* public view; r_r.__r_i points back at this struct */
	TAILQ_ENTRY(resource_i)	r_link;	/* entry in the owning rman's sorted region list */
	LIST_ENTRY(resource_i)	r_sharelink;	/* entry in the share list, if this region is shared */
	LIST_HEAD(, resource_i)	*r_sharehead;	/* head of the share list; NULL when unshared */
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;	/* RF_* flags (allocated, shareable, active, ...) */
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};
94151037Sphk
95102962Siwasakiint     rman_debug = 0;
96102962SiwasakiTUNABLE_INT("debug.rman_debug", &rman_debug);
97102962SiwasakiSYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
98102962Siwasaki    &rman_debug, 0, "rman debug");
9959910Spaul
100102962Siwasaki#define DPRINTF(params) if (rman_debug) printf params
101102962Siwasaki
10245569Seivindstatic MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");
10340711Swollman
10440711Swollmanstruct	rman_head rman_head;
10571576Sjasonestatic	struct mtx rman_mtx; /* mutex to protect rman_head */
106150523Sphkstatic	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
107150523Sphk				       struct resource_i **whohas);
108150523Sphkstatic	int int_rman_deactivate_resource(struct resource_i *r);
109150523Sphkstatic	int int_rman_release_resource(struct rman *rm, struct resource_i *r);
11040711Swollman
111150523Sphkstatic __inline struct resource_i *
112150523Sphkint_alloc_resource(int malloc_flag)
113150523Sphk{
114150523Sphk	struct resource_i *r;
115150523Sphk
116150523Sphk	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
117150523Sphk	if (r != NULL) {
118150523Sphk		r->r_r.__r_i = r;
119150523Sphk	}
120150523Sphk	return (r);
121150523Sphk}
122150523Sphk
/*
 * Initialize a resource manager.  The caller must have set rm->rm_type
 * (and rm_descr); the first call ever also initializes the global list
 * of resource managers and its protecting mutex.
 *
 * Returns 0 on success or ENOMEM if the per-rman mutex cannot be
 * allocated.  Panics on an uninitialized type or on RMAN_GAUGE, which
 * is not implemented (see the file header comment).
 */
int
rman_init(struct rman *rm)
{
	/* One-shot global setup; first call happens before SMP startup. */
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	/* Each rman gets its own region list and mutex. */
	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	/* Publish the new rman on the global list. */
	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}
15040711Swollman
15140711Swollman/*
15240711Swollman * NB: this interface is not robust against programming errors which
15340711Swollman * add multiple copies of the same region.
15440711Swollman */
15540711Swollmanint
15640711Swollmanrman_manage_region(struct rman *rm, u_long start, u_long end)
15740711Swollman{
158150523Sphk	struct resource_i *r, *s;
15940711Swollman
160134040Snjl	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
161134021Snjl	    rm->rm_descr, start, end));
162150523Sphk	r = int_alloc_resource(M_NOWAIT);
163152543Syongari	if (r == NULL)
16440711Swollman		return ENOMEM;
16540711Swollman	r->r_start = start;
16640711Swollman	r->r_end = end;
16740711Swollman	r->r_rm = rm;
16840711Swollman
16972200Sbmilekic	mtx_lock(rm->rm_mtx);
170152543Syongari	for (s = TAILQ_FIRST(&rm->rm_list);
17168727Smckusick	     s && s->r_end < r->r_start;
17268727Smckusick	     s = TAILQ_NEXT(s, r_link))
17340711Swollman		;
17440711Swollman
17568727Smckusick	if (s == NULL) {
17668727Smckusick		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
17740711Swollman	} else {
17868727Smckusick		TAILQ_INSERT_BEFORE(s, r, r_link);
17940711Swollman	}
18040711Swollman
18172200Sbmilekic	mtx_unlock(rm->rm_mtx);
18240711Swollman	return 0;
18340711Swollman}
18440711Swollman
18540711Swollmanint
186159536Simprman_init_from_resource(struct rman *rm, struct resource *r)
187159536Simp{
188159536Simp	int rv;
189159536Simp
190159536Simp	if ((rv = rman_init(rm)) != 0)
191159536Simp		return (rv);
192159536Simp	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
193159536Simp}
194159536Simp
195159536Simpint
19640711Swollmanrman_fini(struct rman *rm)
19740711Swollman{
198150523Sphk	struct resource_i *r;
19940711Swollman
20072200Sbmilekic	mtx_lock(rm->rm_mtx);
20168727Smckusick	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
20245720Speter		if (r->r_flags & RF_ALLOCATED) {
20372200Sbmilekic			mtx_unlock(rm->rm_mtx);
20440711Swollman			return EBUSY;
20545720Speter		}
20640711Swollman	}
20740711Swollman
20840711Swollman	/*
20940711Swollman	 * There really should only be one of these if we are in this
21040711Swollman	 * state and the code is working properly, but it can't hurt.
21140711Swollman	 */
21268727Smckusick	while (!TAILQ_EMPTY(&rm->rm_list)) {
21368727Smckusick		r = TAILQ_FIRST(&rm->rm_list);
21468727Smckusick		TAILQ_REMOVE(&rm->rm_list, r, r_link);
21540711Swollman		free(r, M_RMAN);
21640711Swollman	}
21772200Sbmilekic	mtx_unlock(rm->rm_mtx);
21872200Sbmilekic	mtx_lock(&rman_mtx);
21940711Swollman	TAILQ_REMOVE(&rman_head, rm, rm_link);
22072200Sbmilekic	mtx_unlock(&rman_mtx);
22171576Sjasone	mtx_destroy(rm->rm_mtx);
22271576Sjasone	free(rm->rm_mtx, M_RMAN);
22340711Swollman
22440711Swollman	return 0;
22540711Swollman}
22640711Swollman
/*
 * Allocate [count] contiguous indices from rm, constrained to lie
 * within [start, end], aligned per RF_ALIGNMENT(flags), and (when
 * bound != 0) not crossing a bound-sized boundary.  If no unshared
 * region fits and RF_SHAREABLE/RF_TIMESHARE is set, an exactly
 * matching already-allocated region may be shared instead.  If the
 * caller passed RF_ACTIVE, the resource is activated atomically with
 * the allocation.  Returns the new resource or NULL on failure.
 */
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	/* RF_ACTIVE is handled separately at the end, after allocation. */
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	/* Skip regions that lie entirely before the requested window. */
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	/* amask: low bits that must be clear for correct alignment. */
	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/* List is sorted: once a region starts too late, give up. */
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1> end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			/* Does [rstart, rstart+count-1] cross a boundary? */
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			/* Exact fit: hand out the existing region as-is. */
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				/* r becomes the free tail after rv. */
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		/* Sharing type of the candidate must cover the request. */
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			/* First sharer: create the share list lazily. */
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}
44140711Swollman
/*
 * Compatibility wrapper: reserve a resource with no boundary
 * restriction (bound == 0).  See rman_reserve_resource_bound().
 */
struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
45088372Stmm
45140711Swollmanstatic int
452150523Sphkint_rman_activate_resource(struct rman *rm, struct resource_i *r,
453150523Sphk			   struct resource_i **whohas)
45440711Swollman{
455150523Sphk	struct resource_i *s;
45640711Swollman	int ok;
45740711Swollman
45840711Swollman	/*
45940711Swollman	 * If we are not timesharing, then there is nothing much to do.
46040711Swollman	 * If we already have the resource, then there is nothing at all to do.
46140711Swollman	 * If we are not on a sharing list with anybody else, then there is
46240711Swollman	 * little to do.
46340711Swollman	 */
46440711Swollman	if ((r->r_flags & RF_TIMESHARE) == 0
46540711Swollman	    || (r->r_flags & RF_ACTIVE) != 0
466152543Syongari	    || r->r_sharehead == NULL) {
46740711Swollman		r->r_flags |= RF_ACTIVE;
46840711Swollman		return 0;
46940711Swollman	}
47040711Swollman
47140711Swollman	ok = 1;
47253225Sphk	for (s = LIST_FIRST(r->r_sharehead); s && ok;
47353225Sphk	     s = LIST_NEXT(s, r_sharelink)) {
47440711Swollman		if ((s->r_flags & RF_ACTIVE) != 0) {
47540711Swollman			ok = 0;
47640711Swollman			*whohas = s;
47740711Swollman		}
47840711Swollman	}
47940711Swollman	if (ok) {
48040711Swollman		r->r_flags |= RF_ACTIVE;
48140711Swollman		return 0;
48240711Swollman	}
48340711Swollman	return EBUSY;
48440711Swollman}
48540711Swollman
48640711Swollmanint
487150523Sphkrman_activate_resource(struct resource *re)
48840711Swollman{
48940711Swollman	int rv;
490150523Sphk	struct resource_i *r, *whohas;
49140711Swollman	struct rman *rm;
49240711Swollman
493150523Sphk	r = re->__r_i;
49440711Swollman	rm = r->r_rm;
49572200Sbmilekic	mtx_lock(rm->rm_mtx);
49640711Swollman	rv = int_rman_activate_resource(rm, r, &whohas);
49772200Sbmilekic	mtx_unlock(rm->rm_mtx);
49840711Swollman	return rv;
49940711Swollman}
50040711Swollman
/*
 * Sleep until the given timeshared resource can be activated, then
 * activate it.  On success (return 0) the rman's mutex is STILL HELD
 * and the caller must release it.  If msleep() fails (timeout or
 * signal, per pri/timo), the mutex is dropped and the msleep error is
 * returned.  Panics if activation reports EBUSY for a resource with
 * no share list, which int_rman_activate_resource() cannot do.
 */
int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		/*
		 * Ask the current holder to wake us (via the shared
		 * sharehead channel) when it deactivates, then sleep.
		 */
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}
52640711Swollman
52745720Speterstatic int
528150523Sphkint_rman_deactivate_resource(struct resource_i *r)
52940711Swollman{
53040711Swollman
53140711Swollman	r->r_flags &= ~RF_ACTIVE;
53240711Swollman	if (r->r_flags & RF_WANTED) {
53340711Swollman		r->r_flags &= ~RF_WANTED;
53440711Swollman		wakeup(r->r_sharehead);
53540711Swollman	}
53645720Speter	return 0;
53745720Speter}
53845720Speter
53945720Speterint
54045720Speterrman_deactivate_resource(struct resource *r)
54145720Speter{
54245720Speter	struct	rman *rm;
54345720Speter
544150523Sphk	rm = r->__r_i->r_rm;
54572200Sbmilekic	mtx_lock(rm->rm_mtx);
546150523Sphk	int_rman_deactivate_resource(r->__r_i);
54772200Sbmilekic	mtx_unlock(rm->rm_mtx);
54840711Swollman	return 0;
54940711Swollman}
55040711Swollman
/*
 * Give r back to its resource manager: deactivate it if needed, detach
 * it from any share list (handing RF_FIRSTSHARE to a surviving sharer),
 * and merge the freed range with adjacent free regions where possible.
 * Callers in this file hold rm->rm_mtx.  Always returns 0.
 */
static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			/* Successor s takes r's place in the region list. */
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
64340711Swollman
64440711Swollmanint
645150523Sphkrman_release_resource(struct resource *re)
64640711Swollman{
64740711Swollman	int	rv;
648150523Sphk	struct	resource_i *r;
649150523Sphk	struct	rman *rm;
65040711Swollman
651150523Sphk	r = re->__r_i;
652150523Sphk	rm = r->r_rm;
65372200Sbmilekic	mtx_lock(rm->rm_mtx);
65440711Swollman	rv = int_rman_release_resource(rm, r);
65572200Sbmilekic	mtx_unlock(rm->rm_mtx);
65640711Swollman	return (rv);
65740711Swollman}
65867261Simp
65967261Simpuint32_t
66067261Simprman_make_alignment_flags(uint32_t size)
66167261Simp{
66267261Simp	int	i;
66367261Simp
66467425Simp	/*
66567425Simp	 * Find the hightest bit set, and add one if more than one bit
66667425Simp	 * set.  We're effectively computing the ceil(log2(size)) here.
66767425Simp	 */
66888372Stmm	for (i = 31; i > 0; i--)
66967425Simp		if ((1 << i) & size)
67067425Simp			break;
67167425Simp	if (~(1 << i) & size)
67267425Simp		i++;
67367261Simp
67467261Simp	return(RF_ALIGNMENT_LOG2(i));
67567425Simp}
676107296Simp
/* First index covered by the resource. */
u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

/* Last index covered by the resource (inclusive). */
u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

/* Number of indices covered: end - start + 1. */
u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

/* Current RF_* flags of the resource. */
u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

/* Record/fetch the virtual address backing this resource. */
void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

/* Bus-space tag and handle live in the public struct resource. */
void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

/* Optional resource identifier (rid) associated by bus code. */
void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

/*
 * NOTE(review): these setters adjust only the recorded range of this
 * resource; they do not update the owning rman's region list.
 */
void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

/* Device recorded as the allocator of this resource, or NULL. */
struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

/* Nonzero iff r belongs to resource manager rm. */
int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}
779150547Sphk
780144071Sphk/*
781144071Sphk * Sysctl interface for scanning the resource lists.
782144071Sphk *
783144071Sphk * We take two input parameters; the index into the list of resource
784144071Sphk * managers, and the resource offset into the list.
785144071Sphk */
/*
 * Sysctl handler backing hw.bus.rman.  The three OID name components
 * are: name[0] = bus data generation (staleness check), name[1] =
 * index of the resource manager within rman_head, name[2] = index of
 * the resource within that rman, or -1 to request details about the
 * rman itself.  Copies out a struct u_rman or struct u_resource.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	/*
	 * NOTE(review): rm is dereferenced below after rman_mtx has
	 * been dropped; this relies on rmans not being torn down
	 * concurrently with the sysctl — confirm against rman_fini().
	 */
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			/* Report "name+unit", "nomatch", or empty. */
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			mtx_unlock(rm->rm_mtx);
			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}
869144071Sphk
/* Export the resource lists as the read-only hw.bus.rman sysctl tree. */
SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
872