/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */

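/*
 * Usage sketch (hypothetical bus driver, not part of this file): the
 * owner of an address space creates an rman, tells it which range it
 * manages, and then carves allocations out of it for its children.
 * The "mybus" names and the "child" device are illustrative only.
 *
 *	static struct rman mybus_mem_rman;
 *
 *	mybus_mem_rman.rm_type = RMAN_ARRAY;
 *	mybus_mem_rman.rm_descr = "mybus memory window";
 *	if (rman_init(&mybus_mem_rman) != 0 ||
 *	    rman_manage_region(&mybus_mem_rman,
 *	    0xa0000000ul, 0xaffffffful) != 0)
 *		panic("mybus: cannot set up memory rman");
 *
 *	// Later, reserve an active 0x1000-entry range for a child:
 *	struct resource *res = rman_reserve_resource(&mybus_mem_rman,
 *	    0ul, ~0ul, 0x1000, RF_ACTIVE, child);
 */
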
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 266814 2014-05-28 16:57:17Z truckman $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int     rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

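/*
 * Example: the DPRINTF tracing below can be enabled at boot by setting
 * "debug.rman_debug=1" in loader.conf(5), or at run time with
 * "sysctl debug.rman_debug=1".
 */
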
#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0ul;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}

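/*
 * Worked example (hypothetical values): if the list already holds a free
 * region [0x0000, 0x7fff], then
 *
 *	rman_manage_region(rm, 0x8000, 0xffff);
 *
 * coalesces with it, leaving a single entry [0x0000, 0xffff], whereas
 *
 *	rman_manage_region(rm, 0x4000, 0x9fff);
 *
 * fails with EBUSY because the new range overlaps a region already on
 * the list.
 */
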
int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
{
	struct	resource_i *r, *s, *t, *new;
	struct	rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & (RF_TIMESHARE | RF_SHAREABLE))
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}

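/*
 * Hypothetical example for rman_adjust_resource(): a non-shared resource
 * spanning [0xc0000000, 0xc00fffff] can be grown in place, provided the
 * region immediately above it is free and large enough:
 *
 *	error = rman_adjust_resource(res, 0xc0000000, 0xc01fffff);
 *
 * Shrinking either end always succeeds; growing returns EBUSY when the
 * adjacent region is allocated or too small, and any request on a shared
 * resource returns EINVAL.
 */
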
#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_TIMESHARE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound, u_int flags,
		      struct device *dev)
{
	u_int	new_rflags;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT((flags & (RF_WANTED | RF_FIRSTSHARE)) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~(RF_ACTIVE | RF_WANTED | RF_FIRSTSHARE)) |
	    RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= ULONG_MAX - amask,
	    ("start (%#lx) + amask (%#lx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > ULONG_MAX - amask) {
			DPRINTF(("s->r_start (%#lx) + amask (%#lx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in flags, we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && (flags & RF_ACTIVE) != 0) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int	rv;
	struct	resource_i *r;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * is set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}

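/*
 * Example: a caller wanting a 4KB-aligned allocation encodes the
 * alignment into the flags it passes to rman_reserve_resource():
 *
 *	flags |= rman_make_alignment_flags(0x1000);	(log2(0x1000) == 12)
 *
 * A size that is not a power of two is rounded up, so 0x1800 yields an
 * alignment of 2^13.
 */
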
void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
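
/*
 * Userland sketch (hedged: this mirrors the approach taken by devinfo(8),
 * with error handling omitted).  The three name components appended to the
 * hw.bus.rman MIB are the bus generation count (from hw.bus.info), the
 * resource manager index, and the resource index, where -1 selects the
 * manager itself:
 *
 *	struct u_businfo ubus;
 *	struct u_rman urm;
 *	int name[6];
 *	size_t len;
 *
 *	len = sizeof(ubus);
 *	sysctlbyname("hw.bus.info", &ubus, &len, NULL, 0);
 *	len = 3;
 *	sysctlnametomib("hw.bus.rman", name, &len);
 *	name[3] = ubus.ub_generation;
 *	name[4] = 0;		(first resource manager)
 *	name[5] = -1;		(describe the manager itself)
 *	len = sizeof(urm);
 *	sysctl(name, 6, &urm, &len, NULL, 0);
 */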

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%lx-0x%lx full range)\n",
	    rm, rm->rm_descr, rm->rm_start, rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif