/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
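
/*
 * Hypothetical usage sketch (the softc, the "foomem" description, and
 * the 0x0-0xffff window are invented for illustration; they are not
 * part of this file).  A bus driver typically embeds a struct rman in
 * its softc, describes the range it decodes, and then carves
 * allocations out of that range on behalf of its children:
 *
 *	sc->mem_rman.rm_type = RMAN_ARRAY;
 *	sc->mem_rman.rm_descr = "foomem";
 *	if (rman_init(&sc->mem_rman) != 0 ||
 *	    rman_manage_region(&sc->mem_rman, 0x0, 0xffff) != 0)
 *		return (ENXIO);
 *
 *	r = rman_reserve_resource(&sc->mem_rman, 0x0, 0xffff, 0x10,
 *	    0, child);
 *	if (r == NULL)
 *		return (ENXIO);
 *	rman_activate_resource(r);
 *	...
 *	rman_release_resource(r);
 */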

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `rman_res_t' -- an unsigned integral type wide enough to cover the
 * largest address space we expect to manage.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	rman_res_t	r_start;	/* index of the first entry in this resource */
	rman_res_t	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	device_t r_dev;		/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");

/* Wrapped in do/while so the macro expands safely under a bare if/else. */
#define	DPRINTF(params)	do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx; /* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#jx, end %#jx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ~0)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	/* On failure the new region was never inserted; don't leak it. */
	if (rv != 0)
		free(r, M_RMAN);
	return rv;
}
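
/*
 * Illustrative note on the merge behavior above (addresses invented):
 * managing [0x0, 0xff] and then [0x100, 0x1ff] in the same rman leaves
 * a single idle region [0x0, 0x1ff], because a new region is coalesced
 * with exactly-adjacent neighbors whose flags are clear.  Managing
 * [0x80, 0x17f] instead would fail with EBUSY, since any overlap with
 * an existing region is rejected.
 */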

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
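
/*
 * A sketch of the adjust semantics above (values invented): given an
 * allocated, non-shared resource spanning [0x1000, 0x1fff] with idle
 * space on both sides, rman_adjust_resource(res, 0x800, 0x2fff) grows
 * it in place, while rman_adjust_resource(res, 0x1800, 0x1bff) shrinks
 * it and returns the trimmed ranges to the rman as idle regions.
 * Growing fails with EBUSY when the adjacent region is allocated or
 * too small.
 */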

#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
			    rman_res_t count, rman_res_t bound, u_int flags,
			    device_t dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	rman_res_t rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#jx, %#jx], "
	       "length %#jx, flags %x, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	r = TAILQ_FIRST(&rm->rm_list);
	if (r == NULL) {
		DPRINTF(("NULL list head\n"));
	} else {
		DPRINTF(("rman_reserve_resource_bound: trying %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count-1));
	}
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link)) {
		DPRINTF(("rman_reserve_resource_bound: tried %#jx <%#jx,%#jx>\n",
			r->r_end, start, count-1));
	}

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ull << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= RM_MAX_END - amask,
	    ("start (%#jx) + amask (%#jx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#jx, %#jx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#jx) + count - 1 > end (%#jx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > RM_MAX_END - amask) {
			DPRINTF(("s->r_start (%#jx) + amask (%#jx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ummax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ummin(s->r_end, ummax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#jx, %#jx], size %#jx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & RF_SHAREABLE) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */

out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
		      rman_res_t count, u_int flags, device_t dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
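
/*
 * Illustrative call (values invented, not from this file): to reserve
 * 0x20 addresses that are 16-byte aligned and do not cross a 0x100
 * boundary, a caller might use
 *
 *	rman_reserve_resource_bound(rm, 0x0, 0xffff, 0x20, 0x100,
 *	    RF_ALIGNMENT_LOG2(4), dev);
 */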

int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
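
/*
 * Worked examples for the computation above (sizes invented):
 * rman_make_alignment_flags(0x1000) encodes an alignment of 12 bits
 * (the size is already a power of two), while
 * rman_make_alignment_flags(0x1001) rounds up and encodes 13 bits.
 * The returned bits are meant to be OR'd into the flags passed to
 * rman_reserve_resource_bound().
 */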

void
rman_set_start(struct resource *r, rman_res_t start)
{

	r->__r_i->r_start = start;
}

rman_res_t
rman_get_start(struct resource *r)
{

	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, rman_res_t end)
{

	r->__r_i->r_end = end;
}

rman_res_t
rman_get_end(struct resource *r)
{

	return (r->__r_i->r_end);
}

rman_res_t
rman_get_size(struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{

	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{

	return (r->r_bushandle);
}

void
rman_set_mapping(struct resource *r, struct resource_map *map)
{

	KASSERT(rman_get_size(r) == map->r_size,
	    ("rman_set_mapping: size mismatch"));
	rman_set_bustag(r, map->r_bustag);
	rman_set_bushandle(r, map->r_bushandle);
	rman_set_virtual(r, map->r_vaddr);
}

void
rman_get_mapping(struct resource *r, struct resource_map *map)
{

	map->r_bustag = rman_get_bustag(r);
	map->r_bushandle = rman_get_bushandle(r);
	map->r_size = rman_get_size(r);
	map->r_vaddr = rman_get_virtual(r);
}

void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, device_t dev)
{

	r->__r_i->r_dev = dev;
}

device_t
rman_get_device(struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN, "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch", RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
	    rm, rm->rm_descr, (rman_res_t)rm->rm_start, (rman_res_t)rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%jx-0x%jx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif