subr_rman.c revision 267985
1104477Ssam/*-
2104477Ssam * Copyright 1998 Massachusetts Institute of Technology
3104477Ssam *
4104477Ssam * Permission to use, copy, modify, and distribute this software and
5104477Ssam * its documentation for any purpose and without fee is hereby
6104477Ssam * granted, provided that both the above copyright notice and this
7104477Ssam * permission notice appear in all copies, that both the above
8104477Ssam * copyright notice and this permission notice appear in all
9104477Ssam * supporting documentation, and that the name of M.I.T. not be used
10104477Ssam * in advertising or publicity pertaining to distribution of the
11104477Ssam * software without specific, written prior permission.  M.I.T. makes
12104477Ssam * no representations about the suitability of this software for any
13104477Ssam * purpose.  It is provided "as is" without express or implied
14104477Ssam * warranty.
15104477Ssam *
16104477Ssam * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
17104477Ssam * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
18104477Ssam * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19104477Ssam * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
20104477Ssam * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21104477Ssam * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22104477Ssam * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23104477Ssam * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24104477Ssam * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25104477Ssam * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26104477Ssam * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27104477Ssam * SUCH DAMAGE.
28104477Ssam */
29104477Ssam
30104477Ssam/*
31104477Ssam * The kernel resource manager.  This code is responsible for keeping track
32104477Ssam * of hardware resources which are apportioned out to various drivers.
33104477Ssam * It does not actually assign those resources, and it is not expected
34104477Ssam * that end-device drivers will call into this code directly.  Rather,
35104477Ssam * the code which implements the buses that those devices are attached to,
36104477Ssam * and the code which manages CPU resources, will call this code, and the
37104477Ssam * end-device drivers will make upcalls to that code to actually perform
38104477Ssam * the allocation.
39104477Ssam *
40104477Ssam * There are two sorts of resources managed by this code.  The first is
41104477Ssam * the more familiar array (RMAN_ARRAY) type; resources in this class
42104477Ssam * consist of a sequence of individually-allocatable objects which have
43104477Ssam * been numbered in some well-defined order.  Most of the resources
44104477Ssam * are of this type, as it is the most familiar.  The second type is
45104477Ssam * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
46104477Ssam * resources in which each instance is indistinguishable from every
47104477Ssam * other instance).  The principal anticipated application of gauges
48104477Ssam * is in the context of power consumption, where a bus may have a specific
49104477Ssam * power budget which all attached devices share.  RMAN_GAUGE is not
50104477Ssam * implemented yet.
51104477Ssam *
52104477Ssam * For array resources, we make one simplifying assumption: two clients
53104477Ssam * sharing the same resource must use the same range of indices.  That
54104477Ssam * is to say, sharing of overlapping-but-not-identical regions is not
55104477Ssam * permitted.
56104477Ssam */
57104477Ssam
58104477Ssam#include "opt_ddb.h"
59104477Ssam
60104477Ssam#include <sys/cdefs.h>
61104477Ssam__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 267985 2014-06-27 22:05:21Z gjb $");
62104477Ssam
63104477Ssam#include <sys/param.h>
64104477Ssam#include <sys/systm.h>
65104477Ssam#include <sys/kernel.h>
66104477Ssam#include <sys/limits.h>
67104477Ssam#include <sys/lock.h>
68104477Ssam#include <sys/malloc.h>
69104477Ssam#include <sys/mutex.h>
70104477Ssam#include <sys/bus.h>		/* XXX debugging */
71104477Ssam#include <machine/bus.h>
72104477Ssam#include <sys/rman.h>
73104477Ssam#include <sys/sysctl.h>
74104477Ssam
75104477Ssam#ifdef DDB
76104477Ssam#include <ddb/ddb.h>
77104477Ssam#endif
78104477Ssam
79104477Ssam/*
80104477Ssam * We use a linked list rather than a bitmap because we need to be able to
81104477Ssam * represent potentially huge objects (like all of a processor's physical
82104477Ssam * address space).  That is also why the indices are defined to have type
83104477Ssam * `unsigned long' -- that being the largest integral type in ISO C (1990).
84104477Ssam * The 1999 version of C allows `long long'; we may need to switch to that
85104477Ssam * at some point in the future, particularly if we want to support 36-bit
86104477Ssam * addresses on IA32 hardware.
87104477Ssam */
struct resource_i {
	struct resource		r_r;		/* public view; r_r.__r_i points back at us */
	TAILQ_ENTRY(resource_i)	r_link;		/* entry in the rman's sorted region list */
	LIST_ENTRY(resource_i)	r_sharelink;	/* entry in the sharing list, if shared */
	LIST_HEAD(, resource_i)	*r_sharehead;	/* head of sharing list; NULL if unshared */
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;	/* RF_* flags (RF_ALLOCATED, RF_ACTIVE, ...) */
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};
101104477Ssam
/* Debug knob: set debug.rman_debug (loader tunable / sysctl) non-zero
 * to enable DPRINTF tracing in this file. */
static int     rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");
106104477Ssam
107104477Ssam#define DPRINTF(params) if (rman_debug) printf params
108104477Ssam
static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

/* Global list of all initialized resource managers; see rman_init(). */
struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
/* Internal helpers; callers must hold the owning rman's rm_mtx. */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);
117104477Ssam
118104477Ssamstatic __inline struct resource_i *
119104477Ssamint_alloc_resource(int malloc_flag)
120104477Ssam{
121104477Ssam	struct resource_i *r;
122104477Ssam
123104477Ssam	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
124104477Ssam	if (r != NULL) {
125104477Ssam		r->r_r.__r_i = r;
126104477Ssam	}
127104477Ssam	return (r);
128104477Ssam}
129104477Ssam
130104477Ssamint
131104477Ssamrman_init(struct rman *rm)
132104477Ssam{
133104477Ssam	static int once = 0;
134104477Ssam
135104477Ssam	if (once == 0) {
136104477Ssam		once = 1;
137104477Ssam		TAILQ_INIT(&rman_head);
138104477Ssam		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
139104477Ssam	}
140104477Ssam
141104477Ssam	if (rm->rm_start == 0 && rm->rm_end == 0)
142104477Ssam		rm->rm_end = ~0ul;
143104477Ssam	if (rm->rm_type == RMAN_UNINIT)
144104477Ssam		panic("rman_init");
145104477Ssam	if (rm->rm_type == RMAN_GAUGE)
146104477Ssam		panic("implement RMAN_GAUGE");
147104477Ssam
148104477Ssam	TAILQ_INIT(&rm->rm_list);
149104477Ssam	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
150104477Ssam	if (rm->rm_mtx == NULL)
151104477Ssam		return ENOMEM;
152104477Ssam	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);
153104477Ssam
154104477Ssam	mtx_lock(&rman_mtx);
155104477Ssam	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
156104477Ssam	mtx_unlock(&rman_mtx);
157104477Ssam	return 0;
158104477Ssam}
159104477Ssam
/*
 * rman_manage_region: place the index range [start, end] under rm's
 * management.  The range must lie within [rm->rm_start, rm->rm_end] and
 * must not overlap any range already managed (EBUSY if it does).  Where
 * the new range is exactly adjacent to an existing idle region, the two
 * (or three) regions are coalesced so the sorted list stays minimal.
 * Returns 0 on success, or EINVAL/ENOMEM/EBUSY.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		/* Guard: s->r_end + 1 below would wrap at ULONG_MAX. */
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.  Only idle regions (r_flags == 0)
		 * may be coalesced.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			/* No merge possible; insert in sorted position. */
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}
237104477Ssam
238104477Ssamint
239104477Ssamrman_init_from_resource(struct rman *rm, struct resource *r)
240104477Ssam{
241104477Ssam	int rv;
242104477Ssam
243104477Ssam	if ((rv = rman_init(rm)) != 0)
244104477Ssam		return (rv);
245104477Ssam	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
246104477Ssam}
247104477Ssam
248104477Ssamint
249104477Ssamrman_fini(struct rman *rm)
250104477Ssam{
251104477Ssam	struct resource_i *r;
252104477Ssam
253104477Ssam	mtx_lock(rm->rm_mtx);
254104477Ssam	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
255104477Ssam		if (r->r_flags & RF_ALLOCATED) {
256104477Ssam			mtx_unlock(rm->rm_mtx);
257104477Ssam			return EBUSY;
258104477Ssam		}
259104477Ssam	}
260104477Ssam
261104477Ssam	/*
262104477Ssam	 * There really should only be one of these if we are in this
263104477Ssam	 * state and the code is working properly, but it can't hurt.
264104477Ssam	 */
265104477Ssam	while (!TAILQ_EMPTY(&rm->rm_list)) {
266104477Ssam		r = TAILQ_FIRST(&rm->rm_list);
267104477Ssam		TAILQ_REMOVE(&rm->rm_list, r, r_link);
268104477Ssam		free(r, M_RMAN);
269104477Ssam	}
270104477Ssam	mtx_unlock(rm->rm_mtx);
271104477Ssam	mtx_lock(&rman_mtx);
272104477Ssam	TAILQ_REMOVE(&rman_head, rm, rm_link);
273104477Ssam	mtx_unlock(&rman_mtx);
274104477Ssam	mtx_destroy(rm->rm_mtx);
275104477Ssam	free(rm->rm_mtx, M_RMAN);
276104477Ssam
277104477Ssam	return 0;
278104477Ssam}
279104477Ssam
280104477Ssamint
281104477Ssamrman_first_free_region(struct rman *rm, u_long *start, u_long *end)
282104477Ssam{
283104477Ssam	struct resource_i *r;
284104477Ssam
285104477Ssam	mtx_lock(rm->rm_mtx);
286104477Ssam	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
287104477Ssam		if (!(r->r_flags & RF_ALLOCATED)) {
288104477Ssam			*start = r->r_start;
289104477Ssam			*end = r->r_end;
290104477Ssam			mtx_unlock(rm->rm_mtx);
291104477Ssam			return (0);
292104477Ssam		}
293104477Ssam	}
294104477Ssam	mtx_unlock(rm->rm_mtx);
295104477Ssam	return (ENOENT);
296104477Ssam}
297104477Ssam
298104477Ssamint
299104477Ssamrman_last_free_region(struct rman *rm, u_long *start, u_long *end)
300104477Ssam{
301104477Ssam	struct resource_i *r;
302104477Ssam
303104477Ssam	mtx_lock(rm->rm_mtx);
304104477Ssam	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
305104477Ssam		if (!(r->r_flags & RF_ALLOCATED)) {
306104477Ssam			*start = r->r_start;
307104477Ssam			*end = r->r_end;
308104477Ssam			mtx_unlock(rm->rm_mtx);
309104477Ssam			return (0);
310104477Ssam		}
311104477Ssam	}
312104477Ssam	mtx_unlock(rm->rm_mtx);
313104477Ssam	return (ENOENT);
314104477Ssam}
315104477Ssam
/*
 * Shrink or extend one or both ends of an allocated resource.
 *
 * The new range [start, end] must overlap the existing one (wholesale
 * moves are not supported), and the resource must not be shared.
 * Growing an end requires that the adjacent region be unallocated and
 * large enough.  Shrinking always succeeds; if it leaves a hole that
 * cannot be merged into a neighbor, a new free region is allocated
 * (M_WAITOK, with the lock dropped) to represent it.
 * Returns 0 on success, EINVAL or EBUSY on failure.
 */
int
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
{
	struct	resource_i *r, *s, *t, *new;
	struct	rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & (RF_TIMESHARE | RF_SHAREABLE))
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	/* Sanity-check that r really lives on this manager's list. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		/* Consume (or trim) the free region on our left. */
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		/* Consume (or trim) the free region on our right. */
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			/* A free neighbor appeared meanwhile; extend it. */
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			/* A free neighbor appeared meanwhile; extend it. */
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
437104477Ssam
438104477Ssam#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_TIMESHARE | RF_PREFETCHABLE))
439104477Ssam
/*
 * rman_reserve_resource_bound: allocate `count' contiguous indices from rm
 * within [start, end], honoring the alignment encoded in RF_ALIGNMENT(flags)
 * and, when `bound' is non-zero, the requirement that the allocation not
 * cross a `bound'-sized boundary.  An unshared region is preferred; if none
 * fits and RF_SHAREABLE/RF_TIMESHARE was requested, an existing allocation
 * of identical range and share type may be joined instead.  With RF_ACTIVE
 * the resource is atomically activated, and the whole reservation fails if
 * activation does.  Returns the new resource or NULL.
 */
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	new_rflags;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT((flags & (RF_WANTED | RF_FIRSTSHARE)) == 0,
	    ("invalid flags %#x", flags));
	/* RF_WANTED/RF_FIRSTSHARE/RF_ACTIVE are managed internally. */
	new_rflags = (flags & ~(RF_ACTIVE | RF_WANTED | RF_FIRSTSHARE)) |
	    RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	/* Skip regions that end before the request could possibly fit. */
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= ULONG_MAX - amask,
	    ("start (%#lx) + amask (%#lx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) + count - 1> end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > ULONG_MAX - amask) {
			DPRINTF(("s->r_start (%#lx) + amask (%#lx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			/* Exact fit: claim the whole region in place. */
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			/* First sharer: create the sharing list lazily. */
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in flags, we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && (flags & RF_ACTIVE) != 0) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}
661104477Ssam
662105190Ssamstruct resource *
663105190Ssamrman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
664104477Ssam		      u_int flags, struct device *dev)
665104477Ssam{
666104477Ssam
667104477Ssam	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
668104477Ssam	    dev));
669104477Ssam}
670104477Ssam
671104477Ssamstatic int
672104477Ssamint_rman_activate_resource(struct rman *rm, struct resource_i *r,
673104477Ssam			   struct resource_i **whohas)
674104477Ssam{
675104477Ssam	struct resource_i *s;
676104477Ssam	int ok;
677104477Ssam
678104477Ssam	/*
679104477Ssam	 * If we are not timesharing, then there is nothing much to do.
680104477Ssam	 * If we already have the resource, then there is nothing at all to do.
681104477Ssam	 * If we are not on a sharing list with anybody else, then there is
682104477Ssam	 * little to do.
683104477Ssam	 */
684104477Ssam	if ((r->r_flags & RF_TIMESHARE) == 0
685104477Ssam	    || (r->r_flags & RF_ACTIVE) != 0
686104477Ssam	    || r->r_sharehead == NULL) {
687104477Ssam		r->r_flags |= RF_ACTIVE;
688104477Ssam		return 0;
689104477Ssam	}
690104477Ssam
691104477Ssam	ok = 1;
692104477Ssam	for (s = LIST_FIRST(r->r_sharehead); s && ok;
693104477Ssam	     s = LIST_NEXT(s, r_sharelink)) {
694104477Ssam		if ((s->r_flags & RF_ACTIVE) != 0) {
695104477Ssam			ok = 0;
696104477Ssam			*whohas = s;
697104477Ssam		}
698104477Ssam	}
699104477Ssam	if (ok) {
700104477Ssam		r->r_flags |= RF_ACTIVE;
701104477Ssam		return 0;
702104477Ssam	}
703104477Ssam	return EBUSY;
704104477Ssam}
705104477Ssam
706104477Ssamint
707104477Ssamrman_activate_resource(struct resource *re)
708104477Ssam{
709104477Ssam	int rv;
710104477Ssam	struct resource_i *r, *whohas;
711104477Ssam	struct rman *rm;
712104477Ssam
713104477Ssam	r = re->__r_i;
714104477Ssam	rm = r->r_rm;
715104477Ssam	mtx_lock(rm->rm_mtx);
716104477Ssam	rv = int_rman_activate_resource(rm, r, &whohas);
717104477Ssam	mtx_unlock(rm->rm_mtx);
718104477Ssam	return rv;
719104477Ssam}
720104477Ssam
/*
 * rman_await_resource: activate a timeshared resource, sleeping (with
 * msleep(9) priority `pri' and timeout `timo') until the currently
 * active sharer deactivates it.
 *
 * NOTE(review): the locking contract is asymmetric.  On any non-EBUSY
 * result from int_rman_activate_resource() this returns with rm->rm_mtx
 * STILL HELD (see the original inline comment); only the msleep-failure
 * path unlocks before returning.  Callers must account for this.
 */
int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		/* Only a shared resource can legitimately be busy here. */
		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		/* Ask the active sharer to wake us when it deactivates. */
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}
746104477Ssam
747104477Ssamstatic int
748104477Ssamint_rman_deactivate_resource(struct resource_i *r)
749104477Ssam{
750104477Ssam
751104477Ssam	r->r_flags &= ~RF_ACTIVE;
752104477Ssam	if (r->r_flags & RF_WANTED) {
753104477Ssam		r->r_flags &= ~RF_WANTED;
754104477Ssam		wakeup(r->r_sharehead);
755104477Ssam	}
756104477Ssam	return 0;
757104477Ssam}
758104477Ssam
759104477Ssamint
760104477Ssamrman_deactivate_resource(struct resource *r)
761104477Ssam{
762104477Ssam	struct	rman *rm;
763104477Ssam
764104477Ssam	rm = r->__r_i->r_rm;
765104477Ssam	mtx_lock(rm->rm_mtx);
766104477Ssam	int_rman_deactivate_resource(r->__r_i);
767104477Ssam	mtx_unlock(rm->rm_mtx);
768104477Ssam	return 0;
769104477Ssam}
770104477Ssam
/*
 * Release a resource segment back to its manager, coalescing it with
 * free neighbours where possible.  Caller holds the rman mutex.
 * Always returns 0.  `r' is freed unless it survives as the (now
 * unallocated) region in the no-merge case.
 */
static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	/* Deactivate first so anyone sleeping on this resource wakes. */
	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			/* Promote a surviving sharer into the main list. */
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
864104477Ssam
865104477Ssamint
866104477Ssamrman_release_resource(struct resource *re)
867104477Ssam{
868104477Ssam	int	rv;
869104477Ssam	struct	resource_i *r;
870104477Ssam	struct	rman *rm;
871104477Ssam
872104477Ssam	r = re->__r_i;
873104477Ssam	rm = r->r_rm;
874104477Ssam	mtx_lock(rm->rm_mtx);
875104477Ssam	rv = int_rman_release_resource(rm, r);
876104477Ssam	mtx_unlock(rm->rm_mtx);
877104477Ssam	return (rv);
878104477Ssam}
879104477Ssam
/*
 * Convert a size into resource-flag alignment bits: the encoded value
 * is ceil(log2(size)), wrapped by RF_ALIGNMENT_LOG2().
 */
uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 *
	 * Shift an unsigned 1: "1 << 31" overflows a signed int, which
	 * is undefined behavior in C.
	 */
	for (i = 31; i > 0; i--)
		if ((1U << i) & size)
			break;
	if (~(1U << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
897104477Ssam
/* Set the base address of a resource (does not touch the end). */
void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}
903104477Ssam
/* Return the base address of a resource. */
u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}
909104477Ssam
/* Set the (inclusive) end address of a resource. */
void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}
915104477Ssam
/* Return the (inclusive) end address of a resource. */
u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}
921104477Ssam
/* Return the size of a resource; bounds are inclusive, hence the +1. */
u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}
927104477Ssam
/* Return the RF_* flag word of a resource. */
u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}
933104477Ssam
/* Record the virtual address a resource has been mapped at. */
void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}
939104477Ssam
/* Return the recorded virtual mapping address of a resource. */
void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}
945104477Ssam
/* Set the bus space tag used to access this resource. */
void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}
951104477Ssam
/* Return the bus space tag of this resource. */
bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}
957104477Ssam
/* Set the bus space handle used to access this resource. */
void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}
963104477Ssam
/* Return the bus space handle of this resource. */
bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}
969104477Ssam
/* Set the resource identifier (rid) associated with this resource. */
void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}
975104477Ssam
/* Return the resource identifier (rid) of this resource. */
int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}
981104477Ssam
/* Record the device that owns this resource. */
void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}
987104477Ssam
/* Return the device that owns this resource (NULL if none). */
struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}
993104477Ssam
/* Return non-zero iff resource `r' is managed by resource manager `rm'. */
int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}
1000104477Ssam
1001104477Ssam/*
1002104477Ssam * Sysctl interface for scanning the resource lists.
1003104477Ssam *
1004104477Ssam * We take two input parameters; the index into the list of resource
1005104477Ssam * managers, and the resource offset into the list.
1006104477Ssam */
/*
 * Handler for the hw.bus.rman sysctl.  Expects three name components:
 * a bus-data generation number, a resource-manager index, and a
 * resource index (-1 to ask about the manager itself).  Copies out a
 * struct u_rman or struct u_resource accordingly.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;	/* exported rman description */
	struct u_resource	ures;	/* exported resource description */
	int			error;

	if (namelen != 3)
		return (EINVAL);

	/* Reject requests made against a stale device tree snapshot. */
	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.  Shared resources
	 * count individually: a segment with a sharing list contributes
	 * one index slot per sharer, not one for the segment.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		}
		else if (res_idx-- == 0)
				goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	/* Fill the export record while still holding the list lock. */
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	/* Drop the lock before the (possibly sleeping) copyout. */
	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}
1101104477Ssam
/* Export the resource lists read-only under the hw.bus.rman node. */
static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
1104104477Ssam
1105104477Ssam#ifdef DDB
1106104477Ssamstatic void
1107104477Ssamdump_rman_header(struct rman *rm)
1108104477Ssam{
1109104477Ssam
1110104477Ssam	if (db_pager_quit)
1111104477Ssam		return;
1112104477Ssam	db_printf("rman %p: %s (0x%lx-0x%lx full range)\n",
1113104477Ssam	    rm, rm->rm_descr, rm->rm_start, rm->rm_end);
1114104477Ssam}
1115104477Ssam
1116104477Ssamstatic void
1117104477Ssamdump_rman(struct rman *rm)
1118104477Ssam{
1119104477Ssam	struct resource_i *r;
1120104477Ssam	const char *devname;
1121104477Ssam
1122104477Ssam	if (db_pager_quit)
1123104477Ssam		return;
1124104477Ssam	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
1125104477Ssam		if (r->r_dev != NULL) {
1126104477Ssam			devname = device_get_nameunit(r->r_dev);
1127104477Ssam			if (devname == NULL)
1128104477Ssam				devname = "nomatch";
1129104477Ssam		} else
1130104477Ssam			devname = NULL;
1131104477Ssam		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
1132104477Ssam		if (devname != NULL)
1133104477Ssam			db_printf("(%s)\n", devname);
1134104477Ssam		else
1135104477Ssam			db_printf("----\n");
1136104477Ssam		if (db_pager_quit)
1137104477Ssam			return;
1138104477Ssam	}
1139104477Ssam}
1140104477Ssam
1141104477SsamDB_SHOW_COMMAND(rman, db_show_rman)
1142104477Ssam{
1143104477Ssam
1144104477Ssam	if (have_addr) {
1145104477Ssam		dump_rman_header((struct rman *)addr);
1146104477Ssam		dump_rman((struct rman *)addr);
1147104477Ssam	}
1148104477Ssam}
1149104477Ssam
/* DDB "show rmans": print the header line of every resource manager. */
DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}
1158104477Ssam
/*
 * DDB "show all rman": print the header and full region list of every
 * resource manager in the system.
 */
DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
/* "show allrman" is an alias for "show all rman". */
DB_SHOW_ALIAS(allrman, db_show_all_rman);
1169104477Ssam#endif
1170104477Ssam