subr_rman.c revision 166932
1145256Sjkoshy/*-
2177107Sjkoshy * Copyright 1998 Massachusetts Institute of Technology
3145256Sjkoshy *
4145256Sjkoshy * Permission to use, copy, modify, and distribute this software and
5145256Sjkoshy * its documentation for any purpose and without fee is hereby
6145256Sjkoshy * granted, provided that both the above copyright notice and this
7145256Sjkoshy * permission notice appear in all copies, that both the above
8145256Sjkoshy * copyright notice and this permission notice appear in all
9145256Sjkoshy * supporting documentation, and that the name of M.I.T. not be used
10145256Sjkoshy * in advertising or publicity pertaining to distribution of the
11145256Sjkoshy * software without specific, written prior permission.  M.I.T. makes
12145256Sjkoshy * no representations about the suitability of this software for any
13145256Sjkoshy * purpose.  It is provided "as is" without express or implied
14145256Sjkoshy * warranty.
15145256Sjkoshy *
16145256Sjkoshy * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
17145256Sjkoshy * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
18145256Sjkoshy * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19145256Sjkoshy * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
20145256Sjkoshy * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21145256Sjkoshy * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22145256Sjkoshy * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23145256Sjkoshy * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24145256Sjkoshy * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25145256Sjkoshy * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26145256Sjkoshy * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27145256Sjkoshy * SUCH DAMAGE.
28145256Sjkoshy */
29145256Sjkoshy
30145256Sjkoshy/*
31145256Sjkoshy * The kernel resource manager.  This code is responsible for keeping track
32145256Sjkoshy * of hardware resources which are apportioned out to various drivers.
33145256Sjkoshy * It does not actually assign those resources, and it is not expected
34145256Sjkoshy * that end-device drivers will call into this code directly.  Rather,
35145256Sjkoshy * the code which implements the buses that those devices are attached to,
36145256Sjkoshy * and the code which manages CPU resources, will call this code, and the
37145256Sjkoshy * end-device drivers will make upcalls to that code to actually perform
38145256Sjkoshy * the allocation.
39145256Sjkoshy *
40145256Sjkoshy * There are two sorts of resources managed by this code.  The first is
41145256Sjkoshy * the more familiar array (RMAN_ARRAY) type; resources in this class
42145256Sjkoshy * consist of a sequence of individually-allocatable objects which have
43145256Sjkoshy * been numbered in some well-defined order.  Most of the resources
44145256Sjkoshy * are of this type, as it is the most familiar.  The second type is
45185363Sjkoshy * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
46185363Sjkoshy * resources in which each instance is indistinguishable from every
47145256Sjkoshy * other instance).  The principal anticipated application of gauges
48145340Smarcel * is in the context of power consumption, where a bus may have a specific
49145256Sjkoshy * power budget which all attached devices share.  RMAN_GAUGE is not
50145256Sjkoshy * implemented yet.
51147191Sjkoshy *
52147759Sjkoshy * For array resources, we make one simplifying assumption: two clients
53185363Sjkoshy * sharing the same resource must use the same range of indices.  That
54185363Sjkoshy * is to say, sharing of overlapping-but-not-identical regions is not
55185363Sjkoshy * permitted.
56185363Sjkoshy */
57206089Sfabient
58206089Sfabient#include <sys/cdefs.h>
59206089Sfabient__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 166932 2007-02-23 22:53:56Z scottl $");
60206089Sfabient
61147191Sjkoshy#include <sys/param.h>
62145256Sjkoshy#include <sys/systm.h>
63147759Sjkoshy#include <sys/kernel.h>
64147759Sjkoshy#include <sys/limits.h>
65147191Sjkoshy#include <sys/lock.h>
66147191Sjkoshy#include <sys/malloc.h>
67145256Sjkoshy#include <sys/mutex.h>
68145256Sjkoshy#include <sys/bus.h>		/* XXX debugging */
69147191Sjkoshy#include <machine/bus.h>
70145256Sjkoshy#include <sys/rman.h>
71145256Sjkoshy#include <sys/sysctl.h>
72183725Sjkoshy
/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */

/*
 * Internal representation of a resource region.  The public-facing
 * `struct resource' is embedded as the first member (r_r) and carries a
 * back-pointer (__r_i) to this container, set in int_alloc_resource().
 */
struct resource_i {
	struct resource		r_r;		/* public view handed to clients */
	TAILQ_ENTRY(resource_i)	r_link;		/* linkage on rm->rm_list */
	LIST_ENTRY(resource_i)	r_sharelink;	/* linkage on the sharers' list */
	LIST_HEAD(, resource_i)	*r_sharehead;	/* sharers' list head; NULL if unshared */
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;	/* RF_* flags (allocated, shareable, active, ...) */
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};
95145256Sjkoshy
/* Debug knob: non-zero enables the DPRINTF tracing below; settable as a
 * loader tunable and via the debug.rman_debug sysctl. */
int     rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

/* Global list of all resource managers, protected by rman_mtx. */
struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);
111145256Sjkoshy
112145256Sjkoshystatic __inline struct resource_i *
113145256Sjkoshyint_alloc_resource(int malloc_flag)
114145256Sjkoshy{
115145256Sjkoshy	struct resource_i *r;
116145256Sjkoshy
117183725Sjkoshy	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
118183725Sjkoshy	if (r != NULL) {
119183725Sjkoshy		r->r_r.__r_i = r;
120183725Sjkoshy	}
121183725Sjkoshy	return (r);
122183725Sjkoshy}
123183725Sjkoshy
124183725Sjkoshyint
125183725Sjkoshyrman_init(struct rman *rm)
126183725Sjkoshy{
127183725Sjkoshy	static int once = 0;
128183725Sjkoshy
129183725Sjkoshy	if (once == 0) {
130183725Sjkoshy		once = 1;
131183725Sjkoshy		TAILQ_INIT(&rman_head);
132183725Sjkoshy		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
133183725Sjkoshy	}
134183725Sjkoshy
135183725Sjkoshy	if (rm->rm_type == RMAN_UNINIT)
136183725Sjkoshy		panic("rman_init");
137183725Sjkoshy	if (rm->rm_type == RMAN_GAUGE)
138185363Sjkoshy		panic("implement RMAN_GAUGE");
139183725Sjkoshy
140185363Sjkoshy	TAILQ_INIT(&rm->rm_list);
141183725Sjkoshy	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
142185363Sjkoshy	if (rm->rm_mtx == NULL)
143183725Sjkoshy		return ENOMEM;
144183725Sjkoshy	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);
145183725Sjkoshy
146185363Sjkoshy	mtx_lock(&rman_mtx);
147185363Sjkoshy	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
148185363Sjkoshy	mtx_unlock(&rman_mtx);
149185363Sjkoshy	return 0;
150185363Sjkoshy}
151185363Sjkoshy
152185363Sjkoshy/*
153185363Sjkoshy * NB: this interface is not robust against programming errors which
154200928Srpaulo * add multiple copies of the same region.
155204635Sgnn */
156233335Sgonzoint
157206089Sfabientrman_manage_region(struct rman *rm, u_long start, u_long end)
158228869Sjhibbits{
159185363Sjkoshy	struct resource_i *r, *s, *t;
160233628Sfabient
161233628Sfabient	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
162185363Sjkoshy	    rm->rm_descr, start, end));
163185363Sjkoshy	r = int_alloc_resource(M_NOWAIT);
164185363Sjkoshy	if (r == NULL)
165185363Sjkoshy		return ENOMEM;
166185363Sjkoshy	r->r_start = start;
167185363Sjkoshy	r->r_end = end;
168185363Sjkoshy	r->r_rm = rm;
169185363Sjkoshy
170185363Sjkoshy	mtx_lock(rm->rm_mtx);
171185363Sjkoshy
172185363Sjkoshy	/* Skip entries before us. */
173185363Sjkoshy	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
174185363Sjkoshy		if (s->r_end == ULONG_MAX)
175185363Sjkoshy			break;
176185363Sjkoshy		if (s->r_end + 1 >= r->r_start)
177185363Sjkoshy			break;
178185363Sjkoshy	}
179185363Sjkoshy
180185363Sjkoshy	/* If we ran off the end of the list, insert at the tail. */
181187761Sjeff	if (s == NULL) {
182187761Sjeff		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
183187761Sjeff	} else {
184187761Sjeff		/* Check for any overlap with the current region. */
185187761Sjeff		if (r->r_start <= s->r_end && r->r_end >= s->r_start)
186240164Sfabient			return EBUSY;
187240164Sfabient
188240164Sfabient		/* Check for any overlap with the next region. */
189240164Sfabient		t = TAILQ_NEXT(s, r_link);
190240164Sfabient		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start)
191246166Ssbruno			return EBUSY;
192246166Ssbruno
193246166Ssbruno		/*
194246166Ssbruno		 * See if this region can be merged with the next region.  If
195246166Ssbruno		 * not, clear the pointer.
196232366Sdavide		 */
197232366Sdavide		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
198232366Sdavide			t = NULL;
199232366Sdavide
200232366Sdavide		/* See if we can merge with the current region. */
201241738Ssbruno		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
202241738Ssbruno			/* Can we merge all 3 regions? */
203241738Ssbruno			if (t != NULL) {
204241738Ssbruno				s->r_end = t->r_end;
205241738Ssbruno				TAILQ_REMOVE(&rm->rm_list, t, r_link);
206206089Sfabient				free(r, M_RMAN);
207206089Sfabient				free(t, M_RMAN);
208206089Sfabient			} else {
209206089Sfabient				s->r_end = r->r_end;
210206089Sfabient				free(r, M_RMAN);
211206089Sfabient			}
212206089Sfabient		} else if (t != NULL) {
213206089Sfabient			/* Can we merge with just the next region? */
214206089Sfabient			t->r_start = r->r_start;
215206089Sfabient			free(r, M_RMAN);
216232366Sdavide		} else if (s->r_end < r->r_start) {
217232366Sdavide			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
218232366Sdavide		} else {
219232366Sdavide			TAILQ_INSERT_BEFORE(s, r, r_link);
220232366Sdavide		}
221206089Sfabient	}
222206089Sfabient
223206089Sfabient	mtx_unlock(rm->rm_mtx);
224206089Sfabient	return 0;
225206089Sfabient}
226185363Sjkoshy
227185363Sjkoshyint
228185363Sjkoshyrman_init_from_resource(struct rman *rm, struct resource *r)
229185363Sjkoshy{
230185363Sjkoshy	int rv;
231185363Sjkoshy
232183725Sjkoshy	if ((rv = rman_init(rm)) != 0)
233183725Sjkoshy		return (rv);
234183725Sjkoshy	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
235183725Sjkoshy}
236233628Sfabient
237233628Sfabientint
238233628Sfabientrman_fini(struct rman *rm)
239233628Sfabient{
240240164Sfabient	struct resource_i *r;
241246166Ssbruno
242233628Sfabient	mtx_lock(rm->rm_mtx);
243241738Ssbruno	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
244233628Sfabient		if (r->r_flags & RF_ALLOCATED) {
245233628Sfabient			mtx_unlock(rm->rm_mtx);
246233628Sfabient			return EBUSY;
247233628Sfabient		}
248233628Sfabient	}
249233628Sfabient
250233628Sfabient	/*
251233628Sfabient	 * There really should only be one of these if we are in this
252233628Sfabient	 * state and the code is working properly, but it can't hurt.
253233628Sfabient	 */
254233628Sfabient	while (!TAILQ_EMPTY(&rm->rm_list)) {
255183725Sjkoshy		r = TAILQ_FIRST(&rm->rm_list);
256183725Sjkoshy		TAILQ_REMOVE(&rm->rm_list, r, r_link);
257145256Sjkoshy		free(r, M_RMAN);
258183725Sjkoshy	}
259145256Sjkoshy	mtx_unlock(rm->rm_mtx);
260145256Sjkoshy	mtx_lock(&rman_mtx);
261183725Sjkoshy	TAILQ_REMOVE(&rman_head, rm, rm_link);
262185363Sjkoshy	mtx_unlock(&rman_mtx);
263185363Sjkoshy	mtx_destroy(rm->rm_mtx);
264185363Sjkoshy	free(rm->rm_mtx, M_RMAN);
265185363Sjkoshy
266185363Sjkoshy	return 0;
267185363Sjkoshy}
268185363Sjkoshy
/*
 * Reserve a range of `count' indices from rm, constrained to lie within
 * [start, end], aligned per RF_ALIGNMENT(flags), and (if bound != 0) not
 * crossing a `bound'-sized boundary.  An exact-size allocated-but-shareable
 * region may be shared if the caller also passed RF_SHAREABLE/RF_TIMESHARE.
 * Returns the public resource on success, NULL on failure.  Locks and
 * unlocks rm->rm_mtx internally.
 */
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	/* RF_ACTIVE is handled at the end; strip it for region matching. */
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	/* Skip regions that end entirely before the requested window. */
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1> end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			/* Exact fit: claim the whole region in place. */
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				/* r becomes the leftover tail after rv. */
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		/* NOTE(review): rstart/rend computed here are never used by
		 * the exact-match test below — looks like dead code; confirm
		 * before removing. */
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			/* First sharer: create the share list lazily. */
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}
483145256Sjkoshy
484145256Sjkoshystruct resource *
485145256Sjkoshyrman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
486145256Sjkoshy		      u_int flags, struct device *dev)
487145256Sjkoshy{
488145256Sjkoshy
489145256Sjkoshy	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
490174406Sjkoshy	    dev));
491145256Sjkoshy}
492145256Sjkoshy
493145256Sjkoshystatic int
494174406Sjkoshyint_rman_activate_resource(struct rman *rm, struct resource_i *r,
495145256Sjkoshy			   struct resource_i **whohas)
496145256Sjkoshy{
497145256Sjkoshy	struct resource_i *s;
498147191Sjkoshy	int ok;
499145256Sjkoshy
500147191Sjkoshy	/*
501145256Sjkoshy	 * If we are not timesharing, then there is nothing much to do.
502147191Sjkoshy	 * If we already have the resource, then there is nothing at all to do.
503145256Sjkoshy	 * If we are not on a sharing list with anybody else, then there is
504147191Sjkoshy	 * little to do.
505145256Sjkoshy	 */
506147191Sjkoshy	if ((r->r_flags & RF_TIMESHARE) == 0
507145256Sjkoshy	    || (r->r_flags & RF_ACTIVE) != 0
508145256Sjkoshy	    || r->r_sharehead == NULL) {
509145256Sjkoshy		r->r_flags |= RF_ACTIVE;
510174406Sjkoshy		return 0;
511145256Sjkoshy	}
512145256Sjkoshy
513174406Sjkoshy	ok = 1;
514145256Sjkoshy	for (s = LIST_FIRST(r->r_sharehead); s && ok;
515145256Sjkoshy	     s = LIST_NEXT(s, r_sharelink)) {
516145256Sjkoshy		if ((s->r_flags & RF_ACTIVE) != 0) {
517145256Sjkoshy			ok = 0;
518174406Sjkoshy			*whohas = s;
519145256Sjkoshy		}
520145256Sjkoshy	}
521145256Sjkoshy	if (ok) {
522145256Sjkoshy		r->r_flags |= RF_ACTIVE;
523147191Sjkoshy		return 0;
524147191Sjkoshy	}
525145256Sjkoshy	return EBUSY;
526145256Sjkoshy}
527174406Sjkoshy
528145256Sjkoshyint
529145256Sjkoshyrman_activate_resource(struct resource *re)
530145256Sjkoshy{
531147191Sjkoshy	int rv;
532147191Sjkoshy	struct resource_i *r, *whohas;
533147759Sjkoshy	struct rman *rm;
534147191Sjkoshy
535145256Sjkoshy	r = re->__r_i;
536185363Sjkoshy	rm = r->r_rm;
537185363Sjkoshy	mtx_lock(rm->rm_mtx);
538185363Sjkoshy	rv = int_rman_activate_resource(rm, r, &whohas);
539185363Sjkoshy	mtx_unlock(rm->rm_mtx);
540185363Sjkoshy	return rv;
541185363Sjkoshy}
542185363Sjkoshy
/*
 * Sleep until a timeshared resource can be activated, with msleep
 * priority `pri' and timeout `timo'.
 *
 * LOCKING QUIRK: on success (rv != EBUSY) this returns with rm->rm_mtx
 * STILL HELD — see the comment below; the caller is responsible for
 * releasing it.  On msleep failure the mutex is dropped before return.
 */
int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		/* EBUSY from a timeshared resource implies a share list. */
		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		/* Ask the current holder to wake us when it deactivates. */
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}
568185363Sjkoshy
569185363Sjkoshystatic int
570198433Sjkoshyint_rman_deactivate_resource(struct resource_i *r)
571198433Sjkoshy{
572198433Sjkoshy
573198433Sjkoshy	r->r_flags &= ~RF_ACTIVE;
574198433Sjkoshy	if (r->r_flags & RF_WANTED) {
575198433Sjkoshy		r->r_flags &= ~RF_WANTED;
576198433Sjkoshy		wakeup(r->r_sharehead);
577198433Sjkoshy	}
578198433Sjkoshy	return 0;
579198433Sjkoshy}
580198433Sjkoshy
581198433Sjkoshyint
582198433Sjkoshyrman_deactivate_resource(struct resource *r)
583198433Sjkoshy{
584198433Sjkoshy	struct	rman *rm;
585240164Sfabient
586240164Sfabient	rm = r->__r_i->r_rm;
587246166Ssbruno	mtx_lock(rm->rm_mtx);
588246166Ssbruno	int_rman_deactivate_resource(r->__r_i);
589232366Sdavide	mtx_unlock(rm->rm_mtx);
590232366Sdavide	return 0;
591241738Ssbruno}
592241738Ssbruno
/*
 * Release r back to its manager.  Caller holds rm->rm_mtx.  Handles the
 * shared case (drop off the share list, handing RF_FIRSTSHARE and the
 * main-list slot to the next sharer) and the unshared case (merge the
 * now-free segment with idle neighbors, or just clear RF_ALLOCATED).
 * Always returns 0.
 */
static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			/* Hand our main-list position to sharer s. */
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
685185363Sjkoshy
686185363Sjkoshyint
687185363Sjkoshyrman_release_resource(struct resource *re)
688185363Sjkoshy{
689185363Sjkoshy	int	rv;
690185363Sjkoshy	struct	resource_i *r;
691185363Sjkoshy	struct	rman *rm;
692185363Sjkoshy
693240164Sfabient	r = re->__r_i;
694206089Sfabient	rm = r->r_rm;
695206089Sfabient	mtx_lock(rm->rm_mtx);
696206089Sfabient	rv = int_rman_release_resource(rm, r);
697206089Sfabient	mtx_unlock(rm->rm_mtx);
698206089Sfabient	return (rv);
699206089Sfabient}
700206089Sfabient
701206089Sfabientuint32_t
702206089Sfabientrman_make_alignment_flags(uint32_t size)
703206089Sfabient{
704206089Sfabient	int	i;
705206089Sfabient
706206089Sfabient	/*
707206089Sfabient	 * Find the hightest bit set, and add one if more than one bit
708206089Sfabient	 * set.  We're effectively computing the ceil(log2(size)) here.
709206089Sfabient	 */
710206089Sfabient	for (i = 31; i > 0; i--)
711206089Sfabient		if ((1 << i) & size)
712241738Ssbruno			break;
713240164Sfabient	if (~(1 << i) & size)
714240164Sfabient		i++;
715240164Sfabient
716240164Sfabient	return(RF_ALIGNMENT_LOG2(i));
717240164Sfabient}
718240164Sfabient
/* Return the first index of the resource's range. */
u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

/* Return the last index of the resource's range (inclusive). */
u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

/* Return the number of indices spanned by the resource. */
u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

/* Return the resource's RF_* flags. */
u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}
742240164Sfabient
743185363Sjkoshyvoid
744185363Sjkoshyrman_set_virtual(struct resource *r, void *v)
745185363Sjkoshy{
746185363Sjkoshy	r->__r_i->r_virtual = v;
747185363Sjkoshy}
748240164Sfabient
749185363Sjkoshyvoid *
750185363Sjkoshyrman_get_virtual(struct resource *r)
751185363Sjkoshy{
752185363Sjkoshy	return (r->__r_i->r_virtual);
753185363Sjkoshy}
754185363Sjkoshy
755206089Sfabientvoid
756185363Sjkoshyrman_set_bustag(struct resource *r, bus_space_tag_t t)
757185363Sjkoshy{
758185363Sjkoshy	r->r_bustag = t;
759185363Sjkoshy}
760185363Sjkoshy
761185363Sjkoshybus_space_tag_t
762185363Sjkoshyrman_get_bustag(struct resource *r)
763185363Sjkoshy{
764185363Sjkoshy	return (r->r_bustag);
765185363Sjkoshy}
766185363Sjkoshy
767185363Sjkoshyvoid
768185363Sjkoshyrman_set_bushandle(struct resource *r, bus_space_handle_t h)
769185363Sjkoshy{
770185363Sjkoshy	r->r_bushandle = h;
771185363Sjkoshy}
772185363Sjkoshy
773185363Sjkoshybus_space_handle_t
774185363Sjkoshyrman_get_bushandle(struct resource *r)
775185363Sjkoshy{
776185363Sjkoshy	return (r->r_bushandle);
777185363Sjkoshy}
778185363Sjkoshy
779185363Sjkoshyvoid
780185363Sjkoshyrman_set_rid(struct resource *r, int rid)
781193809Sjkoshy{
782185363Sjkoshy	r->__r_i->r_rid = rid;
783185363Sjkoshy}
784185363Sjkoshy
785193809Sjkoshyvoid
786185363Sjkoshyrman_set_start(struct resource *r, u_long start)
787185363Sjkoshy{
788185363Sjkoshy	r->__r_i->r_start = start;
789193809Sjkoshy}
790185363Sjkoshy
791185363Sjkoshyvoid
792185363Sjkoshyrman_set_end(struct resource *r, u_long end)
793193809Sjkoshy{
794185363Sjkoshy	r->__r_i->r_end = end;
795185363Sjkoshy}
796193809Sjkoshy
797185363Sjkoshyint
798185363Sjkoshyrman_get_rid(struct resource *r)
799185363Sjkoshy{
800185363Sjkoshy	return (r->__r_i->r_rid);
801185585Sjkoshy}
802206089Sfabient
803193809Sjkoshystruct device *
804185363Sjkoshyrman_get_device(struct resource *r)
805185363Sjkoshy{
806193809Sjkoshy	return (r->__r_i->r_dev);
807185363Sjkoshy}
808185363Sjkoshy
809185363Sjkoshyvoid
810185363Sjkoshyrman_set_device(struct resource *r, struct device *dev)
811206089Sfabient{
812206089Sfabient	r->__r_i->r_dev = dev;
813206089Sfabient}
814240164Sfabient
815206089Sfabientint
816206089Sfabientrman_is_region_manager(struct resource *r, struct rman *rm)
817240164Sfabient{
818241738Ssbruno
819246166Ssbruno	return (r->__r_i->r_rm == rm);
820246166Ssbruno}
821240164Sfabient
822241738Ssbruno/*
823240164Sfabient * Sysctl interface for scanning the resource lists.
824240164Sfabient *
825185363Sjkoshy * We take two input parameters; the index into the list of resource
826185363Sjkoshy * managers, and the resource offset into the list.
827185363Sjkoshy */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;	/* exported rman description */
	struct u_resource	ures;	/* exported resource description */
	int			error;

	/* Expect exactly: generation, rman index, resource index. */
	if (namelen != 3)
		return (EINVAL);

	/* Stale generation means the caller's indices may be invalid. */
	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	/* rm is NULL if the loop ran off the end of rman_head. */
	if (rm == NULL)
		return (ENOENT);

	/*
	 * NOTE(review): rm is dereferenced below after rman_mtx has been
	 * dropped; this appears to rely on resource managers never being
	 * torn down while the sysctl runs -- confirm against rman_fini
	 * callers.
	 */

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		/* Inclusive interval, hence the "+ 1". */
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				/* Format the owner as "name+unit", e.g. "pci0". */
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			/* Drop the lock before the (possibly sleeping) copyout. */
			mtx_unlock(rm->rm_mtx);
			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}
911206089Sfabient
/* Export the resource-manager lists under hw.bus.rman via sysctl_rman(). */
SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
914206089Sfabient