/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
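
/*
 * A minimal usage sketch (illustrative only: the port range, the
 * description string, and the device_t `dev' below are hypothetical).
 * A bus driver typically sets up a manager once at attach time and then
 * carves allocations out of it on behalf of its children:
 *
 *	static struct rman ex_rman;
 *
 *	ex_rman.rm_type = RMAN_ARRAY;
 *	ex_rman.rm_descr = "example I/O ports";
 *	if (rman_init(&ex_rman) != 0 ||
 *	    rman_manage_region(&ex_rman, 0x100, 0x1ff) != 0)
 *		panic("example rman setup failed");
 *
 *	// Later, for each client request:
 *	struct resource *res;
 *
 *	res = rman_reserve_resource(&ex_rman, 0x100, 0x1ff, 0x10,
 *	    RF_ACTIVE, dev);
 *	if (res != NULL)
 *		... use it, and eventually rman_release_resource(res);
 */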

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 267961 2014-06-27 16:33:43Z hselasky $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int     rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");
#define DPRINTF(params) do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0ul;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
{
	struct	resource_i *r, *s, *t, *new;
	struct	rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & (RF_TIMESHARE | RF_SHAREABLE))
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
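
/*
 * Illustrative use of rman_adjust_resource() (the PAGE_SIZE growth and
 * the `res' handle are hypothetical): grow a non-shared allocation one
 * page upward, assuming the region above it is still free:
 *
 *	error = rman_adjust_resource(res, rman_get_start(res),
 *	    rman_get_end(res) + PAGE_SIZE);
 *
 * EBUSY is returned if the adjacent region is allocated or too small,
 * and EINVAL if the resource is shared or the new range does not
 * overlap the old one.
 */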

#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_TIMESHARE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	new_rflags;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT((flags & (RF_WANTED | RF_FIRSTSHARE)) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~(RF_ACTIVE | RF_WANTED | RF_FIRSTSHARE)) |
	    RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= ULONG_MAX - amask,
	    ("start (%#lx) + amask (%#lx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > ULONG_MAX - amask) {
			DPRINTF(("s->r_start (%#lx) + amask (%#lx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
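		/*
		 * Worked example (illustrative numbers): with 16-byte
		 * alignment (amask = 0xf), a 0x100 boundary (bmask = ~0xff),
		 * count = 0x20, and rstart = 0x1f0, the candidate range
		 * [0x1f0, 0x20f] crosses a boundary, so rstart is bumped
		 * by 0x100 - 0xf0 = 0x10 to 0x200, which satisfies both
		 * the alignment and the boundary constraint.
		 */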
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in flags, we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && (flags & RF_ACTIVE) != 0) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
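
/*
 * Sharing sketch (illustrative: the `irq_rman' manager, the interrupt
 * line, and both device_t handles are hypothetical).  Two clients may
 * share one region provided they request identical ranges and the same
 * sharing type:
 *
 *	res_a = rman_reserve_resource(&irq_rman, 5, 5, 1, RF_SHAREABLE, devA);
 *	res_b = rman_reserve_resource(&irq_rman, 5, 5, 1, RF_SHAREABLE, devB);
 *
 * Both calls succeed and return distinct struct resource handles backed
 * by the same [5, 5] region on a common sharing list.
 */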

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are on the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int	rv;
	struct	resource_i *r;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit is
	 * set.  We're effectively computing ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}
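
/*
 * For example, rman_make_alignment_flags(0x1000) finds only bit 12 set
 * and yields RF_ALIGNMENT_LOG2(12) (4KB alignment), while
 * rman_make_alignment_flags(0x1800) rounds up to RF_ALIGNMENT_LOG2(13)
 * because more than one bit is set.
 */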

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation, the index
 * into the list of resource managers, and the resource index within
 * that manager (-1 requests details on the manager itself).
 */
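/*
 * For example (illustrative request): name = { gen, 0, -1 } returns the
 * struct u_rman describing the first registered resource manager, and
 * name = { gen, 0, 0 } returns the struct u_resource for that manager's
 * first region (regions on a sharing list are counted individually).
 */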
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		}
		else if (res_idx-- == 0)
				goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%lx-0x%lx full range)\n",
	    rm, rm->rm_descr, rm->rm_start, rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}
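
/*
 * From the DDB prompt (hypothetical rman address shown):
 *
 *	db> show rman 0xffffffff81234560
 *
 * dumps that manager's header and region list; "show rmans" lists the
 * headers of all registered managers, and "show all rman" dumps each
 * manager in full.
 */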

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif