/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
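
/*
 * A minimal usage sketch (illustrative only; excluded from compilation).
 * A bus driver typically embeds a struct rman in its softc, seeds it with
 * the ranges the bus decodes, and carves allocations out of it on behalf
 * of its children.  The name `foo_rman' and the ranges below are
 * hypothetical.
 */
#if 0
static struct rman foo_rman;

static int
foo_attach_example(void)
{
	struct resource *res;

	foo_rman.rm_type = RMAN_ARRAY;
	foo_rman.rm_descr = "example I/O port range";
	if (rman_init(&foo_rman) != 0)
		return (ENXIO);
	if (rman_manage_region(&foo_rman, 0x100, 0x1ff) != 0)
		return (ENXIO);

	/* Reserve any 0x10-entry subrange and activate it immediately. */
	res = rman_reserve_resource(&foo_rman, 0x100, 0x1ff, 0x10,
	    RF_ACTIVE, NULL);
	if (res == NULL)
		return (ENXIO);
	printf("got [%#lx, %#lx]\n", rman_get_start(res), rman_get_end(res));

	rman_release_resource(res);
	return (rman_fini(&foo_rman));
}
#endif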

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct device *r_dev;	/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define	DPRINTF(params)	if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx;	/* mutex to protect rman_head */
static int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
    struct resource_i **whohas);
static int int_rman_deactivate_resource(struct resource_i *r);
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0ul;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			/* Drop the lock and free r before bailing out. */
			mtx_unlock(rm->rm_mtx);
			free(r, M_RMAN);
			return EBUSY;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			mtx_unlock(rm->rm_mtx);
			free(r, M_RMAN);
			return EBUSY;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}

	mtx_unlock(rm->rm_mtx);
	return 0;
}
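
/*
 * Illustrative sketch (excluded from compilation): rman_manage_region()
 * keeps the region list sorted and coalesces free regions that abut
 * exactly, so handing it adjacent ranges piecemeal yields a single free
 * region, while an overlapping range is rejected.  Values hypothetical.
 */
#if 0
static void
foo_manage_example(struct rman *rm)
{

	rman_manage_region(rm, 0x000, 0x0ff);	/* list: [0x000, 0x0ff] */
	rman_manage_region(rm, 0x100, 0x1ff);	/* merged: [0x000, 0x1ff] */
	rman_manage_region(rm, 0x300, 0x3ff);	/* [0x000, 0x1ff], [0x300, 0x3ff] */
	rman_manage_region(rm, 0x180, 0x27f);	/* overlaps: returns EBUSY */
}
#endif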

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & (RF_TIMESHARE | RF_SHAREABLE))
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
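
/*
 * Illustrative sketch (excluded from compilation): growing an allocated
 * resource in place with rman_adjust_resource().  The grow succeeds only
 * if the adjacent region is free and large enough; a range that does not
 * overlap the existing one at all is rejected.  Values hypothetical.
 */
#if 0
static int
foo_adjust_example(struct resource *res)
{
	int error;

	/* res covers [0x100, 0x1ff]; assume [0x200, 0x2ff] is free. */
	error = rman_adjust_resource(res, 0x100, 0x2ff);	/* grow end */
	if (error == 0)
		error = rman_adjust_resource(res, 0x180, 0x2ff); /* shrink start */
	if (error == 0)
		error = rman_adjust_resource(res, 0x400, 0x4ff); /* EINVAL */
	return (error);
}
#endif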

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
			    u_long count, u_long bound, u_int flags,
			    struct device *dev)
{
	u_int want_activate;
	struct resource_i *r, *s, *rv;
	u_long rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	    "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	if (start > ULONG_MAX - amask) {
		DPRINTF(("start+amask would wrap around\n"));
		goto out;
	}

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > ULONG_MAX - amask) {
			DPRINTF(("s->r_start (%#lx) + amask (%#lx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if ((s->r_flags & flags) == flags &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
			    (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
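
/*
 * Illustrative sketch (excluded from compilation): sharing.  Two clients
 * asking for the identical range with RF_SHAREABLE (a shared interrupt
 * line, say) both succeed and are linked onto one sharing list; the
 * second request would fail without the flag.  Names and values are
 * hypothetical.
 */
#if 0
static void
foo_share_example(struct rman *irq_rm, struct device *dev_a,
    struct device *dev_b)
{
	struct resource *ra, *rb;

	ra = rman_reserve_resource(irq_rm, 10, 10, 1, RF_SHAREABLE, dev_a);
	rb = rman_reserve_resource(irq_rm, 10, 10, 1, RF_SHAREABLE, dev_b);
	/* Both succeed and describe IRQ 10 on a common sharing list. */
}
#endif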

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit is
	 * set.  We're effectively computing ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
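
/*
 * Illustrative examples (values hypothetical): the result encodes
 * ceil(log2(size)), so rman_reserve_resource_bound() aligns the
 * allocation to the next power of two at or above `size':
 *
 *	rman_make_alignment_flags(0x1000) == RF_ALIGNMENT_LOG2(12)
 *	rman_make_alignment_flags(0x1001) == RF_ALIGNMENT_LOG2(13)
 *	rman_make_alignment_flags(1)      == RF_ALIGNMENT_LOG2(0)
 */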

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%lx-0x%lx full range)\n",
	    rm, rm->rm_descr, rm->rm_start, rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif