/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission. M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose. It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager. This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly. Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code. The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order. Most of the resources
 * are of this type, as it is the most familiar. The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance). The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share. RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices. That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
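
/*
 * Illustrative sketch (not part of the original file): a typical bus
 * driver drives this interface roughly as follows. The `example_rman'
 * manager, the `example_attach' routine, and the address ranges are
 * all hypothetical.
 */
#if 0
static struct rman example_rman;

static int
example_attach(void)
{
	struct resource *res;
	int error;

	example_rman.rm_type = RMAN_ARRAY;
	example_rman.rm_descr = "example I/O ports";
	error = rman_init(&example_rman);
	if (error)
		return (error);
	/* Hand the index range [0x0, 0xffff] over to the manager. */
	error = rman_manage_region(&example_rman, 0x0, 0xffff);
	if (error)
		return (error);
	/* Carve out 0x20 contiguous ports anywhere in [0x100, 0x1ff]. */
	res = rman_reserve_resource(&example_rman, 0x100, 0x1ff, 0x20,
	    RF_ACTIVE, NULL);
	if (res == NULL)
		return (ENXIO);
	/* ... use the resource, then give it back ... */
	rman_release_resource(res);
	return (0);
}
#endif
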
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 158125 2006-04-28 21:23:09Z marcel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space). That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

int	rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");
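
/*
 * The DPRINTF tracing below can be enabled at boot by setting
 * debug.rman_debug=1 in loader.conf(5), or at runtime with
 * `sysctl debug.rman_debug=1'.
 */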
#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);
	for (s = TAILQ_FIRST(&rm->rm_list);
	     s && s->r_end < r->r_start;
	     s = TAILQ_NEXT(s, r_link))
		;

	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		TAILQ_INSERT_BEFORE(s, r, r_link);
	}

	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound, u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	       "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count,
	       flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
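	/*
	 * Worked example (illustrative): with RF_ALIGNMENT_LOG2(4) in
	 * flags, amask = 0xf, so `(rstart + amask) & ~amask' below rounds
	 * rstart up to the next multiple of 16. With bound = 0x1000,
	 * bmask = ~0xfff, and `(rstart ^ (rstart + count - 1)) & bmask' is
	 * nonzero exactly when the candidate range crosses a 4KB boundary;
	 * in that case `rstart += bound - (rstart & ~bmask)' advances
	 * rstart to the next boundary.
	 */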
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two. The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing. By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request. (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource. If this fails, we release the resource
	 * and indicate overall failure. (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
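
/*
 * Illustrative sketch (not part of the original file): reserving an
 * 8-byte range that is 8-byte aligned and does not cross a 0x100
 * boundary. `example_rman' is hypothetical; the snippet would live
 * inside a driver method.
 */
#if 0
	struct resource *res;

	res = rman_reserve_resource_bound(&example_rman, 0x0, 0xffff, 8,
	    0x100, rman_make_alignment_flags(8), NULL);
	if (res == NULL)
		return (ENXIO);
#endif
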
static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}
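
/*
 * Note on the RF_TIMESHARE handshake: rman_await_resource() above marks
 * the current holder with RF_WANTED and sleeps on the share list head as
 * its wait channel; int_rman_deactivate_resource() below clears RF_WANTED
 * and issues a wakeup() on that same channel, so every sleeping waiter
 * retries its activation.
 */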
static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first. If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them. If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;
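
	/*
	 * Four cases follow. Schematically, with `r' being released and
	 * `s'/`t' its free neighbors (when not NULL):
	 *
	 *	[  s  ][  r  ][  t  ]  ->  [        s        ]
	 *	[  s  ][  r  ]         ->  [      s     ]
	 *	       [  r  ][  t  ]  ->  [      t     ]
	 *	       [  r  ]         ->  r is simply marked free
	 */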
	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated. In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int	rv;
	struct	resource_i *r;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set. We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}
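
/*
 * Worked examples (illustrative): size 0x1000 has only bit 12 set, so
 * the loop above stops at i = 12 and the second test adds nothing;
 * rman_make_alignment_flags(0x1000) == RF_ALIGNMENT_LOG2(12), i.e. 4KB
 * alignment. Size 0x1800 has bits 12 and 11 set, so i is bumped to 13,
 * rounding the alignment up to the next power of two (8KB).
 */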
u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: a bus generation count (checked
 * against the current generation), the index into the list of resource
 * managers, and the resource offset into that manager's list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			mtx_unlock(rm->rm_mtx);
			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
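
/*
 * Userland consumers (e.g. libdevinfo, and through it devinfo(8)) walk
 * this node by stepping the rman and resource indices until ENOENT is
 * returned.
 */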