subr_rman.c revision 157886
/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.
Rather, 35 * the code which implements the buses that those devices are attached to, 36 * and the code which manages CPU resources, will call this code, and the 37 * end-device drivers will make upcalls to that code to actually perform 38 * the allocation. 39 * 40 * There are two sorts of resources managed by this code. The first is 41 * the more familiar array (RMAN_ARRAY) type; resources in this class 42 * consist of a sequence of individually-allocatable objects which have 43 * been numbered in some well-defined order. Most of the resources 44 * are of this type, as it is the most familiar. The second type is 45 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e., 46 * resources in which each instance is indistinguishable from every 47 * other instance). The principal anticipated application of gauges 48 * is in the context of power consumption, where a bus may have a specific 49 * power budget which all attached devices share. RMAN_GAUGE is not 50 * implemented yet. 51 * 52 * For array resources, we make one simplifying assumption: two clients 53 * sharing the same resource must use the same range of indices. That 54 * is to say, sharing of overlapping-but-not-identical regions is not 55 * permitted. 56 */ 57 58#include <sys/cdefs.h> 59__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 157886 2006-04-19 21:25:55Z imp $"); 60 61#include <sys/param.h> 62#include <sys/systm.h> 63#include <sys/kernel.h> 64#include <sys/lock.h> 65#include <sys/malloc.h> 66#include <sys/mutex.h> 67#include <sys/bus.h> /* XXX debugging */ 68#include <machine/bus.h> 69#include <sys/rman.h> 70#include <sys/sysctl.h> 71 72/* 73 * We use a linked list rather than a bitmap because we need to be able to 74 * represent potentially huge objects (like all of a processor's physical 75 * address space). That is also why the indices are defined to have type 76 * `unsigned long' -- that being the largest integral type in ISO C (1990). 
77 * The 1999 version of C allows `long long'; we may need to switch to that 78 * at some point in the future, particularly if we want to support 36-bit 79 * addresses on IA32 hardware. 80 */ 81struct resource_i { 82 struct resource r_r; 83 TAILQ_ENTRY(resource_i) r_link; 84 LIST_ENTRY(resource_i) r_sharelink; 85 LIST_HEAD(, resource_i) *r_sharehead; 86 u_long r_start; /* index of the first entry in this resource */ 87 u_long r_end; /* index of the last entry (inclusive) */ 88 u_int r_flags; 89 void *r_virtual; /* virtual address of this resource */ 90 struct device *r_dev; /* device which has allocated this resource */ 91 struct rman *r_rm; /* resource manager from whence this came */ 92 int r_rid; /* optional rid for this resource. */ 93}; 94 95int rman_debug = 0; 96TUNABLE_INT("debug.rman_debug", &rman_debug); 97SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW, 98 &rman_debug, 0, "rman debug"); 99 100#define DPRINTF(params) if (rman_debug) printf params 101 102static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager"); 103 104struct rman_head rman_head; 105static struct mtx rman_mtx; /* mutex to protect rman_head */ 106static int int_rman_activate_resource(struct rman *rm, struct resource_i *r, 107 struct resource_i **whohas); 108static int int_rman_deactivate_resource(struct resource_i *r); 109static int int_rman_release_resource(struct rman *rm, struct resource_i *r); 110 111static __inline struct resource_i * 112int_alloc_resource(int malloc_flag) 113{ 114 struct resource_i *r; 115 116 r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO); 117 if (r != NULL) { 118 r->r_r.__r_i = r; 119 } 120 return (r); 121} 122 123/* 124 * XXX: puc.c is a big hack. 125 * XXX: it should be rewritten to act like a bridge and offer 126 * XXX: its own resource manager. 
127 * XXX: until somebody has time, help it out with these two functions 128 */ 129 130struct resource * 131rman_secret_puc_alloc_resource(int malloc_flag) 132{ 133 struct resource_i *r; 134 135 r = int_alloc_resource(malloc_flag); 136 if (r) 137 return (&r->r_r); 138 return (NULL); 139} 140 141void 142rman_secret_puc_free_resource(struct resource *r) 143{ 144 145 free(r->__r_i, M_RMAN); 146} 147 148int 149rman_init(struct rman *rm) 150{ 151 static int once = 0; 152 153 if (once == 0) { 154 once = 1; 155 TAILQ_INIT(&rman_head); 156 mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF); 157 } 158 159 if (rm->rm_type == RMAN_UNINIT) 160 panic("rman_init"); 161 if (rm->rm_type == RMAN_GAUGE) 162 panic("implement RMAN_GAUGE"); 163 164 TAILQ_INIT(&rm->rm_list); 165 rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO); 166 if (rm->rm_mtx == NULL) 167 return ENOMEM; 168 mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF); 169 170 mtx_lock(&rman_mtx); 171 TAILQ_INSERT_TAIL(&rman_head, rm, rm_link); 172 mtx_unlock(&rman_mtx); 173 return 0; 174} 175 176/* 177 * NB: this interface is not robust against programming errors which 178 * add multiple copies of the same region. 
179 */ 180int 181rman_manage_region(struct rman *rm, u_long start, u_long end) 182{ 183 struct resource_i *r, *s; 184 185 DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n", 186 rm->rm_descr, start, end)); 187 r = int_alloc_resource(M_NOWAIT); 188 if (r == NULL) 189 return ENOMEM; 190 r->r_start = start; 191 r->r_end = end; 192 r->r_rm = rm; 193 194 mtx_lock(rm->rm_mtx); 195 for (s = TAILQ_FIRST(&rm->rm_list); 196 s && s->r_end < r->r_start; 197 s = TAILQ_NEXT(s, r_link)) 198 ; 199 200 if (s == NULL) { 201 TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link); 202 } else { 203 TAILQ_INSERT_BEFORE(s, r, r_link); 204 } 205 206 mtx_unlock(rm->rm_mtx); 207 return 0; 208} 209 210int 211rman_fini(struct rman *rm) 212{ 213 struct resource_i *r; 214 215 mtx_lock(rm->rm_mtx); 216 TAILQ_FOREACH(r, &rm->rm_list, r_link) { 217 if (r->r_flags & RF_ALLOCATED) { 218 mtx_unlock(rm->rm_mtx); 219 return EBUSY; 220 } 221 } 222 223 /* 224 * There really should only be one of these if we are in this 225 * state and the code is working properly, but it can't hurt. 226 */ 227 while (!TAILQ_EMPTY(&rm->rm_list)) { 228 r = TAILQ_FIRST(&rm->rm_list); 229 TAILQ_REMOVE(&rm->rm_list, r, r_link); 230 free(r, M_RMAN); 231 } 232 mtx_unlock(rm->rm_mtx); 233 mtx_lock(&rman_mtx); 234 TAILQ_REMOVE(&rman_head, rm, rm_link); 235 mtx_unlock(&rman_mtx); 236 mtx_destroy(rm->rm_mtx); 237 free(rm->rm_mtx, M_RMAN); 238 239 return 0; 240} 241 242struct resource * 243rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end, 244 u_long count, u_long bound, u_int flags, 245 struct device *dev) 246{ 247 u_int want_activate; 248 struct resource_i *r, *s, *rv; 249 u_long rstart, rend, amask, bmask; 250 251 rv = NULL; 252 253 DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length " 254 "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count, 255 flags, dev == NULL ? 
"<null>" : device_get_nameunit(dev))); 256 want_activate = (flags & RF_ACTIVE); 257 flags &= ~RF_ACTIVE; 258 259 mtx_lock(rm->rm_mtx); 260 261 for (r = TAILQ_FIRST(&rm->rm_list); 262 r && r->r_end < start; 263 r = TAILQ_NEXT(r, r_link)) 264 ; 265 266 if (r == NULL) { 267 DPRINTF(("could not find a region\n")); 268 goto out; 269 } 270 271 amask = (1ul << RF_ALIGNMENT(flags)) - 1; 272 /* If bound is 0, bmask will also be 0 */ 273 bmask = ~(bound - 1); 274 /* 275 * First try to find an acceptable totally-unshared region. 276 */ 277 for (s = r; s; s = TAILQ_NEXT(s, r_link)) { 278 DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end)); 279 if (s->r_start + count - 1 > end) { 280 DPRINTF(("s->r_start (%#lx) + count - 1> end (%#lx)\n", 281 s->r_start, end)); 282 break; 283 } 284 if (s->r_flags & RF_ALLOCATED) { 285 DPRINTF(("region is allocated\n")); 286 continue; 287 } 288 rstart = ulmax(s->r_start, start); 289 /* 290 * Try to find a region by adjusting to boundary and alignment 291 * until both conditions are satisfied. This is not an optimal 292 * algorithm, but in most cases it isn't really bad, either. 
293 */ 294 do { 295 rstart = (rstart + amask) & ~amask; 296 if (((rstart ^ (rstart + count - 1)) & bmask) != 0) 297 rstart += bound - (rstart & ~bmask); 298 } while ((rstart & amask) != 0 && rstart < end && 299 rstart < s->r_end); 300 rend = ulmin(s->r_end, ulmax(rstart + count - 1, end)); 301 if (rstart > rend) { 302 DPRINTF(("adjusted start exceeds end\n")); 303 continue; 304 } 305 DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n", 306 rstart, rend, (rend - rstart + 1), count)); 307 308 if ((rend - rstart + 1) >= count) { 309 DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n", 310 rstart, rend, (rend - rstart + 1))); 311 if ((s->r_end - s->r_start + 1) == count) { 312 DPRINTF(("candidate region is entire chunk\n")); 313 rv = s; 314 rv->r_flags |= RF_ALLOCATED | flags; 315 rv->r_dev = dev; 316 goto out; 317 } 318 319 /* 320 * If s->r_start < rstart and 321 * s->r_end > rstart + count - 1, then 322 * we need to split the region into three pieces 323 * (the middle one will get returned to the user). 324 * Otherwise, we are allocating at either the 325 * beginning or the end of s, so we only need to 326 * split it in two. The first case requires 327 * two new allocations; the second requires but one. 328 */ 329 rv = int_alloc_resource(M_NOWAIT); 330 if (rv == NULL) 331 goto out; 332 rv->r_start = rstart; 333 rv->r_end = rstart + count - 1; 334 rv->r_flags = flags | RF_ALLOCATED; 335 rv->r_dev = dev; 336 rv->r_rm = rm; 337 338 if (s->r_start < rv->r_start && s->r_end > rv->r_end) { 339 DPRINTF(("splitting region in three parts: " 340 "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n", 341 s->r_start, rv->r_start - 1, 342 rv->r_start, rv->r_end, 343 rv->r_end + 1, s->r_end)); 344 /* 345 * We are allocating in the middle. 
346 */ 347 r = int_alloc_resource(M_NOWAIT); 348 if (r == NULL) { 349 free(rv, M_RMAN); 350 rv = NULL; 351 goto out; 352 } 353 r->r_start = rv->r_end + 1; 354 r->r_end = s->r_end; 355 r->r_flags = s->r_flags; 356 r->r_rm = rm; 357 s->r_end = rv->r_start - 1; 358 TAILQ_INSERT_AFTER(&rm->rm_list, s, rv, 359 r_link); 360 TAILQ_INSERT_AFTER(&rm->rm_list, rv, r, 361 r_link); 362 } else if (s->r_start == rv->r_start) { 363 DPRINTF(("allocating from the beginning\n")); 364 /* 365 * We are allocating at the beginning. 366 */ 367 s->r_start = rv->r_end + 1; 368 TAILQ_INSERT_BEFORE(s, rv, r_link); 369 } else { 370 DPRINTF(("allocating at the end\n")); 371 /* 372 * We are allocating at the end. 373 */ 374 s->r_end = rv->r_start - 1; 375 TAILQ_INSERT_AFTER(&rm->rm_list, s, rv, 376 r_link); 377 } 378 goto out; 379 } 380 } 381 382 /* 383 * Now find an acceptable shared region, if the client's requirements 384 * allow sharing. By our implementation restriction, a candidate 385 * region must match exactly by both size and sharing type in order 386 * to be considered compatible with the client's request. (The 387 * former restriction could probably be lifted without too much 388 * additional work, but this does not seem warranted.) 
389 */ 390 DPRINTF(("no unshared regions found\n")); 391 if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0) 392 goto out; 393 394 for (s = r; s; s = TAILQ_NEXT(s, r_link)) { 395 if (s->r_start > end) 396 break; 397 if ((s->r_flags & flags) != flags) 398 continue; 399 rstart = ulmax(s->r_start, start); 400 rend = ulmin(s->r_end, ulmax(start + count - 1, end)); 401 if (s->r_start >= start && s->r_end <= end 402 && (s->r_end - s->r_start + 1) == count && 403 (s->r_start & amask) == 0 && 404 ((s->r_start ^ s->r_end) & bmask) == 0) { 405 rv = int_alloc_resource(M_NOWAIT); 406 if (rv == NULL) 407 goto out; 408 rv->r_start = s->r_start; 409 rv->r_end = s->r_end; 410 rv->r_flags = s->r_flags & 411 (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE); 412 rv->r_dev = dev; 413 rv->r_rm = rm; 414 if (s->r_sharehead == NULL) { 415 s->r_sharehead = malloc(sizeof *s->r_sharehead, 416 M_RMAN, M_NOWAIT | M_ZERO); 417 if (s->r_sharehead == NULL) { 418 free(rv, M_RMAN); 419 rv = NULL; 420 goto out; 421 } 422 LIST_INIT(s->r_sharehead); 423 LIST_INSERT_HEAD(s->r_sharehead, s, 424 r_sharelink); 425 s->r_flags |= RF_FIRSTSHARE; 426 } 427 rv->r_sharehead = s->r_sharehead; 428 LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink); 429 goto out; 430 } 431 } 432 433 /* 434 * We couldn't find anything. 435 */ 436out: 437 /* 438 * If the user specified RF_ACTIVE in the initial flags, 439 * which is reflected in `want_activate', we attempt to atomically 440 * activate the resource. If this fails, we release the resource 441 * and indicate overall failure. (This behavior probably doesn't 442 * make sense for RF_TIMESHARE-type resources.) 443 */ 444 if (rv && want_activate) { 445 struct resource_i *whohas; 446 if (int_rman_activate_resource(rm, rv, &whohas)) { 447 int_rman_release_resource(rm, rv); 448 rv = NULL; 449 } 450 } 451 452 mtx_unlock(rm->rm_mtx); 453 return (rv == NULL ? 
NULL : &rv->r_r); 454} 455 456struct resource * 457rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count, 458 u_int flags, struct device *dev) 459{ 460 461 return (rman_reserve_resource_bound(rm, start, end, count, 0, flags, 462 dev)); 463} 464 465static int 466int_rman_activate_resource(struct rman *rm, struct resource_i *r, 467 struct resource_i **whohas) 468{ 469 struct resource_i *s; 470 int ok; 471 472 /* 473 * If we are not timesharing, then there is nothing much to do. 474 * If we already have the resource, then there is nothing at all to do. 475 * If we are not on a sharing list with anybody else, then there is 476 * little to do. 477 */ 478 if ((r->r_flags & RF_TIMESHARE) == 0 479 || (r->r_flags & RF_ACTIVE) != 0 480 || r->r_sharehead == NULL) { 481 r->r_flags |= RF_ACTIVE; 482 return 0; 483 } 484 485 ok = 1; 486 for (s = LIST_FIRST(r->r_sharehead); s && ok; 487 s = LIST_NEXT(s, r_sharelink)) { 488 if ((s->r_flags & RF_ACTIVE) != 0) { 489 ok = 0; 490 *whohas = s; 491 } 492 } 493 if (ok) { 494 r->r_flags |= RF_ACTIVE; 495 return 0; 496 } 497 return EBUSY; 498} 499 500int 501rman_activate_resource(struct resource *re) 502{ 503 int rv; 504 struct resource_i *r, *whohas; 505 struct rman *rm; 506 507 r = re->__r_i; 508 rm = r->r_rm; 509 mtx_lock(rm->rm_mtx); 510 rv = int_rman_activate_resource(rm, r, &whohas); 511 mtx_unlock(rm->rm_mtx); 512 return rv; 513} 514 515int 516rman_await_resource(struct resource *re, int pri, int timo) 517{ 518 int rv; 519 struct resource_i *r, *whohas; 520 struct rman *rm; 521 522 r = re->__r_i; 523 rm = r->r_rm; 524 mtx_lock(rm->rm_mtx); 525 for (;;) { 526 rv = int_rman_activate_resource(rm, r, &whohas); 527 if (rv != EBUSY) 528 return (rv); /* returns with mutex held */ 529 530 if (r->r_sharehead == NULL) 531 panic("rman_await_resource"); 532 whohas->r_flags |= RF_WANTED; 533 rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo); 534 if (rv) { 535 mtx_unlock(rm->rm_mtx); 536 return (rv); 537 } 538 } 
539} 540 541static int 542int_rman_deactivate_resource(struct resource_i *r) 543{ 544 545 r->r_flags &= ~RF_ACTIVE; 546 if (r->r_flags & RF_WANTED) { 547 r->r_flags &= ~RF_WANTED; 548 wakeup(r->r_sharehead); 549 } 550 return 0; 551} 552 553int 554rman_deactivate_resource(struct resource *r) 555{ 556 struct rman *rm; 557 558 rm = r->__r_i->r_rm; 559 mtx_lock(rm->rm_mtx); 560 int_rman_deactivate_resource(r->__r_i); 561 mtx_unlock(rm->rm_mtx); 562 return 0; 563} 564 565static int 566int_rman_release_resource(struct rman *rm, struct resource_i *r) 567{ 568 struct resource_i *s, *t; 569 570 if (r->r_flags & RF_ACTIVE) 571 int_rman_deactivate_resource(r); 572 573 /* 574 * Check for a sharing list first. If there is one, then we don't 575 * have to think as hard. 576 */ 577 if (r->r_sharehead) { 578 /* 579 * If a sharing list exists, then we know there are at 580 * least two sharers. 581 * 582 * If we are in the main circleq, appoint someone else. 583 */ 584 LIST_REMOVE(r, r_sharelink); 585 s = LIST_FIRST(r->r_sharehead); 586 if (r->r_flags & RF_FIRSTSHARE) { 587 s->r_flags |= RF_FIRSTSHARE; 588 TAILQ_INSERT_BEFORE(r, s, r_link); 589 TAILQ_REMOVE(&rm->rm_list, r, r_link); 590 } 591 592 /* 593 * Make sure that the sharing list goes away completely 594 * if the resource is no longer being shared at all. 595 */ 596 if (LIST_NEXT(s, r_sharelink) == NULL) { 597 free(s->r_sharehead, M_RMAN); 598 s->r_sharehead = NULL; 599 s->r_flags &= ~RF_FIRSTSHARE; 600 } 601 goto out; 602 } 603 604 /* 605 * Look at the adjacent resources in the list and see if our 606 * segment can be merged with any of them. If either of the 607 * resources is allocated or is not exactly adjacent then they 608 * cannot be merged with our segment. 
609 */ 610 s = TAILQ_PREV(r, resource_head, r_link); 611 if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 || 612 s->r_end + 1 != r->r_start)) 613 s = NULL; 614 t = TAILQ_NEXT(r, r_link); 615 if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 || 616 r->r_end + 1 != t->r_start)) 617 t = NULL; 618 619 if (s != NULL && t != NULL) { 620 /* 621 * Merge all three segments. 622 */ 623 s->r_end = t->r_end; 624 TAILQ_REMOVE(&rm->rm_list, r, r_link); 625 TAILQ_REMOVE(&rm->rm_list, t, r_link); 626 free(t, M_RMAN); 627 } else if (s != NULL) { 628 /* 629 * Merge previous segment with ours. 630 */ 631 s->r_end = r->r_end; 632 TAILQ_REMOVE(&rm->rm_list, r, r_link); 633 } else if (t != NULL) { 634 /* 635 * Merge next segment with ours. 636 */ 637 t->r_start = r->r_start; 638 TAILQ_REMOVE(&rm->rm_list, r, r_link); 639 } else { 640 /* 641 * At this point, we know there is nothing we 642 * can potentially merge with, because on each 643 * side, there is either nothing there or what is 644 * there is still allocated. In that case, we don't 645 * want to remove r from the list; we simply want to 646 * change it to an unallocated region and return 647 * without freeing anything. 648 */ 649 r->r_flags &= ~RF_ALLOCATED; 650 return 0; 651 } 652 653out: 654 free(r, M_RMAN); 655 return 0; 656} 657 658int 659rman_release_resource(struct resource *re) 660{ 661 int rv; 662 struct resource_i *r; 663 struct rman *rm; 664 665 r = re->__r_i; 666 rm = r->r_rm; 667 mtx_lock(rm->rm_mtx); 668 rv = int_rman_release_resource(rm, r); 669 mtx_unlock(rm->rm_mtx); 670 return (rv); 671} 672 673uint32_t 674rman_make_alignment_flags(uint32_t size) 675{ 676 int i; 677 678 /* 679 * Find the hightest bit set, and add one if more than one bit 680 * set. We're effectively computing the ceil(log2(size)) here. 
681 */ 682 for (i = 31; i > 0; i--) 683 if ((1 << i) & size) 684 break; 685 if (~(1 << i) & size) 686 i++; 687 688 return(RF_ALIGNMENT_LOG2(i)); 689} 690 691u_long 692rman_get_start(struct resource *r) 693{ 694 return (r->__r_i->r_start); 695} 696 697u_long 698rman_get_end(struct resource *r) 699{ 700 return (r->__r_i->r_end); 701} 702 703u_long 704rman_get_size(struct resource *r) 705{ 706 return (r->__r_i->r_end - r->__r_i->r_start + 1); 707} 708 709u_int 710rman_get_flags(struct resource *r) 711{ 712 return (r->__r_i->r_flags); 713} 714 715void 716rman_set_virtual(struct resource *r, void *v) 717{ 718 r->__r_i->r_virtual = v; 719} 720 721void * 722rman_get_virtual(struct resource *r) 723{ 724 return (r->__r_i->r_virtual); 725} 726 727void 728rman_set_bustag(struct resource *r, bus_space_tag_t t) 729{ 730 r->r_bustag = t; 731} 732 733bus_space_tag_t 734rman_get_bustag(struct resource *r) 735{ 736 return (r->r_bustag); 737} 738 739void 740rman_set_bushandle(struct resource *r, bus_space_handle_t h) 741{ 742 r->r_bushandle = h; 743} 744 745bus_space_handle_t 746rman_get_bushandle(struct resource *r) 747{ 748 return (r->r_bushandle); 749} 750 751void 752rman_set_rid(struct resource *r, int rid) 753{ 754 r->__r_i->r_rid = rid; 755} 756 757void 758rman_set_start(struct resource *r, u_long start) 759{ 760 r->__r_i->r_start = start; 761} 762 763void 764rman_set_end(struct resource *r, u_long end) 765{ 766 r->__r_i->r_end = end; 767} 768 769int 770rman_get_rid(struct resource *r) 771{ 772 return (r->__r_i->r_rid); 773} 774 775struct device * 776rman_get_device(struct resource *r) 777{ 778 return (r->__r_i->r_dev); 779} 780 781void 782rman_set_device(struct resource *r, struct device *dev) 783{ 784 r->__r_i->r_dev = dev; 785} 786 787int 788rman_is_region_manager(struct resource *r, struct rman *rm) 789{ 790 791 return (r->__r_i->r_rm == rm); 792} 793 794/* 795 * Sysctl interface for scanning the resource lists. 
796 * 797 * We take two input parameters; the index into the list of resource 798 * managers, and the resource offset into the list. 799 */ 800static int 801sysctl_rman(SYSCTL_HANDLER_ARGS) 802{ 803 int *name = (int *)arg1; 804 u_int namelen = arg2; 805 int rman_idx, res_idx; 806 struct rman *rm; 807 struct resource_i *res; 808 struct u_rman urm; 809 struct u_resource ures; 810 int error; 811 812 if (namelen != 3) 813 return (EINVAL); 814 815 if (bus_data_generation_check(name[0])) 816 return (EINVAL); 817 rman_idx = name[1]; 818 res_idx = name[2]; 819 820 /* 821 * Find the indexed resource manager 822 */ 823 mtx_lock(&rman_mtx); 824 TAILQ_FOREACH(rm, &rman_head, rm_link) { 825 if (rman_idx-- == 0) 826 break; 827 } 828 mtx_unlock(&rman_mtx); 829 if (rm == NULL) 830 return (ENOENT); 831 832 /* 833 * If the resource index is -1, we want details on the 834 * resource manager. 835 */ 836 if (res_idx == -1) { 837 bzero(&urm, sizeof(urm)); 838 urm.rm_handle = (uintptr_t)rm; 839 strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN); 840 urm.rm_start = rm->rm_start; 841 urm.rm_size = rm->rm_end - rm->rm_start + 1; 842 urm.rm_type = rm->rm_type; 843 844 error = SYSCTL_OUT(req, &urm, sizeof(urm)); 845 return (error); 846 } 847 848 /* 849 * Find the indexed resource and return it. 
850 */ 851 mtx_lock(rm->rm_mtx); 852 TAILQ_FOREACH(res, &rm->rm_list, r_link) { 853 if (res_idx-- == 0) { 854 bzero(&ures, sizeof(ures)); 855 ures.r_handle = (uintptr_t)res; 856 ures.r_parent = (uintptr_t)res->r_rm; 857 ures.r_device = (uintptr_t)res->r_dev; 858 if (res->r_dev != NULL) { 859 if (device_get_name(res->r_dev) != NULL) { 860 snprintf(ures.r_devname, RM_TEXTLEN, 861 "%s%d", 862 device_get_name(res->r_dev), 863 device_get_unit(res->r_dev)); 864 } else { 865 strlcpy(ures.r_devname, "nomatch", 866 RM_TEXTLEN); 867 } 868 } else { 869 ures.r_devname[0] = '\0'; 870 } 871 ures.r_start = res->r_start; 872 ures.r_size = res->r_end - res->r_start + 1; 873 ures.r_flags = res->r_flags; 874 875 mtx_unlock(rm->rm_mtx); 876 error = SYSCTL_OUT(req, &ures, sizeof(ures)); 877 return (error); 878 } 879 } 880 mtx_unlock(rm->rm_mtx); 881 return (ENOENT); 882} 883 884SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman, 885 "kernel resource manager"); 886