subr_rman.c revision 150523
/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
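/*
 * Illustrative sketch: how a bus driver typically drives this interface.
 * The rman, address range, flags, and device below are hypothetical
 * examples, not code from this file; callers conventionally fill in
 * rm_type, rm_descr and the rm_start/rm_end bounds before rman_init().
 *
 *	static struct rman io_rman;
 *
 *	io_rman.rm_type = RMAN_ARRAY;
 *	io_rman.rm_descr = "Example I/O ports";
 *	io_rman.rm_start = 0;
 *	io_rman.rm_end = 0xffff;
 *	if (rman_init(&io_rman) != 0 ||
 *	    rman_manage_region(&io_rman, io_rman.rm_start, io_rman.rm_end))
 *		panic("io_rman setup failed");
 *
 *	// Later, on behalf of a child device:
 *	struct resource *res;
 *	res = rman_reserve_resource(&io_rman, 0x3f8, 0x3ff, 8,
 *	    RF_ACTIVE, child_dev);
 *	// ... use the resource ...
 *	rman_release_resource(res);
 */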
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 150523 2005-09-24 20:07:03Z phk $");

#define	__RMAN_RESOURCE_VISIBLE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

int	rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx;	/* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
	    struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == 0)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = int_alloc_resource(M_NOWAIT);
	if (r == 0)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);
	for (s = TAILQ_FIRST(&rm->rm_list);
	     s && s->r_end < r->r_start;
	     s = TAILQ_NEXT(s, r_link))
		;

	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		TAILQ_INSERT_BEFORE(s, r, r_link);
	}

	mtx_unlock(rm->rm_mtx);
	return 0;
}
int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
    u_long count, u_long bound, u_int flags, struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = 0;

	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	    "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count,
	    flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1> end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == 0)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == 0)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
			    (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == 0) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = 0;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (&rv->r_r);
}
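/*
 * Illustrative note on the alignment/boundary adjustment above (the values
 * here are hypothetical).  Alignment travels in the flags as a log2 value
 * (see RF_ALIGNMENT_LOG2() and rman_make_alignment_flags() below), so for
 * 16-byte alignment amask == 0xf; with bound == 0x100, bmask == ~0xff.
 * For a 0x20-byte request whose candidate rstart is 0x2f0:
 *	- rstart is already 16-byte aligned;
 *	- (0x2f0 ^ 0x30f) & ~0xff == 0x100, so the request would cross a
 *	  0x100 boundary and rstart is advanced by 0x100 - 0xf0 == 0x10;
 *	- the loop then exits with rstart == 0x300, which satisfies both
 *	  the alignment and the boundary constraint.
 */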
struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
    u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
    struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == 0) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}
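/*
 * Illustrative sketch of the time-share handoff implemented above.  A
 * client whose activation fails with EBUSY may sleep in
 * rman_await_resource() until the current holder deactivates the
 * resource; the priority and timeout below are hypothetical example
 * values, and on success the rman mutex is still held, as noted in
 * rman_await_resource() itself.
 *
 *	error = rman_await_resource(res, PRIBIO, hz);	// may sleep
 *	if (error == 0) {
 *		// resource is now RF_ACTIVE for this client
 *	}
 *	// ... when done with the hardware:
 *	rman_deactivate_resource(res);	// clears RF_ACTIVE, wakes RF_WANTED sleepers
 */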
static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == 0) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = 0;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int	rv;
	struct	resource_i *r;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}
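/*
 * Illustrative note (values are examples): for a power of two such as
 * 0x100 only bit 8 is set, so the loop stops at i == 8, the "add one"
 * test fails, and the result is RF_ALIGNMENT_LOG2(8), i.e. 256-byte
 * alignment.  For 0x101 the low bit makes the test succeed and the
 * result is RF_ALIGNMENT_LOG2(9), i.e. 512-byte alignment.  Bus front
 * ends typically OR this value into the flags they pass to
 * rman_reserve_resource_bound() to request natural alignment for the
 * allocation size.
 */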
u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation (used only as
 * a consistency check), the index into the list of resource managers,
 * and the resource index within that manager's list (-1 requests details
 * of the manager itself).
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	return (ENOENT);
}

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
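/*
 * Illustrative sketch of a userland reader (hypothetical; it assumes
 * struct u_rman is visible to the program via <sys/rman.h> and that the
 * bus data generation "gen" has been obtained elsewhere).  The node is
 * "hw.bus.rman" and the three appended name components are the ones
 * documented at sysctl_rman() above.
 *
 *	int mib[8];
 *	size_t len = 8;
 *	struct u_rman urm;
 *	size_t urmlen = sizeof(urm);
 *
 *	sysctlnametomib("hw.bus.rman", mib, &len);
 *	mib[len + 0] = gen;	// bus data generation
 *	mib[len + 1] = 0;	// first resource manager
 *	mib[len + 2] = -1;	// -1: describe the manager itself
 *	sysctl(mib, len + 3, &urm, &urmlen, NULL, 0);
 */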