/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/disk.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE2(io, , , start, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , done, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__start, "struct bio *",
    "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__done, "struct bio *",
    "struct devstat *");

#define	DTRACE_DEVSTAT_START()		SDT_PROBE2(io, , , start, NULL, ds)
#define	DTRACE_DEVSTAT_BIO_START()	SDT_PROBE2(io, , , start, bp, ds)
#define	DTRACE_DEVSTAT_DONE()		SDT_PROBE2(io, , , done, NULL, ds)
#define	DTRACE_DEVSTAT_BIO_DONE()	SDT_PROBE2(io, , , done, bp, ds)
#define	DTRACE_DEVSTAT_WAIT_START()	SDT_PROBE2(io, , , wait__start, NULL, ds)
#define	DTRACE_DEVSTAT_WAIT_DONE()	SDT_PROBE2(io, , , wait__done, NULL, ds)

static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;
MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);

static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
			      int unit_number, uint32_t block_size,
			      devstat_support_flags flags,
			      devstat_type_flags device_type,
			      devstat_priority priority);
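
/*
 * Example (an illustrative sketch, not part of this file): a disk driver
 * would typically create its devstat entry once at attach time, along
 * these lines:
 *
 *	softc->stats = devstat_new_entry("foo", unit, DEV_BSIZE,
 *	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
 *	    DEVSTAT_PRIORITY_DISK);
 *
 * The "foo" name and softc layout are hypothetical; the flag, type and
 * priority constants are defined in <sys/devicestat.h>.
 */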
/*
 * Allocate a devstat and initialize it
 */
struct devstat *
devstat_new_entry(const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstat *ds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
	if (unit_number == -1) {
		ds->unit_number = unit_number;
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
				  flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
	 */
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			 && ((ds_next == NULL)
			  || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
						    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
							   ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
							   ds, dev_links);
					printf("devstat_add_entry: HELP! "
					       "sorting problem detected "
					       "for name %p unit %d\n",
					       dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}
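
/*
 * To illustrate the sort above (a hedged example, assuming the stock
 * priority values in <sys/devicestat.h>): DEVSTAT_PRIORITY_DISK entries
 * land ahead of DEVSTAT_PRIORITY_CD entries, which in turn come before
 * DEVSTAT_PRIORITY_TAPE; two disks keep their relative probe order.
 */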
/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	if (ds->unit_number != -1) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, const struct bintime *now)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (ds->start_count == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	ds->start_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
	DTRACE_DEVSTAT_START();
}

void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction_bio_t0(ds, bp);
}

void
devstat_start_transaction_bio_t0(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	devstat_start_transaction(ds, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_START();
}
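
/*
 * Typical pairing in a driver (an illustrative sketch; "sc" is a
 * hypothetical softc):
 *
 *	devstat_start_transaction_bio(sc->stats, bp);	(strategy path)
 *		... issue the I/O to the hardware ...
 *	devstat_end_transaction_bio(sc->stats, bp);	(completion path,
 *							 before biodone())
 */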
/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction() is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, being a kind of lock, they must be updated with
 * atomic instructions using appropriate memory barriers.
 */
void
devstat_end_transaction(struct devstat *ds, uint32_t bytes,
			devstat_tag_type tag_type, devstat_trans_flags flags,
			const struct bintime *now, const struct bintime *then)
{
	struct bintime dt, lnow;

	/* sanity check */
	if (ds == NULL)
		return;

	if (now == NULL) {
		binuptime(&lnow);
		now = &lnow;
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
	DTRACE_DEVSTAT_DONE();
}

void
devstat_end_transaction_bio(struct devstat *ds, const struct bio *bp)
{

	devstat_end_transaction_bio_bt(ds, bp, NULL);
}

void
devstat_end_transaction_bio_bt(struct devstat *ds, const struct bio *bp,
    const struct bintime *now)
{
	devstat_trans_flags flg;
	devstat_tag_type tag;

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_flags & BIO_ORDERED)
		tag = DEVSTAT_TAG_ORDERED;
	else
		tag = DEVSTAT_TAG_SIMPLE;
	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if ((bp->bio_cmd == BIO_READ)
	      || ((bp->bio_cmd == BIO_ZONE)
	       && (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)))
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
				tag, flg, now, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_DONE();
}
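
/*
 * A userland reader of the mmap(2)'ed structures can detect a torn copy
 * with the sequence fields, roughly like this (an illustrative sketch;
 * the real consumer logic lives in libdevstat):
 *
 *	struct devstat snap;
 *
 *	do {
 *		memcpy(&snap, ds, sizeof(snap));
 *	} while (snap.sequence0 != snap.sequence1);
 *
 * A front-to-back copy that sees matching sequence numbers did not race
 * with an update, per the ordering described above.
 */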
/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (devstat_num_devs == 0)
		return (0);

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	while (nds != NULL) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return (error);
}

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD, NULL,
    "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all, CTLFLAG_RD|CTLTYPE_OPAQUE,
    NULL, 0, sysctl_devstat, "S,devstat", "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */

#define	statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_ioctl_t devstat_ioctl;
static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	devstat_ioctl,
	.d_mmap =	devstat_mmap,
	.d_name =	"devstat",
};

struct statspage {
	TAILQ_ENTRY(statspage)	list;
	struct devstat		*stat;
	u_int			nfree;
};

static size_t pagelist_pages = 0;
static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

static int
devstat_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int error = ENOTTY;

	switch (cmd) {
	case DIOCGMEDIASIZE:
		error = 0;
		*(off_t *)data = pagelist_pages * PAGE_SIZE;
		break;
	}

	return (error);
}

static int
devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	mtx_lock(&devstat_mutex);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			mtx_unlock(&devstat_mutex);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	mtx_unlock(&devstat_mutex);
	return (-1);
}
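
/*
 * Userland maps these pages read-only through the device node created in
 * devstat_alloc() below.  A consumer might do (a hedged sketch; libdevstat
 * is the real consumer of this interface):
 *
 *	fd = open("/dev/devstat", O_RDONLY);
 *	page = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd,
 *	    i * PAGE_SIZE);
 *
 * where page i then holds up to statsperpage struct devstat entries, in
 * the order the pages were added to the pagelist.
 */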
static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp, *spp2;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
		    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0444,
		    DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	spp2 = NULL;
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		mtx_unlock(&devstat_mutex);
		spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->nfree = statsperpage;

		/*
		 * If free statspages were added while the lock was released
		 * just reuse them.
		 */
		mtx_lock(&devstat_mutex);
		TAILQ_FOREACH(spp, &pagelist, list)
			if (spp->nfree > 0)
				break;
		if (spp == NULL) {
			spp = spp2;

			/*
			 * It would make more sense to add the new page at the
			 * head but the order on the list determines the
			 * sequence of the mapping so we can't do that.
			 */
			pagelist_pages++;
			TAILQ_INSERT_TAIL(&pagelist, spp, list);
		} else
			break;
	}
	dsp = spp->stat;
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	if (spp2 != NULL && spp2 != spp) {
		free(spp2->stat, M_DEVSTAT);
		free(spp2, M_DEVSTAT);
	}
	return (dsp);
}

static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct devstat), "sizeof(struct devstat)");
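
/*
 * Example of sizing a userland buffer for kern.devstat.all with the
 * sysctls exported above (an illustrative sketch, not part of this file):
 *
 *	int n;
 *	size_t len = sizeof(n);
 *
 *	sysctlbyname("kern.devstat.numdevs", &n, &len, NULL, 0);
 *	buf = malloc(sizeof(long) + n * sizeof(struct devstat));
 *
 * The extra sizeof(long) covers the generation number that
 * sysctl_devstat() emits ahead of the devstat array.
 */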