subr_devstat.c revision 113599
139229Sgibbs/* 243819Sken * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. 339229Sgibbs * All rights reserved. 439229Sgibbs * 539229Sgibbs * Redistribution and use in source and binary forms, with or without 639229Sgibbs * modification, are permitted provided that the following conditions 739229Sgibbs * are met: 839229Sgibbs * 1. Redistributions of source code must retain the above copyright 939229Sgibbs * notice, this list of conditions and the following disclaimer. 1039229Sgibbs * 2. Redistributions in binary form must reproduce the above copyright 1139229Sgibbs * notice, this list of conditions and the following disclaimer in the 1239229Sgibbs * documentation and/or other materials provided with the distribution. 1339229Sgibbs * 3. The name of the author may not be used to endorse or promote products 1439229Sgibbs * derived from this software without specific prior written permission. 1539229Sgibbs * 1639229Sgibbs * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 1739229Sgibbs * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1839229Sgibbs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1939229Sgibbs * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 2039229Sgibbs * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2139229Sgibbs * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2239229Sgibbs * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2339229Sgibbs * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2439229Sgibbs * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2539229Sgibbs * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2639229Sgibbs * SUCH DAMAGE. 
 *
 * $FreeBSD: head/sys/kern/subr_devstat.c 113599 2003-04-17 15:06:28Z harti $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

/* Number of devices currently on the sorted devstat list. */
static int devstat_num_devs;
/* Bumped on every list mutation so readers can detect a stale snapshot. */
static long devstat_generation;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
/* Protects the devstat list, the counters above, and entry alloc/free. */
static struct mtx devstat_mutex;

static struct devstatlist device_statq;
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
			int unit_number, u_int32_t block_size,
			devstat_support_flags flags,
			devstat_type_flags device_type,
			devstat_priority priority);

/*
 * Allocate a devstat and initialize it.
 *
 * A unit_number of -1 creates an "anonymous" entry identified only by its
 * id pointer (dev_name); such entries are timestamped but never put on the
 * sorted list.  Otherwise the entry is filled in and priority-inserted by
 * devstat_add_entry().
 */
struct devstat *
devstat_new_entry(const void *dev_name,
		  int unit_number, u_int32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstat *ds;
	static int once;

	/*
	 * First-call initialization of the list head and mutex.
	 * NOTE(review): this check is not itself race-free; presumably the
	 * first call happens while the system is still effectively
	 * single-threaded -- confirm against boot ordering.
	 */
	if (!once) {
		STAILQ_INIT(&device_statq);
		mtx_init(&devstat_mutex, "devstat", NULL, MTX_DEF);
		once = 1;
	}
	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
	if (unit_number == -1) {
		/* Anonymous entry: remember the caller's id, skip the list. */
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
				  flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 *
 * Must be called with devstat_mutex held.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
		  int unit_number, u_int32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
	 */
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			 && ((ds_next == NULL)
			   || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
						    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
							   ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
							   ds, dev_links);
					printf("devstat_add_entry: HELP! "
					       "sorting problem detected "
					       "for name %p unit %d\n",
					       dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}

/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	/* Anonymous entries (id != NULL) were never put on the list. */
	if (ds->id == NULL) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, struct bintime *now)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (ds->start_count == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	ds->start_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
}

/*
 * Convenience wrapper: stamp the bio with the current time (bio_t0) and
 * record the transaction start using that timestamp.
 */
void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction(ds, &bp->bio_t0);
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction() is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, being a kind of locks, they must be updated with
 * atomic instructions using appropriate memory barriers.
 */
void
devstat_end_transaction(struct devstat *ds, u_int32_t bytes,
			devstat_tag_type tag_type, devstat_trans_flags flags,
			struct bintime *now, struct bintime *then)
{
	struct bintime dt, lnow;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	/* Caller may supply the completion time; otherwise sample it here. */
	if (now == NULL) {
		now = &lnow;
		binuptime(now);
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
}

/*
 * Record the completion of a bio: map the bio command to a devstat
 * transaction type and account the bytes actually transferred
 * (bio_bcount - bio_resid), timed from bio_t0.
 */
void
devstat_end_transaction_bio(struct devstat *ds, struct bio *bp)
{
	devstat_trans_flags flg;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if (bp->bio_cmd == BIO_READ)
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
				DEVSTAT_TAG_SIMPLE, flg, NULL, &bp->bio_t0);
}

/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	if (devstat_num_devs == 0)
		return(EINVAL);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	/*
	 * Copy out one entry at a time, revalidating the generation under
	 * the mutex between copies; bail out with EBUSY if the list changed
	 * while the mutex was dropped for SYSCTL_OUT().
	 */
	for (;nds != NULL;) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return(error);
}

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD, 0, "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all, CTLFLAG_RD|CTLTYPE_OPAQUE,
    0, 0, sysctl_devstat, "S,devstat", "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver
 */

#define statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_mmap = devstat_mmap,
	.d_name = "devstat",
};

/* One malloc'ed page worth of devstat slots, plus its free-slot count. */
struct statspage {
	TAILQ_ENTRY(statspage) list;
	struct devstat *stat;	/* page holding statsperpage slots */
	u_int nfree;		/* unallocated slots remaining in page */
};

static TAILQ_HEAD(, statspage) pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

/*
 * mmap(2) handler for the devstat device: translate a page-aligned offset
 * into the physical address of the corresponding stats page.  Only
 * read-only mappings are allowed.
 */
static int
devstat_mmap(dev_t dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	/* Page N of the mapping is the N'th page on pagelist. */
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	return (-1);
}

/*
 * Hand out an unused, zeroed devstat slot, growing the page list when
 * every existing page is full.  Returns with the slot marked allocated.
 * Also creates the /dev node on first use.
 */
static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev(&devstat_cdevsw, 0,
		    UID_ROOT, GID_WHEEL, 0400, DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		/*
		 * We had no free slot in any of our pages, drop the mutex
		 * and get another page.  In theory we could have more than
		 * one process doing this at the same time and consequently
		 * we may allocate more pages than we will need.  That is
		 * Just Too Bad[tm], we can live with that.
		 */
		mtx_unlock(&devstat_mutex);
		spp = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp->nfree = statsperpage;
		mtx_lock(&devstat_mutex);
		/*
		 * It would make more sense to add the new page at the head
		 * but the order on the list determines the sequence of the
		 * mapping so we can't do that.
		 */
		TAILQ_INSERT_TAIL(&pagelist, spp, list);
	}
	dsp = spp->stat;
	/* Scan the page for the first slot not marked allocated. */
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	return (dsp);
}

/*
 * Return a devstat slot to its page.  The slot is zeroed first so
 * userland readers never see stale data.  Called with devstat_mutex held.
 */
static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	/* Find the page this slot belongs to and credit its free count. */
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    0, sizeof(struct devstat), "sizeof(struct devstat)");