/* memstat_uma.c revision 155542 */
/*-
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
25147997Srwatson * 26147997Srwatson * $FreeBSD: head/lib/libmemstat/memstat_uma.c 155542 2006-02-11 16:54:00Z rwatson $ 27147997Srwatson */ 28147997Srwatson 29147997Srwatson#include <sys/param.h> 30147997Srwatson#include <sys/sysctl.h> 31147997Srwatson 32148693Srwatson#define LIBMEMSTAT /* Cause vm_page.h not to include opt_vmpage.h */ 33148627Srwatson#include <vm/vm.h> 34148627Srwatson#include <vm/vm_page.h> 35148627Srwatson 36147997Srwatson#include <vm/uma.h> 37148627Srwatson#include <vm/uma_int.h> 38147997Srwatson 39147997Srwatson#include <err.h> 40147997Srwatson#include <errno.h> 41148627Srwatson#include <kvm.h> 42148627Srwatson#include <nlist.h> 43147997Srwatson#include <stdio.h> 44147997Srwatson#include <stdlib.h> 45147997Srwatson#include <string.h> 46147997Srwatson 47147997Srwatson#include "memstat.h" 48147997Srwatson#include "memstat_internal.h" 49147997Srwatson 50148627Srwatsonstatic struct nlist namelist[] = { 51148627Srwatson#define X_UMA_KEGS 0 52148627Srwatson { .n_name = "_uma_kegs" }, 53148627Srwatson#define X_MP_MAXID 1 54148627Srwatson { .n_name = "_mp_maxid" }, 55148627Srwatson { .n_name = "" }, 56148627Srwatson}; 57148627Srwatson 58147997Srwatson/* 59147997Srwatson * Extract uma(9) statistics from the running kernel, and store all memory 60147997Srwatson * type information in the passed list. For each type, check the list for an 61147997Srwatson * existing entry with the right name/allocator -- if present, update that 62147997Srwatson * entry. Otherwise, add a new entry. On error, the entire list will be 63147997Srwatson * cleared, as entries will be in an inconsistent state. 64147997Srwatson * 65147997Srwatson * To reduce the level of work for a list that starts empty, we keep around a 66147997Srwatson * hint as to whether it was empty when we began, so we can avoid searching 67147997Srwatson * the list for entries to update. Updates are O(n^2) due to searching for 68147997Srwatson * each entry before adding it. 
69147997Srwatson */ 70147997Srwatsonint 71147997Srwatsonmemstat_sysctl_uma(struct memory_type_list *list, int flags) 72147997Srwatson{ 73147997Srwatson struct uma_stream_header *ushp; 74147997Srwatson struct uma_type_header *uthp; 75147997Srwatson struct uma_percpu_stat *upsp; 76147997Srwatson struct memory_type *mtp; 77148357Srwatson int count, hint_dontsearch, i, j, maxcpus; 78147997Srwatson char *buffer, *p; 79147997Srwatson size_t size; 80147997Srwatson 81148357Srwatson hint_dontsearch = LIST_EMPTY(&list->mtl_list); 82147997Srwatson 83147997Srwatson /* 84147997Srwatson * Query the number of CPUs, number of malloc types so that we can 85147997Srwatson * guess an initial buffer size. We loop until we succeed or really 86147997Srwatson * fail. Note that the value of maxcpus we query using sysctl is not 87147997Srwatson * the version we use when processing the real data -- that is read 88147997Srwatson * from the header. 89147997Srwatson */ 90147997Srwatsonretry: 91147997Srwatson size = sizeof(maxcpus); 92147997Srwatson if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) { 93148357Srwatson if (errno == EACCES || errno == EPERM) 94148357Srwatson list->mtl_error = MEMSTAT_ERROR_PERMISSION; 95148357Srwatson else 96148357Srwatson list->mtl_error = MEMSTAT_ERROR_DATAERROR; 97147997Srwatson return (-1); 98147997Srwatson } 99147997Srwatson if (size != sizeof(maxcpus)) { 100148357Srwatson list->mtl_error = MEMSTAT_ERROR_DATAERROR; 101147997Srwatson return (-1); 102147997Srwatson } 103147997Srwatson 104147997Srwatson if (maxcpus > MEMSTAT_MAXCPU) { 105148357Srwatson list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS; 106147997Srwatson return (-1); 107147997Srwatson } 108147997Srwatson 109147997Srwatson size = sizeof(count); 110147997Srwatson if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) { 111148357Srwatson if (errno == EACCES || errno == EPERM) 112148357Srwatson list->mtl_error = MEMSTAT_ERROR_PERMISSION; 113148357Srwatson else 114148357Srwatson 
list->mtl_error = MEMSTAT_ERROR_VERSION; 115147997Srwatson return (-1); 116147997Srwatson } 117147997Srwatson if (size != sizeof(count)) { 118148357Srwatson list->mtl_error = MEMSTAT_ERROR_DATAERROR; 119147997Srwatson return (-1); 120147997Srwatson } 121147997Srwatson 122147997Srwatson size = sizeof(*uthp) + count * (sizeof(*uthp) + sizeof(*upsp) * 123147997Srwatson maxcpus); 124147997Srwatson 125147997Srwatson buffer = malloc(size); 126147997Srwatson if (buffer == NULL) { 127148357Srwatson list->mtl_error = MEMSTAT_ERROR_NOMEMORY; 128147997Srwatson return (-1); 129147997Srwatson } 130147997Srwatson 131147997Srwatson if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) { 132147997Srwatson /* 133147997Srwatson * XXXRW: ENOMEM is an ambiguous return, we should bound the 134147997Srwatson * number of loops, perhaps. 135147997Srwatson */ 136147997Srwatson if (errno == ENOMEM) { 137147997Srwatson free(buffer); 138147997Srwatson goto retry; 139147997Srwatson } 140148357Srwatson if (errno == EACCES || errno == EPERM) 141148357Srwatson list->mtl_error = MEMSTAT_ERROR_PERMISSION; 142148357Srwatson else 143148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 144147997Srwatson free(buffer); 145147997Srwatson return (-1); 146147997Srwatson } 147147997Srwatson 148147997Srwatson if (size == 0) { 149147997Srwatson free(buffer); 150147997Srwatson return (0); 151147997Srwatson } 152147997Srwatson 153147997Srwatson if (size < sizeof(*ushp)) { 154148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 155147997Srwatson free(buffer); 156147997Srwatson return (-1); 157147997Srwatson } 158147997Srwatson p = buffer; 159147997Srwatson ushp = (struct uma_stream_header *)p; 160147997Srwatson p += sizeof(*ushp); 161147997Srwatson 162147997Srwatson if (ushp->ush_version != UMA_STREAM_VERSION) { 163148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 164147997Srwatson free(buffer); 165147997Srwatson return (-1); 166147997Srwatson } 167147997Srwatson 168147997Srwatson if 
(ushp->ush_maxcpus > MEMSTAT_MAXCPU) { 169148357Srwatson list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS; 170147997Srwatson free(buffer); 171147997Srwatson return (-1); 172147997Srwatson } 173147997Srwatson 174147997Srwatson /* 175147997Srwatson * For the remainder of this function, we are quite trusting about 176147997Srwatson * the layout of structures and sizes, since we've determined we have 177147997Srwatson * a matching version and acceptable CPU count. 178147997Srwatson */ 179147997Srwatson maxcpus = ushp->ush_maxcpus; 180147997Srwatson count = ushp->ush_count; 181147997Srwatson for (i = 0; i < count; i++) { 182147997Srwatson uthp = (struct uma_type_header *)p; 183147997Srwatson p += sizeof(*uthp); 184147997Srwatson 185147997Srwatson if (hint_dontsearch == 0) { 186147997Srwatson mtp = memstat_mtl_find(list, ALLOCATOR_UMA, 187147997Srwatson uthp->uth_name); 188147997Srwatson } else 189147997Srwatson mtp = NULL; 190147997Srwatson if (mtp == NULL) 191148354Srwatson mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA, 192147997Srwatson uthp->uth_name); 193147997Srwatson if (mtp == NULL) { 194148619Srwatson _memstat_mtl_empty(list); 195147997Srwatson free(buffer); 196148357Srwatson list->mtl_error = MEMSTAT_ERROR_NOMEMORY; 197147997Srwatson return (-1); 198147997Srwatson } 199147997Srwatson 200147997Srwatson /* 201147997Srwatson * Reset the statistics on a current node. 
202147997Srwatson */ 203148354Srwatson _memstat_mt_reset_stats(mtp); 204147997Srwatson 205148007Srwatson mtp->mt_numallocs = uthp->uth_allocs; 206148007Srwatson mtp->mt_numfrees = uthp->uth_frees; 207148071Srwatson mtp->mt_failures = uthp->uth_fails; 208148007Srwatson 209147997Srwatson for (j = 0; j < maxcpus; j++) { 210147997Srwatson upsp = (struct uma_percpu_stat *)p; 211147997Srwatson p += sizeof(*upsp); 212147997Srwatson 213147997Srwatson mtp->mt_percpu_cache[j].mtp_free = 214147997Srwatson upsp->ups_cache_free; 215147997Srwatson mtp->mt_free += upsp->ups_cache_free; 216147997Srwatson mtp->mt_numallocs += upsp->ups_allocs; 217147997Srwatson mtp->mt_numfrees += upsp->ups_frees; 218147997Srwatson } 219147997Srwatson 220147997Srwatson mtp->mt_size = uthp->uth_size; 221148007Srwatson mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size; 222148007Srwatson mtp->mt_memfreed = mtp->mt_numfrees * uthp->uth_size; 223147997Srwatson mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed; 224147997Srwatson mtp->mt_countlimit = uthp->uth_limit; 225147997Srwatson mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size; 226147997Srwatson 227147997Srwatson mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees; 228148170Srwatson mtp->mt_zonefree = uthp->uth_zone_free; 229148381Srwatson 230148381Srwatson /* 231148381Srwatson * UMA secondary zones share a keg with the primary zone. To 232148381Srwatson * avoid double-reporting of free items, report keg free 233148381Srwatson * items only in the primary zone. 
234148381Srwatson */ 235148381Srwatson if (!(uthp->uth_zone_flags & UTH_ZONE_SECONDARY)) { 236148619Srwatson mtp->mt_kegfree = uthp->uth_keg_free; 237148381Srwatson mtp->mt_free += mtp->mt_kegfree; 238148381Srwatson } 239147997Srwatson mtp->mt_free += mtp->mt_zonefree; 240147997Srwatson } 241147997Srwatson 242147997Srwatson free(buffer); 243147997Srwatson 244147997Srwatson return (0); 245147997Srwatson} 246148627Srwatson 247148627Srwatsonstatic int 248148627Srwatsonkread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size, 249148627Srwatson size_t offset) 250148627Srwatson{ 251148627Srwatson ssize_t ret; 252148627Srwatson 253148627Srwatson ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address, 254148627Srwatson size); 255148627Srwatson if (ret < 0) 256148627Srwatson return (MEMSTAT_ERROR_KVM); 257148627Srwatson if ((size_t)ret != size) 258148627Srwatson return (MEMSTAT_ERROR_KVM_SHORTREAD); 259148627Srwatson return (0); 260148627Srwatson} 261148627Srwatson 262148627Srwatsonstatic int 263148627Srwatsonkread_string(kvm_t *kvm, void *kvm_pointer, char *buffer, int buflen) 264148627Srwatson{ 265148627Srwatson ssize_t ret; 266148627Srwatson int i; 267148627Srwatson 268148627Srwatson for (i = 0; i < buflen; i++) { 269148627Srwatson ret = kvm_read(kvm, (unsigned long)kvm_pointer + i, 270148627Srwatson &(buffer[i]), sizeof(char)); 271148627Srwatson if (ret < 0) 272148627Srwatson return (MEMSTAT_ERROR_KVM); 273148627Srwatson if ((size_t)ret != sizeof(char)) 274148627Srwatson return (MEMSTAT_ERROR_KVM_SHORTREAD); 275148627Srwatson if (buffer[i] == '\0') 276148627Srwatson return (0); 277148627Srwatson } 278148627Srwatson /* Truncate. 
*/ 279148627Srwatson buffer[i-1] = '\0'; 280148627Srwatson return (0); 281148627Srwatson} 282148627Srwatson 283148627Srwatsonstatic int 284148627Srwatsonkread_symbol(kvm_t *kvm, int index, void *address, size_t size, 285148627Srwatson size_t offset) 286148627Srwatson{ 287148627Srwatson ssize_t ret; 288148627Srwatson 289148627Srwatson ret = kvm_read(kvm, namelist[index].n_value + offset, address, size); 290148627Srwatson if (ret < 0) 291148627Srwatson return (MEMSTAT_ERROR_KVM); 292148627Srwatson if ((size_t)ret != size) 293148627Srwatson return (MEMSTAT_ERROR_KVM_SHORTREAD); 294148627Srwatson return (0); 295148627Srwatson} 296148627Srwatson 297148627Srwatson/* 298148627Srwatson * memstat_kvm_uma() is similar to memstat_sysctl_uma(), only it extracts 299148627Srwatson * UMA(9) statistics from a kernel core/memory file. 300148627Srwatson */ 301148627Srwatsonint 302148627Srwatsonmemstat_kvm_uma(struct memory_type_list *list, void *kvm_handle) 303148627Srwatson{ 304154416Srwatson LIST_HEAD(, uma_keg) uma_kegs; 305148627Srwatson struct memory_type *mtp; 306148627Srwatson struct uma_bucket *ubp, ub; 307148627Srwatson struct uma_cache *ucp; 308148627Srwatson struct uma_zone *uzp, uz; 309148627Srwatson struct uma_keg *kzp, kz; 310148627Srwatson int hint_dontsearch, i, mp_maxid, ret; 311148627Srwatson char name[MEMTYPE_MAXNAME]; 312148627Srwatson kvm_t *kvm; 313148627Srwatson 314148627Srwatson kvm = (kvm_t *)kvm_handle; 315148627Srwatson hint_dontsearch = LIST_EMPTY(&list->mtl_list); 316148627Srwatson if (kvm_nlist(kvm, namelist) != 0) { 317148627Srwatson list->mtl_error = MEMSTAT_ERROR_KVM; 318148627Srwatson return (-1); 319148627Srwatson } 320148627Srwatson if (namelist[X_UMA_KEGS].n_type == 0 || 321148627Srwatson namelist[X_UMA_KEGS].n_value == 0) { 322148627Srwatson list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL; 323148627Srwatson return (-1); 324148627Srwatson } 325148627Srwatson ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0); 326148627Srwatson if 
(ret != 0) { 327148627Srwatson list->mtl_error = ret; 328148627Srwatson return (-1); 329148627Srwatson } 330148627Srwatson ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0); 331148627Srwatson if (ret != 0) { 332148627Srwatson list->mtl_error = ret; 333148627Srwatson return (-1); 334148627Srwatson } 335148627Srwatson for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp = 336148627Srwatson LIST_NEXT(&kz, uk_link)) { 337148627Srwatson ret = kread(kvm, kzp, &kz, sizeof(kz), 0); 338148627Srwatson if (ret != 0) { 339148627Srwatson _memstat_mtl_empty(list); 340148627Srwatson list->mtl_error = ret; 341148627Srwatson return (-1); 342148627Srwatson } 343148627Srwatson for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp = 344148627Srwatson LIST_NEXT(&uz, uz_link)) { 345148627Srwatson ret = kread(kvm, uzp, &uz, sizeof(uz), 0); 346148627Srwatson if (ret != 0) { 347148627Srwatson _memstat_mtl_empty(list); 348148627Srwatson list->mtl_error = ret; 349148627Srwatson return (-1); 350148627Srwatson } 351148627Srwatson ret = kread_string(kvm, uz.uz_name, name, 352148627Srwatson MEMTYPE_MAXNAME); 353148627Srwatson if (ret != 0) { 354148627Srwatson _memstat_mtl_empty(list); 355148627Srwatson list->mtl_error = ret; 356148627Srwatson return (-1); 357148627Srwatson } 358148627Srwatson if (hint_dontsearch == 0) { 359148627Srwatson mtp = memstat_mtl_find(list, ALLOCATOR_UMA, 360148627Srwatson name); 361148627Srwatson } else 362148627Srwatson mtp = NULL; 363148627Srwatson if (mtp == NULL) 364148627Srwatson mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA, 365148627Srwatson name); 366148627Srwatson if (mtp == NULL) { 367148627Srwatson _memstat_mtl_empty(list); 368148627Srwatson list->mtl_error = MEMSTAT_ERROR_NOMEMORY; 369148627Srwatson return (-1); 370148627Srwatson } 371148627Srwatson /* 372148627Srwatson * Reset the statistics on a current node. 
373148627Srwatson */ 374148627Srwatson _memstat_mt_reset_stats(mtp); 375148627Srwatson mtp->mt_numallocs = uz.uz_allocs; 376148627Srwatson mtp->mt_numfrees = uz.uz_frees; 377148627Srwatson mtp->mt_failures = uz.uz_fails; 378148627Srwatson if (kz.uk_flags & UMA_ZFLAG_INTERNAL) 379148627Srwatson goto skip_percpu; 380148627Srwatson for (i = 0; i < mp_maxid + 1; i++) { 381148627Srwatson ucp = &uz.uz_cpu[i]; 382148627Srwatson mtp->mt_numallocs += ucp->uc_allocs; 383148627Srwatson mtp->mt_numfrees += ucp->uc_frees; 384148627Srwatson 385148627Srwatson if (ucp->uc_allocbucket != NULL) { 386148627Srwatson ret = kread(kvm, ucp->uc_allocbucket, 387148627Srwatson &ub, sizeof(ub), 0); 388148627Srwatson if (ret != 0) { 389148627Srwatson _memstat_mtl_empty(list); 390148627Srwatson list->mtl_error = 391148627Srwatson MEMSTAT_ERROR_NOMEMORY; 392148627Srwatson return (-1); 393148627Srwatson } 394148627Srwatson mtp->mt_free += ub.ub_cnt; 395148627Srwatson } 396148627Srwatson if (ucp->uc_freebucket != NULL) { 397148627Srwatson ret = kread(kvm, ucp->uc_freebucket, 398148627Srwatson &ub, sizeof(ub), 0); 399148627Srwatson if (ret != 0) { 400148627Srwatson _memstat_mtl_empty(list); 401148627Srwatson list->mtl_error = 402148627Srwatson MEMSTAT_ERROR_NOMEMORY; 403148627Srwatson return (-1); 404148627Srwatson } 405148627Srwatson mtp->mt_free += ub.ub_cnt; 406148627Srwatson } 407148627Srwatson } 408148627Srwatsonskip_percpu: 409148627Srwatson mtp->mt_size = kz.uk_size; 410148627Srwatson mtp->mt_memalloced = mtp->mt_numallocs * mtp->mt_size; 411148627Srwatson mtp->mt_memfreed = mtp->mt_numfrees * mtp->mt_size; 412155542Srwatson mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed; 413148627Srwatson if (kz.uk_ppera > 1) 414148627Srwatson mtp->mt_countlimit = kz.uk_maxpages / 415148627Srwatson kz.uk_ipers; 416148627Srwatson else 417148627Srwatson mtp->mt_countlimit = kz.uk_maxpages * 418148627Srwatson kz.uk_ipers; 419148627Srwatson mtp->mt_byteslimit = mtp->mt_countlimit * mtp->mt_size; 
420148627Srwatson mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees; 421148627Srwatson for (ubp = LIST_FIRST(&uz.uz_full_bucket); ubp != 422148627Srwatson NULL; ubp = LIST_NEXT(&ub, ub_link)) { 423148627Srwatson ret = kread(kvm, ubp, &ub, sizeof(ub), 0); 424148627Srwatson mtp->mt_zonefree += ub.ub_cnt; 425148627Srwatson } 426148627Srwatson if (!((kz.uk_flags & UMA_ZONE_SECONDARY) && 427148627Srwatson LIST_FIRST(&kz.uk_zones) != uzp)) { 428148627Srwatson mtp->mt_kegfree = kz.uk_free; 429148627Srwatson mtp->mt_free += mtp->mt_kegfree; 430148627Srwatson } 431148627Srwatson mtp->mt_free += mtp->mt_zonefree; 432148627Srwatson } 433148627Srwatson } 434148627Srwatson return (0); 435148627Srwatson} 436