/* memstat_uma.c revision 148627 */
/*-
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
25147997Srwatson * 26147997Srwatson * $FreeBSD: head/lib/libmemstat/memstat_uma.c 148627 2005-08-01 19:07:39Z rwatson $ 27147997Srwatson */ 28147997Srwatson 29147997Srwatson#include <sys/param.h> 30147997Srwatson#include <sys/sysctl.h> 31147997Srwatson 32148627Srwatson/* 33148627Srwatson * XXX: Grubbing around in UMA(9) using libkvm requires internal knowledge of 34148627Srwatson * a number of VM-related bits. The ifdefs around those bits are not 35148627Srwatson * designed with a nosy user-space consumer in mind. 36148627Srwatson */ 37148627Srwatson#include <vm/vm.h> 38148627Srwatson#define _KERNEL /* XXX: vm_page.h confusion. */ 39148627Srwatson#define KLD_MODULE /* XXX: vm_page.h shouldn't include opt_vmpage.h. */ 40148627Srwatson#include <vm/vm_page.h> 41148627Srwatson#undef KLD_MODULE 42148627Srwatson#undef _KERNEL 43148627Srwatson 44147997Srwatson#include <vm/uma.h> 45148627Srwatson#include <vm/uma_int.h> 46147997Srwatson 47147997Srwatson#include <err.h> 48147997Srwatson#include <errno.h> 49148627Srwatson#include <kvm.h> 50148627Srwatson#include <nlist.h> 51147997Srwatson#include <stdio.h> 52147997Srwatson#include <stdlib.h> 53147997Srwatson#include <string.h> 54147997Srwatson 55147997Srwatson#include "memstat.h" 56147997Srwatson#include "memstat_internal.h" 57147997Srwatson 58148627Srwatsonstatic struct nlist namelist[] = { 59148627Srwatson#define X_UMA_KEGS 0 60148627Srwatson { .n_name = "_uma_kegs" }, 61148627Srwatson#define X_MP_MAXID 1 62148627Srwatson { .n_name = "_mp_maxid" }, 63148627Srwatson { .n_name = "" }, 64148627Srwatson}; 65148627Srwatson 66147997Srwatson/* 67147997Srwatson * Extract uma(9) statistics from the running kernel, and store all memory 68147997Srwatson * type information in the passed list. For each type, check the list for an 69147997Srwatson * existing entry with the right name/allocator -- if present, update that 70147997Srwatson * entry. Otherwise, add a new entry. 
On error, the entire list will be 71147997Srwatson * cleared, as entries will be in an inconsistent state. 72147997Srwatson * 73147997Srwatson * To reduce the level of work for a list that starts empty, we keep around a 74147997Srwatson * hint as to whether it was empty when we began, so we can avoid searching 75147997Srwatson * the list for entries to update. Updates are O(n^2) due to searching for 76147997Srwatson * each entry before adding it. 77147997Srwatson */ 78147997Srwatsonint 79147997Srwatsonmemstat_sysctl_uma(struct memory_type_list *list, int flags) 80147997Srwatson{ 81147997Srwatson struct uma_stream_header *ushp; 82147997Srwatson struct uma_type_header *uthp; 83147997Srwatson struct uma_percpu_stat *upsp; 84147997Srwatson struct memory_type *mtp; 85148357Srwatson int count, hint_dontsearch, i, j, maxcpus; 86147997Srwatson char *buffer, *p; 87147997Srwatson size_t size; 88147997Srwatson 89148357Srwatson hint_dontsearch = LIST_EMPTY(&list->mtl_list); 90147997Srwatson 91147997Srwatson /* 92147997Srwatson * Query the number of CPUs, number of malloc types so that we can 93147997Srwatson * guess an initial buffer size. We loop until we succeed or really 94147997Srwatson * fail. Note that the value of maxcpus we query using sysctl is not 95147997Srwatson * the version we use when processing the real data -- that is read 96147997Srwatson * from the header. 
97147997Srwatson */ 98147997Srwatsonretry: 99147997Srwatson size = sizeof(maxcpus); 100147997Srwatson if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) { 101148357Srwatson if (errno == EACCES || errno == EPERM) 102148357Srwatson list->mtl_error = MEMSTAT_ERROR_PERMISSION; 103148357Srwatson else 104148357Srwatson list->mtl_error = MEMSTAT_ERROR_DATAERROR; 105147997Srwatson return (-1); 106147997Srwatson } 107147997Srwatson if (size != sizeof(maxcpus)) { 108148357Srwatson list->mtl_error = MEMSTAT_ERROR_DATAERROR; 109147997Srwatson return (-1); 110147997Srwatson } 111147997Srwatson 112147997Srwatson if (maxcpus > MEMSTAT_MAXCPU) { 113148357Srwatson list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS; 114147997Srwatson return (-1); 115147997Srwatson } 116147997Srwatson 117147997Srwatson size = sizeof(count); 118147997Srwatson if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) { 119148357Srwatson if (errno == EACCES || errno == EPERM) 120148357Srwatson list->mtl_error = MEMSTAT_ERROR_PERMISSION; 121148357Srwatson else 122148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 123147997Srwatson return (-1); 124147997Srwatson } 125147997Srwatson if (size != sizeof(count)) { 126148357Srwatson list->mtl_error = MEMSTAT_ERROR_DATAERROR; 127147997Srwatson return (-1); 128147997Srwatson } 129147997Srwatson 130147997Srwatson size = sizeof(*uthp) + count * (sizeof(*uthp) + sizeof(*upsp) * 131147997Srwatson maxcpus); 132147997Srwatson 133147997Srwatson buffer = malloc(size); 134147997Srwatson if (buffer == NULL) { 135148357Srwatson list->mtl_error = MEMSTAT_ERROR_NOMEMORY; 136147997Srwatson return (-1); 137147997Srwatson } 138147997Srwatson 139147997Srwatson if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) { 140147997Srwatson /* 141147997Srwatson * XXXRW: ENOMEM is an ambiguous return, we should bound the 142147997Srwatson * number of loops, perhaps. 
143147997Srwatson */ 144147997Srwatson if (errno == ENOMEM) { 145147997Srwatson free(buffer); 146147997Srwatson goto retry; 147147997Srwatson } 148148357Srwatson if (errno == EACCES || errno == EPERM) 149148357Srwatson list->mtl_error = MEMSTAT_ERROR_PERMISSION; 150148357Srwatson else 151148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 152147997Srwatson free(buffer); 153147997Srwatson return (-1); 154147997Srwatson } 155147997Srwatson 156147997Srwatson if (size == 0) { 157147997Srwatson free(buffer); 158147997Srwatson return (0); 159147997Srwatson } 160147997Srwatson 161147997Srwatson if (size < sizeof(*ushp)) { 162148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 163147997Srwatson free(buffer); 164147997Srwatson return (-1); 165147997Srwatson } 166147997Srwatson p = buffer; 167147997Srwatson ushp = (struct uma_stream_header *)p; 168147997Srwatson p += sizeof(*ushp); 169147997Srwatson 170147997Srwatson if (ushp->ush_version != UMA_STREAM_VERSION) { 171148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 172147997Srwatson free(buffer); 173147997Srwatson return (-1); 174147997Srwatson } 175147997Srwatson 176147997Srwatson if (ushp->ush_maxcpus > MEMSTAT_MAXCPU) { 177148357Srwatson list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS; 178147997Srwatson free(buffer); 179147997Srwatson return (-1); 180147997Srwatson } 181147997Srwatson 182147997Srwatson /* 183147997Srwatson * For the remainder of this function, we are quite trusting about 184147997Srwatson * the layout of structures and sizes, since we've determined we have 185147997Srwatson * a matching version and acceptable CPU count. 
186147997Srwatson */ 187147997Srwatson maxcpus = ushp->ush_maxcpus; 188147997Srwatson count = ushp->ush_count; 189147997Srwatson for (i = 0; i < count; i++) { 190147997Srwatson uthp = (struct uma_type_header *)p; 191147997Srwatson p += sizeof(*uthp); 192147997Srwatson 193147997Srwatson if (hint_dontsearch == 0) { 194147997Srwatson mtp = memstat_mtl_find(list, ALLOCATOR_UMA, 195147997Srwatson uthp->uth_name); 196147997Srwatson } else 197147997Srwatson mtp = NULL; 198147997Srwatson if (mtp == NULL) 199148354Srwatson mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA, 200147997Srwatson uthp->uth_name); 201147997Srwatson if (mtp == NULL) { 202148619Srwatson _memstat_mtl_empty(list); 203147997Srwatson free(buffer); 204148357Srwatson list->mtl_error = MEMSTAT_ERROR_NOMEMORY; 205147997Srwatson return (-1); 206147997Srwatson } 207147997Srwatson 208147997Srwatson /* 209147997Srwatson * Reset the statistics on a current node. 210147997Srwatson */ 211148354Srwatson _memstat_mt_reset_stats(mtp); 212147997Srwatson 213148007Srwatson mtp->mt_numallocs = uthp->uth_allocs; 214148007Srwatson mtp->mt_numfrees = uthp->uth_frees; 215148071Srwatson mtp->mt_failures = uthp->uth_fails; 216148007Srwatson 217147997Srwatson for (j = 0; j < maxcpus; j++) { 218147997Srwatson upsp = (struct uma_percpu_stat *)p; 219147997Srwatson p += sizeof(*upsp); 220147997Srwatson 221147997Srwatson mtp->mt_percpu_cache[j].mtp_free = 222147997Srwatson upsp->ups_cache_free; 223147997Srwatson mtp->mt_free += upsp->ups_cache_free; 224147997Srwatson mtp->mt_numallocs += upsp->ups_allocs; 225147997Srwatson mtp->mt_numfrees += upsp->ups_frees; 226147997Srwatson } 227147997Srwatson 228147997Srwatson mtp->mt_size = uthp->uth_size; 229148007Srwatson mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size; 230148007Srwatson mtp->mt_memfreed = mtp->mt_numfrees * uthp->uth_size; 231147997Srwatson mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed; 232147997Srwatson mtp->mt_countlimit = uthp->uth_limit; 233147997Srwatson 
mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size; 234147997Srwatson 235147997Srwatson mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees; 236148170Srwatson mtp->mt_zonefree = uthp->uth_zone_free; 237148381Srwatson 238148381Srwatson /* 239148381Srwatson * UMA secondary zones share a keg with the primary zone. To 240148381Srwatson * avoid double-reporting of free items, report keg free 241148381Srwatson * items only in the primary zone. 242148381Srwatson */ 243148381Srwatson if (!(uthp->uth_zone_flags & UTH_ZONE_SECONDARY)) { 244148619Srwatson mtp->mt_kegfree = uthp->uth_keg_free; 245148381Srwatson mtp->mt_free += mtp->mt_kegfree; 246148381Srwatson } 247147997Srwatson mtp->mt_free += mtp->mt_zonefree; 248147997Srwatson } 249147997Srwatson 250147997Srwatson free(buffer); 251147997Srwatson 252147997Srwatson return (0); 253147997Srwatson} 254148627Srwatson 255148627Srwatsonstatic int 256148627Srwatsonkread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size, 257148627Srwatson size_t offset) 258148627Srwatson{ 259148627Srwatson ssize_t ret; 260148627Srwatson 261148627Srwatson ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address, 262148627Srwatson size); 263148627Srwatson if (ret < 0) 264148627Srwatson return (MEMSTAT_ERROR_KVM); 265148627Srwatson if ((size_t)ret != size) 266148627Srwatson return (MEMSTAT_ERROR_KVM_SHORTREAD); 267148627Srwatson return (0); 268148627Srwatson} 269148627Srwatson 270148627Srwatsonstatic int 271148627Srwatsonkread_string(kvm_t *kvm, void *kvm_pointer, char *buffer, int buflen) 272148627Srwatson{ 273148627Srwatson ssize_t ret; 274148627Srwatson int i; 275148627Srwatson 276148627Srwatson for (i = 0; i < buflen; i++) { 277148627Srwatson ret = kvm_read(kvm, (unsigned long)kvm_pointer + i, 278148627Srwatson &(buffer[i]), sizeof(char)); 279148627Srwatson if (ret < 0) 280148627Srwatson return (MEMSTAT_ERROR_KVM); 281148627Srwatson if ((size_t)ret != sizeof(char)) 282148627Srwatson return (MEMSTAT_ERROR_KVM_SHORTREAD); 
283148627Srwatson if (buffer[i] == '\0') 284148627Srwatson return (0); 285148627Srwatson } 286148627Srwatson /* Truncate. */ 287148627Srwatson buffer[i-1] = '\0'; 288148627Srwatson return (0); 289148627Srwatson} 290148627Srwatson 291148627Srwatsonstatic int 292148627Srwatsonkread_symbol(kvm_t *kvm, int index, void *address, size_t size, 293148627Srwatson size_t offset) 294148627Srwatson{ 295148627Srwatson ssize_t ret; 296148627Srwatson 297148627Srwatson ret = kvm_read(kvm, namelist[index].n_value + offset, address, size); 298148627Srwatson if (ret < 0) 299148627Srwatson return (MEMSTAT_ERROR_KVM); 300148627Srwatson if ((size_t)ret != size) 301148627Srwatson return (MEMSTAT_ERROR_KVM_SHORTREAD); 302148627Srwatson return (0); 303148627Srwatson} 304148627Srwatson 305148627Srwatson/* 306148627Srwatson * memstat_kvm_uma() is similar to memstat_sysctl_uma(), only it extracts 307148627Srwatson * UMA(9) statistics from a kernel core/memory file. 308148627Srwatson */ 309148627Srwatsonint 310148627Srwatsonmemstat_kvm_uma(struct memory_type_list *list, void *kvm_handle) 311148627Srwatson{ 312148627Srwatson static LIST_HEAD(, uma_keg) uma_kegs; 313148627Srwatson struct memory_type *mtp; 314148627Srwatson struct uma_bucket *ubp, ub; 315148627Srwatson struct uma_cache *ucp; 316148627Srwatson struct uma_zone *uzp, uz; 317148627Srwatson struct uma_keg *kzp, kz; 318148627Srwatson int hint_dontsearch, i, mp_maxid, ret; 319148627Srwatson char name[MEMTYPE_MAXNAME]; 320148627Srwatson kvm_t *kvm; 321148627Srwatson 322148627Srwatson kvm = (kvm_t *)kvm_handle; 323148627Srwatson hint_dontsearch = LIST_EMPTY(&list->mtl_list); 324148627Srwatson if (kvm_nlist(kvm, namelist) != 0) { 325148627Srwatson list->mtl_error = MEMSTAT_ERROR_KVM; 326148627Srwatson return (-1); 327148627Srwatson } 328148627Srwatson if (namelist[X_UMA_KEGS].n_type == 0 || 329148627Srwatson namelist[X_UMA_KEGS].n_value == 0) { 330148627Srwatson list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL; 331148627Srwatson return (-1); 
332148627Srwatson } 333148627Srwatson ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0); 334148627Srwatson if (ret != 0) { 335148627Srwatson list->mtl_error = ret; 336148627Srwatson return (-1); 337148627Srwatson } 338148627Srwatson ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0); 339148627Srwatson if (ret != 0) { 340148627Srwatson list->mtl_error = ret; 341148627Srwatson return (-1); 342148627Srwatson } 343148627Srwatson for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp = 344148627Srwatson LIST_NEXT(&kz, uk_link)) { 345148627Srwatson ret = kread(kvm, kzp, &kz, sizeof(kz), 0); 346148627Srwatson if (ret != 0) { 347148627Srwatson _memstat_mtl_empty(list); 348148627Srwatson list->mtl_error = ret; 349148627Srwatson return (-1); 350148627Srwatson } 351148627Srwatson for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp = 352148627Srwatson LIST_NEXT(&uz, uz_link)) { 353148627Srwatson ret = kread(kvm, uzp, &uz, sizeof(uz), 0); 354148627Srwatson if (ret != 0) { 355148627Srwatson _memstat_mtl_empty(list); 356148627Srwatson list->mtl_error = ret; 357148627Srwatson return (-1); 358148627Srwatson } 359148627Srwatson ret = kread_string(kvm, uz.uz_name, name, 360148627Srwatson MEMTYPE_MAXNAME); 361148627Srwatson if (ret != 0) { 362148627Srwatson _memstat_mtl_empty(list); 363148627Srwatson list->mtl_error = ret; 364148627Srwatson return (-1); 365148627Srwatson } 366148627Srwatson if (hint_dontsearch == 0) { 367148627Srwatson mtp = memstat_mtl_find(list, ALLOCATOR_UMA, 368148627Srwatson name); 369148627Srwatson } else 370148627Srwatson mtp = NULL; 371148627Srwatson if (mtp == NULL) 372148627Srwatson mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA, 373148627Srwatson name); 374148627Srwatson if (mtp == NULL) { 375148627Srwatson _memstat_mtl_empty(list); 376148627Srwatson list->mtl_error = MEMSTAT_ERROR_NOMEMORY; 377148627Srwatson return (-1); 378148627Srwatson } 379148627Srwatson /* 380148627Srwatson * Reset the statistics on a current node. 
381148627Srwatson */ 382148627Srwatson _memstat_mt_reset_stats(mtp); 383148627Srwatson mtp->mt_numallocs = uz.uz_allocs; 384148627Srwatson mtp->mt_numfrees = uz.uz_frees; 385148627Srwatson mtp->mt_failures = uz.uz_fails; 386148627Srwatson if (kz.uk_flags & UMA_ZFLAG_INTERNAL) 387148627Srwatson goto skip_percpu; 388148627Srwatson for (i = 0; i < mp_maxid + 1; i++) { 389148627Srwatson ucp = &uz.uz_cpu[i]; 390148627Srwatson mtp->mt_numallocs += ucp->uc_allocs; 391148627Srwatson mtp->mt_numfrees += ucp->uc_frees; 392148627Srwatson 393148627Srwatson if (ucp->uc_allocbucket != NULL) { 394148627Srwatson ret = kread(kvm, ucp->uc_allocbucket, 395148627Srwatson &ub, sizeof(ub), 0); 396148627Srwatson if (ret != 0) { 397148627Srwatson _memstat_mtl_empty(list); 398148627Srwatson list->mtl_error = 399148627Srwatson MEMSTAT_ERROR_NOMEMORY; 400148627Srwatson return (-1); 401148627Srwatson } 402148627Srwatson mtp->mt_free += ub.ub_cnt; 403148627Srwatson } 404148627Srwatson if (ucp->uc_freebucket != NULL) { 405148627Srwatson ret = kread(kvm, ucp->uc_freebucket, 406148627Srwatson &ub, sizeof(ub), 0); 407148627Srwatson if (ret != 0) { 408148627Srwatson _memstat_mtl_empty(list); 409148627Srwatson list->mtl_error = 410148627Srwatson MEMSTAT_ERROR_NOMEMORY; 411148627Srwatson return (-1); 412148627Srwatson } 413148627Srwatson mtp->mt_free += ub.ub_cnt; 414148627Srwatson } 415148627Srwatson } 416148627Srwatsonskip_percpu: 417148627Srwatson mtp->mt_size = kz.uk_size; 418148627Srwatson mtp->mt_memalloced = mtp->mt_numallocs * mtp->mt_size; 419148627Srwatson mtp->mt_memfreed = mtp->mt_numfrees * mtp->mt_size; 420148627Srwatson mtp->mt_bytes = mtp->mt_memalloced = mtp->mt_memfreed; 421148627Srwatson if (kz.uk_ppera > 1) 422148627Srwatson mtp->mt_countlimit = kz.uk_maxpages / 423148627Srwatson kz.uk_ipers; 424148627Srwatson else 425148627Srwatson mtp->mt_countlimit = kz.uk_maxpages * 426148627Srwatson kz.uk_ipers; 427148627Srwatson mtp->mt_byteslimit = mtp->mt_countlimit * mtp->mt_size; 
428148627Srwatson mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees; 429148627Srwatson for (ubp = LIST_FIRST(&uz.uz_full_bucket); ubp != 430148627Srwatson NULL; ubp = LIST_NEXT(&ub, ub_link)) { 431148627Srwatson ret = kread(kvm, ubp, &ub, sizeof(ub), 0); 432148627Srwatson mtp->mt_zonefree += ub.ub_cnt; 433148627Srwatson } 434148627Srwatson if (!((kz.uk_flags & UMA_ZONE_SECONDARY) && 435148627Srwatson LIST_FIRST(&kz.uk_zones) != uzp)) { 436148627Srwatson mtp->mt_kegfree = kz.uk_free; 437148627Srwatson mtp->mt_free += mtp->mt_kegfree; 438148627Srwatson } 439148627Srwatson mtp->mt_free += mtp->mt_zonefree; 440148627Srwatson } 441148627Srwatson } 442148627Srwatson return (0); 443148627Srwatson} 444