/* memstat_uma.c -- FreeBSD libmemstat, revision 222813 */
1147997Srwatson/*- 2155552Srwatson * Copyright (c) 2005-2006 Robert N. M. Watson 3147997Srwatson * All rights reserved. 4147997Srwatson * 5147997Srwatson * Redistribution and use in source and binary forms, with or without 6147997Srwatson * modification, are permitted provided that the following conditions 7147997Srwatson * are met: 8147997Srwatson * 1. Redistributions of source code must retain the above copyright 9147997Srwatson * notice, this list of conditions and the following disclaimer. 10147997Srwatson * 2. Redistributions in binary form must reproduce the above copyright 11147997Srwatson * notice, this list of conditions and the following disclaimer in the 12147997Srwatson * documentation and/or other materials provided with the distribution. 13147997Srwatson * 14147997Srwatson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15147997Srwatson * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16147997Srwatson * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17147997Srwatson * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18147997Srwatson * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19147997Srwatson * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20147997Srwatson * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21147997Srwatson * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22147997Srwatson * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23147997Srwatson * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24147997Srwatson * SUCH DAMAGE. 
25147997Srwatson * 26147997Srwatson * $FreeBSD: head/lib/libmemstat/memstat_uma.c 222813 2011-06-07 08:46:13Z attilio $ 27147997Srwatson */ 28147997Srwatson 29147997Srwatson#include <sys/param.h> 30222813Sattilio#include <sys/cpuset.h> 31147997Srwatson#include <sys/sysctl.h> 32147997Srwatson 33148693Srwatson#define LIBMEMSTAT /* Cause vm_page.h not to include opt_vmpage.h */ 34148627Srwatson#include <vm/vm.h> 35148627Srwatson#include <vm/vm_page.h> 36148627Srwatson 37147997Srwatson#include <vm/uma.h> 38148627Srwatson#include <vm/uma_int.h> 39147997Srwatson 40147997Srwatson#include <err.h> 41147997Srwatson#include <errno.h> 42148627Srwatson#include <kvm.h> 43148627Srwatson#include <nlist.h> 44155550Srwatson#include <stddef.h> 45147997Srwatson#include <stdio.h> 46147997Srwatson#include <stdlib.h> 47147997Srwatson#include <string.h> 48222813Sattilio#include <unistd.h> 49147997Srwatson 50147997Srwatson#include "memstat.h" 51147997Srwatson#include "memstat_internal.h" 52147997Srwatson 53148627Srwatsonstatic struct nlist namelist[] = { 54148627Srwatson#define X_UMA_KEGS 0 55148627Srwatson { .n_name = "_uma_kegs" }, 56148627Srwatson#define X_MP_MAXID 1 57148627Srwatson { .n_name = "_mp_maxid" }, 58155547Srwatson#define X_ALL_CPUS 2 59155547Srwatson { .n_name = "_all_cpus" }, 60148627Srwatson { .n_name = "" }, 61148627Srwatson}; 62148627Srwatson 63147997Srwatson/* 64147997Srwatson * Extract uma(9) statistics from the running kernel, and store all memory 65147997Srwatson * type information in the passed list. For each type, check the list for an 66147997Srwatson * existing entry with the right name/allocator -- if present, update that 67147997Srwatson * entry. Otherwise, add a new entry. On error, the entire list will be 68147997Srwatson * cleared, as entries will be in an inconsistent state. 
69147997Srwatson * 70147997Srwatson * To reduce the level of work for a list that starts empty, we keep around a 71147997Srwatson * hint as to whether it was empty when we began, so we can avoid searching 72147997Srwatson * the list for entries to update. Updates are O(n^2) due to searching for 73147997Srwatson * each entry before adding it. 74147997Srwatson */ 75147997Srwatsonint 76147997Srwatsonmemstat_sysctl_uma(struct memory_type_list *list, int flags) 77147997Srwatson{ 78147997Srwatson struct uma_stream_header *ushp; 79147997Srwatson struct uma_type_header *uthp; 80147997Srwatson struct uma_percpu_stat *upsp; 81147997Srwatson struct memory_type *mtp; 82148357Srwatson int count, hint_dontsearch, i, j, maxcpus; 83147997Srwatson char *buffer, *p; 84147997Srwatson size_t size; 85147997Srwatson 86148357Srwatson hint_dontsearch = LIST_EMPTY(&list->mtl_list); 87147997Srwatson 88147997Srwatson /* 89147997Srwatson * Query the number of CPUs, number of malloc types so that we can 90147997Srwatson * guess an initial buffer size. We loop until we succeed or really 91147997Srwatson * fail. Note that the value of maxcpus we query using sysctl is not 92147997Srwatson * the version we use when processing the real data -- that is read 93147997Srwatson * from the header. 
94147997Srwatson */ 95147997Srwatsonretry: 96147997Srwatson size = sizeof(maxcpus); 97147997Srwatson if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) { 98148357Srwatson if (errno == EACCES || errno == EPERM) 99148357Srwatson list->mtl_error = MEMSTAT_ERROR_PERMISSION; 100148357Srwatson else 101148357Srwatson list->mtl_error = MEMSTAT_ERROR_DATAERROR; 102147997Srwatson return (-1); 103147997Srwatson } 104147997Srwatson if (size != sizeof(maxcpus)) { 105148357Srwatson list->mtl_error = MEMSTAT_ERROR_DATAERROR; 106147997Srwatson return (-1); 107147997Srwatson } 108147997Srwatson 109147997Srwatson if (maxcpus > MEMSTAT_MAXCPU) { 110148357Srwatson list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS; 111147997Srwatson return (-1); 112147997Srwatson } 113147997Srwatson 114147997Srwatson size = sizeof(count); 115147997Srwatson if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) { 116148357Srwatson if (errno == EACCES || errno == EPERM) 117148357Srwatson list->mtl_error = MEMSTAT_ERROR_PERMISSION; 118148357Srwatson else 119148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 120147997Srwatson return (-1); 121147997Srwatson } 122147997Srwatson if (size != sizeof(count)) { 123148357Srwatson list->mtl_error = MEMSTAT_ERROR_DATAERROR; 124147997Srwatson return (-1); 125147997Srwatson } 126147997Srwatson 127147997Srwatson size = sizeof(*uthp) + count * (sizeof(*uthp) + sizeof(*upsp) * 128147997Srwatson maxcpus); 129147997Srwatson 130147997Srwatson buffer = malloc(size); 131147997Srwatson if (buffer == NULL) { 132148357Srwatson list->mtl_error = MEMSTAT_ERROR_NOMEMORY; 133147997Srwatson return (-1); 134147997Srwatson } 135147997Srwatson 136147997Srwatson if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) { 137147997Srwatson /* 138147997Srwatson * XXXRW: ENOMEM is an ambiguous return, we should bound the 139147997Srwatson * number of loops, perhaps. 
140147997Srwatson */ 141147997Srwatson if (errno == ENOMEM) { 142147997Srwatson free(buffer); 143147997Srwatson goto retry; 144147997Srwatson } 145148357Srwatson if (errno == EACCES || errno == EPERM) 146148357Srwatson list->mtl_error = MEMSTAT_ERROR_PERMISSION; 147148357Srwatson else 148148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 149147997Srwatson free(buffer); 150147997Srwatson return (-1); 151147997Srwatson } 152147997Srwatson 153147997Srwatson if (size == 0) { 154147997Srwatson free(buffer); 155147997Srwatson return (0); 156147997Srwatson } 157147997Srwatson 158147997Srwatson if (size < sizeof(*ushp)) { 159148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 160147997Srwatson free(buffer); 161147997Srwatson return (-1); 162147997Srwatson } 163147997Srwatson p = buffer; 164147997Srwatson ushp = (struct uma_stream_header *)p; 165147997Srwatson p += sizeof(*ushp); 166147997Srwatson 167147997Srwatson if (ushp->ush_version != UMA_STREAM_VERSION) { 168148357Srwatson list->mtl_error = MEMSTAT_ERROR_VERSION; 169147997Srwatson free(buffer); 170147997Srwatson return (-1); 171147997Srwatson } 172147997Srwatson 173147997Srwatson if (ushp->ush_maxcpus > MEMSTAT_MAXCPU) { 174148357Srwatson list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS; 175147997Srwatson free(buffer); 176147997Srwatson return (-1); 177147997Srwatson } 178147997Srwatson 179147997Srwatson /* 180147997Srwatson * For the remainder of this function, we are quite trusting about 181147997Srwatson * the layout of structures and sizes, since we've determined we have 182147997Srwatson * a matching version and acceptable CPU count. 
183147997Srwatson */ 184147997Srwatson maxcpus = ushp->ush_maxcpus; 185147997Srwatson count = ushp->ush_count; 186147997Srwatson for (i = 0; i < count; i++) { 187147997Srwatson uthp = (struct uma_type_header *)p; 188147997Srwatson p += sizeof(*uthp); 189147997Srwatson 190147997Srwatson if (hint_dontsearch == 0) { 191147997Srwatson mtp = memstat_mtl_find(list, ALLOCATOR_UMA, 192147997Srwatson uthp->uth_name); 193147997Srwatson } else 194147997Srwatson mtp = NULL; 195147997Srwatson if (mtp == NULL) 196148354Srwatson mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA, 197147997Srwatson uthp->uth_name); 198147997Srwatson if (mtp == NULL) { 199148619Srwatson _memstat_mtl_empty(list); 200147997Srwatson free(buffer); 201148357Srwatson list->mtl_error = MEMSTAT_ERROR_NOMEMORY; 202147997Srwatson return (-1); 203147997Srwatson } 204147997Srwatson 205147997Srwatson /* 206147997Srwatson * Reset the statistics on a current node. 207147997Srwatson */ 208148354Srwatson _memstat_mt_reset_stats(mtp); 209147997Srwatson 210148007Srwatson mtp->mt_numallocs = uthp->uth_allocs; 211148007Srwatson mtp->mt_numfrees = uthp->uth_frees; 212148071Srwatson mtp->mt_failures = uthp->uth_fails; 213209215Ssbruno mtp->mt_sleeps = uthp->uth_sleeps; 214148007Srwatson 215147997Srwatson for (j = 0; j < maxcpus; j++) { 216147997Srwatson upsp = (struct uma_percpu_stat *)p; 217147997Srwatson p += sizeof(*upsp); 218147997Srwatson 219147997Srwatson mtp->mt_percpu_cache[j].mtp_free = 220147997Srwatson upsp->ups_cache_free; 221147997Srwatson mtp->mt_free += upsp->ups_cache_free; 222147997Srwatson mtp->mt_numallocs += upsp->ups_allocs; 223147997Srwatson mtp->mt_numfrees += upsp->ups_frees; 224147997Srwatson } 225147997Srwatson 226147997Srwatson mtp->mt_size = uthp->uth_size; 227148007Srwatson mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size; 228148007Srwatson mtp->mt_memfreed = mtp->mt_numfrees * uthp->uth_size; 229147997Srwatson mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed; 230147997Srwatson 
mtp->mt_countlimit = uthp->uth_limit; 231147997Srwatson mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size; 232147997Srwatson 233147997Srwatson mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees; 234148170Srwatson mtp->mt_zonefree = uthp->uth_zone_free; 235148381Srwatson 236148381Srwatson /* 237148381Srwatson * UMA secondary zones share a keg with the primary zone. To 238148381Srwatson * avoid double-reporting of free items, report keg free 239148381Srwatson * items only in the primary zone. 240148381Srwatson */ 241148381Srwatson if (!(uthp->uth_zone_flags & UTH_ZONE_SECONDARY)) { 242148619Srwatson mtp->mt_kegfree = uthp->uth_keg_free; 243148381Srwatson mtp->mt_free += mtp->mt_kegfree; 244148381Srwatson } 245147997Srwatson mtp->mt_free += mtp->mt_zonefree; 246147997Srwatson } 247147997Srwatson 248147997Srwatson free(buffer); 249147997Srwatson 250147997Srwatson return (0); 251147997Srwatson} 252148627Srwatson 253148627Srwatsonstatic int 254148627Srwatsonkread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size, 255148627Srwatson size_t offset) 256148627Srwatson{ 257148627Srwatson ssize_t ret; 258148627Srwatson 259148627Srwatson ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address, 260148627Srwatson size); 261148627Srwatson if (ret < 0) 262148627Srwatson return (MEMSTAT_ERROR_KVM); 263148627Srwatson if ((size_t)ret != size) 264148627Srwatson return (MEMSTAT_ERROR_KVM_SHORTREAD); 265148627Srwatson return (0); 266148627Srwatson} 267148627Srwatson 268148627Srwatsonstatic int 269148627Srwatsonkread_string(kvm_t *kvm, void *kvm_pointer, char *buffer, int buflen) 270148627Srwatson{ 271148627Srwatson ssize_t ret; 272148627Srwatson int i; 273148627Srwatson 274148627Srwatson for (i = 0; i < buflen; i++) { 275148627Srwatson ret = kvm_read(kvm, (unsigned long)kvm_pointer + i, 276148627Srwatson &(buffer[i]), sizeof(char)); 277148627Srwatson if (ret < 0) 278148627Srwatson return (MEMSTAT_ERROR_KVM); 279148627Srwatson if ((size_t)ret != sizeof(char)) 
280148627Srwatson return (MEMSTAT_ERROR_KVM_SHORTREAD); 281148627Srwatson if (buffer[i] == '\0') 282148627Srwatson return (0); 283148627Srwatson } 284148627Srwatson /* Truncate. */ 285148627Srwatson buffer[i-1] = '\0'; 286148627Srwatson return (0); 287148627Srwatson} 288148627Srwatson 289148627Srwatsonstatic int 290148627Srwatsonkread_symbol(kvm_t *kvm, int index, void *address, size_t size, 291148627Srwatson size_t offset) 292148627Srwatson{ 293148627Srwatson ssize_t ret; 294148627Srwatson 295148627Srwatson ret = kvm_read(kvm, namelist[index].n_value + offset, address, size); 296148627Srwatson if (ret < 0) 297148627Srwatson return (MEMSTAT_ERROR_KVM); 298148627Srwatson if ((size_t)ret != size) 299148627Srwatson return (MEMSTAT_ERROR_KVM_SHORTREAD); 300148627Srwatson return (0); 301148627Srwatson} 302148627Srwatson 303148627Srwatson/* 304148627Srwatson * memstat_kvm_uma() is similar to memstat_sysctl_uma(), only it extracts 305148627Srwatson * UMA(9) statistics from a kernel core/memory file. 
306148627Srwatson */ 307148627Srwatsonint 308148627Srwatsonmemstat_kvm_uma(struct memory_type_list *list, void *kvm_handle) 309148627Srwatson{ 310154416Srwatson LIST_HEAD(, uma_keg) uma_kegs; 311148627Srwatson struct memory_type *mtp; 312148627Srwatson struct uma_bucket *ubp, ub; 313155550Srwatson struct uma_cache *ucp, *ucp_array; 314148627Srwatson struct uma_zone *uzp, uz; 315148627Srwatson struct uma_keg *kzp, kz; 316148627Srwatson int hint_dontsearch, i, mp_maxid, ret; 317148627Srwatson char name[MEMTYPE_MAXNAME]; 318222813Sattilio cpuset_t all_cpus; 319222813Sattilio long cpusetsize; 320148627Srwatson kvm_t *kvm; 321148627Srwatson 322148627Srwatson kvm = (kvm_t *)kvm_handle; 323148627Srwatson hint_dontsearch = LIST_EMPTY(&list->mtl_list); 324148627Srwatson if (kvm_nlist(kvm, namelist) != 0) { 325148627Srwatson list->mtl_error = MEMSTAT_ERROR_KVM; 326148627Srwatson return (-1); 327148627Srwatson } 328148627Srwatson if (namelist[X_UMA_KEGS].n_type == 0 || 329148627Srwatson namelist[X_UMA_KEGS].n_value == 0) { 330148627Srwatson list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL; 331148627Srwatson return (-1); 332148627Srwatson } 333148627Srwatson ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0); 334148627Srwatson if (ret != 0) { 335148627Srwatson list->mtl_error = ret; 336148627Srwatson return (-1); 337148627Srwatson } 338148627Srwatson ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0); 339148627Srwatson if (ret != 0) { 340148627Srwatson list->mtl_error = ret; 341148627Srwatson return (-1); 342148627Srwatson } 343222813Sattilio cpusetsize = sysconf(_SC_CPUSET_SIZE); 344222813Sattilio if (cpusetsize == -1 || (u_long)cpusetsize > sizeof(cpuset_t)) { 345222813Sattilio list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL; 346222813Sattilio return (-1); 347222813Sattilio } 348222813Sattilio CPU_ZERO(&all_cpus); 349222813Sattilio ret = kread_symbol(kvm, X_ALL_CPUS, &all_cpus, cpusetsize, 0); 350155547Srwatson if (ret != 0) { 351155547Srwatson 
list->mtl_error = ret; 352155547Srwatson return (-1); 353155547Srwatson } 354155550Srwatson ucp_array = malloc(sizeof(struct uma_cache) * (mp_maxid + 1)); 355155550Srwatson if (ucp_array == NULL) { 356155550Srwatson list->mtl_error = MEMSTAT_ERROR_NOMEMORY; 357155550Srwatson return (-1); 358155550Srwatson } 359148627Srwatson for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp = 360148627Srwatson LIST_NEXT(&kz, uk_link)) { 361148627Srwatson ret = kread(kvm, kzp, &kz, sizeof(kz), 0); 362148627Srwatson if (ret != 0) { 363155550Srwatson free(ucp_array); 364148627Srwatson _memstat_mtl_empty(list); 365148627Srwatson list->mtl_error = ret; 366148627Srwatson return (-1); 367148627Srwatson } 368148627Srwatson for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp = 369148627Srwatson LIST_NEXT(&uz, uz_link)) { 370148627Srwatson ret = kread(kvm, uzp, &uz, sizeof(uz), 0); 371148627Srwatson if (ret != 0) { 372155550Srwatson free(ucp_array); 373148627Srwatson _memstat_mtl_empty(list); 374148627Srwatson list->mtl_error = ret; 375148627Srwatson return (-1); 376148627Srwatson } 377155550Srwatson ret = kread(kvm, uzp, ucp_array, 378155550Srwatson sizeof(struct uma_cache) * (mp_maxid + 1), 379155550Srwatson offsetof(struct uma_zone, uz_cpu[0])); 380155550Srwatson if (ret != 0) { 381155550Srwatson free(ucp_array); 382155550Srwatson _memstat_mtl_empty(list); 383155550Srwatson list->mtl_error = ret; 384155550Srwatson return (-1); 385155550Srwatson } 386148627Srwatson ret = kread_string(kvm, uz.uz_name, name, 387148627Srwatson MEMTYPE_MAXNAME); 388148627Srwatson if (ret != 0) { 389155550Srwatson free(ucp_array); 390148627Srwatson _memstat_mtl_empty(list); 391148627Srwatson list->mtl_error = ret; 392148627Srwatson return (-1); 393148627Srwatson } 394148627Srwatson if (hint_dontsearch == 0) { 395148627Srwatson mtp = memstat_mtl_find(list, ALLOCATOR_UMA, 396148627Srwatson name); 397148627Srwatson } else 398148627Srwatson mtp = NULL; 399148627Srwatson if (mtp == NULL) 400148627Srwatson mtp = 
_memstat_mt_allocate(list, ALLOCATOR_UMA, 401148627Srwatson name); 402148627Srwatson if (mtp == NULL) { 403155550Srwatson free(ucp_array); 404148627Srwatson _memstat_mtl_empty(list); 405148627Srwatson list->mtl_error = MEMSTAT_ERROR_NOMEMORY; 406148627Srwatson return (-1); 407148627Srwatson } 408148627Srwatson /* 409148627Srwatson * Reset the statistics on a current node. 410148627Srwatson */ 411148627Srwatson _memstat_mt_reset_stats(mtp); 412148627Srwatson mtp->mt_numallocs = uz.uz_allocs; 413148627Srwatson mtp->mt_numfrees = uz.uz_frees; 414148627Srwatson mtp->mt_failures = uz.uz_fails; 415209215Ssbruno mtp->mt_sleeps = uz.uz_sleeps; 416148627Srwatson if (kz.uk_flags & UMA_ZFLAG_INTERNAL) 417148627Srwatson goto skip_percpu; 418148627Srwatson for (i = 0; i < mp_maxid + 1; i++) { 419222813Sattilio if (!CPU_ISSET(i, &all_cpus)) 420155547Srwatson continue; 421155550Srwatson ucp = &ucp_array[i]; 422148627Srwatson mtp->mt_numallocs += ucp->uc_allocs; 423148627Srwatson mtp->mt_numfrees += ucp->uc_frees; 424148627Srwatson 425148627Srwatson if (ucp->uc_allocbucket != NULL) { 426148627Srwatson ret = kread(kvm, ucp->uc_allocbucket, 427148627Srwatson &ub, sizeof(ub), 0); 428148627Srwatson if (ret != 0) { 429155550Srwatson free(ucp_array); 430148627Srwatson _memstat_mtl_empty(list); 431155549Srwatson list->mtl_error = ret; 432148627Srwatson return (-1); 433148627Srwatson } 434148627Srwatson mtp->mt_free += ub.ub_cnt; 435148627Srwatson } 436148627Srwatson if (ucp->uc_freebucket != NULL) { 437148627Srwatson ret = kread(kvm, ucp->uc_freebucket, 438148627Srwatson &ub, sizeof(ub), 0); 439148627Srwatson if (ret != 0) { 440155550Srwatson free(ucp_array); 441148627Srwatson _memstat_mtl_empty(list); 442155549Srwatson list->mtl_error = ret; 443148627Srwatson return (-1); 444148627Srwatson } 445148627Srwatson mtp->mt_free += ub.ub_cnt; 446148627Srwatson } 447148627Srwatson } 448148627Srwatsonskip_percpu: 449148627Srwatson mtp->mt_size = kz.uk_size; 450148627Srwatson mtp->mt_memalloced = 
mtp->mt_numallocs * mtp->mt_size; 451148627Srwatson mtp->mt_memfreed = mtp->mt_numfrees * mtp->mt_size; 452155542Srwatson mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed; 453148627Srwatson if (kz.uk_ppera > 1) 454148627Srwatson mtp->mt_countlimit = kz.uk_maxpages / 455148627Srwatson kz.uk_ipers; 456148627Srwatson else 457148627Srwatson mtp->mt_countlimit = kz.uk_maxpages * 458148627Srwatson kz.uk_ipers; 459148627Srwatson mtp->mt_byteslimit = mtp->mt_countlimit * mtp->mt_size; 460148627Srwatson mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees; 461148627Srwatson for (ubp = LIST_FIRST(&uz.uz_full_bucket); ubp != 462148627Srwatson NULL; ubp = LIST_NEXT(&ub, ub_link)) { 463148627Srwatson ret = kread(kvm, ubp, &ub, sizeof(ub), 0); 464148627Srwatson mtp->mt_zonefree += ub.ub_cnt; 465148627Srwatson } 466148627Srwatson if (!((kz.uk_flags & UMA_ZONE_SECONDARY) && 467148627Srwatson LIST_FIRST(&kz.uk_zones) != uzp)) { 468148627Srwatson mtp->mt_kegfree = kz.uk_free; 469148627Srwatson mtp->mt_free += mtp->mt_kegfree; 470148627Srwatson } 471148627Srwatson mtp->mt_free += mtp->mt_zonefree; 472148627Srwatson } 473148627Srwatson } 474155550Srwatson free(ucp_array); 475148627Srwatson return (0); 476148627Srwatson} 477