memstat_uma.c revision 330897
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/lib/libmemstat/memstat_uma.c 330897 2018-03-14 03:19:51Z eadler $
 */

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <err.h>
#include <errno.h>
#include <kvm.h>
#include <nlist.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "memstat.h"
#include "memstat_internal.h"

static struct nlist namelist[] = {
#define	X_UMA_KEGS	0
	{ .n_name = "_uma_kegs" },
#define	X_MP_MAXID	1
	{ .n_name = "_mp_maxid" },
#define	X_ALL_CPUS	2
	{ .n_name = "_all_cpus" },
	{ .n_name = "" },
};

/*
 * Extract uma(9) statistics from the running kernel, and store all memory
 * type information in the passed list.  For each type, check the list for an
 * existing entry with the right name/allocator -- if present, update that
 * entry.  Otherwise, add a new entry.  On error, the entire list will be
 * cleared, as entries will be in an inconsistent state.
 *
 * To reduce the amount of work for a list that starts empty, we keep around
 * a hint as to whether it was empty when we began, so that we can avoid
 * searching the list for entries to update.  Updates are O(n^2) due to
 * searching for each entry before adding it.
 */
int
memstat_sysctl_uma(struct memory_type_list *list, int flags)
{
	struct uma_stream_header *ushp;
	struct uma_type_header *uthp;
	struct uma_percpu_stat *upsp;
	struct memory_type *mtp;
	int count, hint_dontsearch, i, j, maxcpus, maxid;
	char *buffer, *p;
	size_t size;

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	/*
	 * Query the number of CPUs and the number of zones so that we can
	 * guess an initial buffer size.  We loop until we succeed or really
	 * fail.  Note that the value of maxcpus we query using sysctl is not
	 * the version we use when processing the real data -- that is read
	 * from the header.
	 */
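	/*
	 * (Layout note: the vm.zone_stats export is a single binary stream,
	 * a uma_stream_header followed by, for each zone, a uma_type_header
	 * and one uma_percpu_stat per CPU slot, which is how it is parsed
	 * below.)
	 */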
retry:
	size = sizeof(maxid);
	if (sysctlbyname("kern.smp.maxid", &maxid, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
	if (size != sizeof(maxid)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	size = sizeof(count);
	if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		return (-1);
	}
	if (size != sizeof(count)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	size = sizeof(*uthp) + count * (sizeof(*uthp) + sizeof(*upsp) *
	    (maxid + 1));

	buffer = malloc(size);
	if (buffer == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}

	if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) {
		/*
		 * XXXRW: ENOMEM is an ambiguous return; we should perhaps
		 * bound the number of retries.
		 */
		if (errno == ENOMEM) {
			free(buffer);
			goto retry;
		}
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	if (size == 0) {
		free(buffer);
		return (0);
	}

	if (size < sizeof(*ushp)) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}
	p = buffer;
	ushp = (struct uma_stream_header *)p;
	p += sizeof(*ushp);

	if (ushp->ush_version != UMA_STREAM_VERSION) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
	 * a matching version and acceptable CPU count.
	 */
	maxcpus = ushp->ush_maxcpus;
	count = ushp->ush_count;
	for (i = 0; i < count; i++) {
		uthp = (struct uma_type_header *)p;
		p += sizeof(*uthp);

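		/*
		 * Reuse an existing entry for this zone when one is present;
		 * when the list started out empty, skip the lookup, since it
		 * cannot succeed.
		 */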
		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
			    uthp->uth_name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
			    uthp->uth_name, maxid + 1);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			free(buffer);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * Reset the statistics on the current node.
		 */
		_memstat_mt_reset_stats(mtp, maxid + 1);

		mtp->mt_numallocs = uthp->uth_allocs;
		mtp->mt_numfrees = uthp->uth_frees;
		mtp->mt_failures = uthp->uth_fails;
		mtp->mt_sleeps = uthp->uth_sleeps;

		for (j = 0; j < maxcpus; j++) {
			upsp = (struct uma_percpu_stat *)p;
			p += sizeof(*upsp);

			mtp->mt_percpu_cache[j].mtp_free =
			    upsp->ups_cache_free;
			mtp->mt_free += upsp->ups_cache_free;
			mtp->mt_numallocs += upsp->ups_allocs;
			mtp->mt_numfrees += upsp->ups_frees;
		}

		mtp->mt_size = uthp->uth_size;
		mtp->mt_rsize = uthp->uth_rsize;
		mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size;
		mtp->mt_memfreed = mtp->mt_numfrees * uthp->uth_size;
		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_countlimit = uthp->uth_limit;
		mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size;

		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
		mtp->mt_zonefree = uthp->uth_zone_free;

		/*
		 * UMA secondary zones share a keg with the primary zone.  To
		 * avoid double-reporting of free items, report keg free
		 * items only in the primary zone.
		 */
		if (!(uthp->uth_zone_flags & UTH_ZONE_SECONDARY)) {
			mtp->mt_kegfree = uthp->uth_keg_free;
			mtp->mt_free += mtp->mt_kegfree;
		}
		mtp->mt_free += mtp->mt_zonefree;
	}

	free(buffer);

	return (0);
}
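
/*
 * kvm(3) read helpers: each wraps kvm_read() and maps failures onto
 * MEMSTAT_ERROR_* constants, which callers can store directly in mtl_error.
 */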
static int
kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
    size_t offset)
{
	ssize_t ret;

	ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
	    size);
	if (ret < 0)
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
	return (0);
}

static int
kread_string(kvm_t *kvm, const void *kvm_pointer, char *buffer, int buflen)
{
	ssize_t ret;
	int i;

	for (i = 0; i < buflen; i++) {
		ret = kvm_read(kvm, (unsigned long)kvm_pointer + i,
		    &(buffer[i]), sizeof(char));
		if (ret < 0)
			return (MEMSTAT_ERROR_KVM);
		if ((size_t)ret != sizeof(char))
			return (MEMSTAT_ERROR_KVM_SHORTREAD);
		if (buffer[i] == '\0')
			return (0);
	}
	/* Truncate. */
	buffer[i-1] = '\0';
	return (0);
}

static int
kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
    size_t offset)
{
	ssize_t ret;

	ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
	if (ret < 0)
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
	return (0);
}

/*
 * memstat_kvm_uma() is similar to memstat_sysctl_uma(), only it extracts
 * UMA(9) statistics from a kernel core/memory file.
 */
int
memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
{
	LIST_HEAD(, uma_keg) uma_kegs;
	struct memory_type *mtp;
	struct uma_bucket *ubp, ub;
	struct uma_cache *ucp, *ucp_array;
	struct uma_zone *uzp, uz;
	struct uma_keg *kzp, kz;
	int hint_dontsearch, i, mp_maxid, ret;
	char name[MEMTYPE_MAXNAME];
	cpuset_t all_cpus;
	long cpusetsize;
	kvm_t *kvm;

	kvm = (kvm_t *)kvm_handle;
	hint_dontsearch = LIST_EMPTY(&list->mtl_list);
	if (kvm_nlist(kvm, namelist) != 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM;
		return (-1);
	}
	if (namelist[X_UMA_KEGS].n_type == 0 ||
	    namelist[X_UMA_KEGS].n_value == 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
		return (-1);
	}
	ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}
	ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}
	cpusetsize = sysconf(_SC_CPUSET_SIZE);
	if (cpusetsize == -1 || (u_long)cpusetsize > sizeof(cpuset_t)) {
		list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
		return (-1);
	}
	CPU_ZERO(&all_cpus);
	ret = kread_symbol(kvm, X_ALL_CPUS, &all_cpus, cpusetsize, 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}
	ucp_array = malloc(sizeof(struct uma_cache) * (mp_maxid + 1));
	if (ucp_array == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}
	/*
	 * Walk the kernel's keg list.  Each element is copied into a local
	 * struct via kread(), and the next pointer is then taken from that
	 * local copy, since kernel pointers cannot be dereferenced directly.
	 */
	for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp =
	    LIST_NEXT(&kz, uk_link)) {
		ret = kread(kvm, kzp, &kz, sizeof(kz), 0);
		if (ret != 0) {
			free(ucp_array);
			_memstat_mtl_empty(list);
			list->mtl_error = ret;
			return (-1);
		}
		for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp =
		    LIST_NEXT(&uz, uz_link)) {
			ret = kread(kvm, uzp, &uz, sizeof(uz), 0);
			if (ret != 0) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			ret = kread(kvm, uzp, ucp_array,
			    sizeof(struct uma_cache) * (mp_maxid + 1),
			    offsetof(struct uma_zone, uz_cpu[0]));
			if (ret != 0) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			ret = kread_string(kvm, uz.uz_name, name,
			    MEMTYPE_MAXNAME);
			if (ret != 0) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			if (hint_dontsearch == 0) {
				mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
				    name);
			} else
				mtp = NULL;
			if (mtp == NULL)
				mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
				    name, mp_maxid + 1);
			if (mtp == NULL) {
				free(ucp_array);
				_memstat_mtl_empty(list);
				list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
				return (-1);
			}
			/*
			 * Reset the statistics on the current node.
			 */
			_memstat_mt_reset_stats(mtp, mp_maxid + 1);
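			/*
			 * Zone-level counters are copied first; per-CPU
			 * cache contributions are folded in below, except
			 * for INTERNAL zones, whose caches are unused.
			 */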
			mtp->mt_numallocs = uz.uz_allocs;
			mtp->mt_numfrees = uz.uz_frees;
			mtp->mt_failures = uz.uz_fails;
			mtp->mt_sleeps = uz.uz_sleeps;
			if (kz.uk_flags & UMA_ZFLAG_INTERNAL)
				goto skip_percpu;
			for (i = 0; i < mp_maxid + 1; i++) {
				if (!CPU_ISSET(i, &all_cpus))
					continue;
				ucp = &ucp_array[i];
				mtp->mt_numallocs += ucp->uc_allocs;
				mtp->mt_numfrees += ucp->uc_frees;

				if (ucp->uc_allocbucket != NULL) {
					ret = kread(kvm, ucp->uc_allocbucket,
					    &ub, sizeof(ub), 0);
					if (ret != 0) {
						free(ucp_array);
						_memstat_mtl_empty(list);
						list->mtl_error = ret;
						return (-1);
					}
					mtp->mt_free += ub.ub_cnt;
				}
				if (ucp->uc_freebucket != NULL) {
					ret = kread(kvm, ucp->uc_freebucket,
					    &ub, sizeof(ub), 0);
					if (ret != 0) {
						free(ucp_array);
						_memstat_mtl_empty(list);
						list->mtl_error = ret;
						return (-1);
					}
					mtp->mt_free += ub.ub_cnt;
				}
			}
skip_percpu:
			mtp->mt_size = kz.uk_size;
			mtp->mt_rsize = kz.uk_rsize;
			mtp->mt_memalloced = mtp->mt_numallocs * mtp->mt_size;
			mtp->mt_memfreed = mtp->mt_numfrees * mtp->mt_size;
			mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
			if (kz.uk_ppera > 1)
				mtp->mt_countlimit = kz.uk_maxpages /
				    kz.uk_ipers;
			else
				mtp->mt_countlimit = kz.uk_maxpages *
				    kz.uk_ipers;
			mtp->mt_byteslimit = mtp->mt_countlimit * mtp->mt_size;
			mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
			for (ubp = LIST_FIRST(&uz.uz_buckets); ubp !=
			    NULL; ubp = LIST_NEXT(&ub, ub_link)) {
				/*
				 * Check the read here: on failure, ub would
				 * be stale and LIST_NEXT() would chase a
				 * garbage pointer.
				 */
				ret = kread(kvm, ubp, &ub, sizeof(ub), 0);
				if (ret != 0) {
					free(ucp_array);
					_memstat_mtl_empty(list);
					list->mtl_error = ret;
					return (-1);
				}
				mtp->mt_zonefree += ub.ub_cnt;
			}
			/*
			 * As in the sysctl path, report keg free items only
			 * in the primary zone to avoid double counting.
			 */
			if (!((kz.uk_flags & UMA_ZONE_SECONDARY) &&
			    LIST_FIRST(&kz.uk_zones) != uzp)) {
				mtp->mt_kegfree = kz.uk_free;
				mtp->mt_free += mtp->mt_kegfree;
			}
			mtp->mt_free += mtp->mt_zonefree;
		}
	}
	free(ucp_array);
	return (0);
}
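
For context, a minimal consumer of the sysctl path above might look like the
sketch below. It relies only on the documented libmemstat(3) interface
(memstat_mtl_alloc(), memstat_sysctl_uma(), the memstat_mtl_* iterators, and
the memstat_get_* accessors); the program itself is illustrative and not part
of the library, and the file name zstat.c is made up. Build with something
like "cc -o zstat zstat.c -lmemstat".

#include <sys/types.h>

#include <memstat.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct memory_type_list *mtlp;
	struct memory_type *mtp;

	mtlp = memstat_mtl_alloc();
	if (mtlp == NULL) {
		perror("memstat_mtl_alloc");
		return (1);
	}

	/* Populate the list with uma(9) zone statistics via sysctl. */
	if (memstat_sysctl_uma(mtlp, 0) < 0) {
		fprintf(stderr, "memstat_sysctl_uma: %s\n",
		    memstat_strerror(memstat_mtl_geterror(mtlp)));
		memstat_mtl_free(mtlp);
		return (1);
	}

	/* Walk the list and print one line per zone. */
	for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
	    mtp = memstat_mtl_next(mtp)) {
		printf("%-24s size %6ju inuse %10ju free %10ju\n",
		    memstat_get_name(mtp),
		    (uintmax_t)memstat_get_size(mtp),
		    (uintmax_t)memstat_get_count(mtp),
		    (uintmax_t)memstat_get_free(mtp));
	}

	memstat_mtl_free(mtlp);
	return (0);
}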