52c52
< __FBSDID("$FreeBSD: head/sys/vm/uma_core.c 147995 2005-07-14 16:17:21Z rwatson $");
---
> __FBSDID("$FreeBSD: head/sys/vm/uma_core.c 147996 2005-07-14 16:35:13Z rwatson $");
72a73
> #include <sys/sbuf.h>
236a238,239
> static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
> static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
250a254,259
> SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
>     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
>
> SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
>     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
>
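
The vm.zone_count OID added above is a plain integer, so it can be read from userland with the standard sysctlbyname(3) call. A minimal sketch follows; only the OID name comes from this diff, everything else is illustrative.

/*
 * Sketch: read the new vm.zone_count OID from userland.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
        int count;
        size_t len;

        len = sizeof(count);
        if (sysctlbyname("vm.zone_count", &count, &len, NULL, 0) == -1)
                err(1, "sysctlbyname(vm.zone_count)");
        printf("%d UMA zones\n", count);
        return (0);
}
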
2753a2763,2804
> * Generate statistics across both the zone and its per-cpu caches.  Return
> * the desired statistics if the pointer for that statistic is non-NULL.
> *
> * Note: does not update the zone statistics, as it can't safely clear the
> * per-CPU cache statistic.
> *
> * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
> * safe from off-CPU; we should modify the caches to track this information
> * directly so that we don't have to.
> */
> static void
> uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
>     u_int64_t *freesp)
> {
>         uma_cache_t cache;
>         u_int64_t allocs, frees;
>         int cachefree, cpu;
>
>         allocs = frees = 0;
>         cachefree = 0;
>         for (cpu = 0; cpu <= mp_maxid; cpu++) {
>                 if (CPU_ABSENT(cpu))
>                         continue;
>                 cache = &z->uz_cpu[cpu];
>                 if (cache->uc_allocbucket != NULL)
>                         cachefree += cache->uc_allocbucket->ub_cnt;
>                 if (cache->uc_freebucket != NULL)
>                         cachefree += cache->uc_freebucket->ub_cnt;
>                 allocs += cache->uc_allocs;
>                 frees += cache->uc_frees;
>         }
>         allocs += z->uz_allocs;
>         frees += z->uz_frees;
>         if (cachefreep != NULL)
>                 *cachefreep = cachefree;
>         if (allocsp != NULL)
>                 *allocsp = allocs;
>         if (freesp != NULL)
>                 *freesp = frees;
> }
>
> /*
2768d2818
< int cpu;
2771,2772c2821
< uma_cache_t cache;
< u_int64_t alloc;
---
> u_int64_t allocs, frees;
2798d2846
< alloc = 0;
2800,2809c2848,2851
< for (cpu = 0; cpu <= mp_maxid; cpu++) {
< if (CPU_ABSENT(cpu))
< continue;
< cache = &z->uz_cpu[cpu];
< if (cache->uc_allocbucket != NULL)
< cachefree += cache->uc_allocbucket->ub_cnt;
< if (cache->uc_freebucket != NULL)
< cachefree += cache->uc_freebucket->ub_cnt;
< alloc += cache->uc_allocs;
< }
---
> uma_zone_sumstat(z, &cachefree, &allocs, &frees);
> } else {
> allocs = z->uz_allocs;
> frees = z->uz_frees;
2811d2852
< alloc += z->uz_allocs;
2823c2864
< (unsigned long long)alloc);
---
> (unsigned long long)allocs);
2838a2880,3014
>
> static int
> sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
> {
>         uma_keg_t kz;
>         uma_zone_t z;
>         int count;
>
>         count = 0;
>         mtx_lock(&uma_mtx);
>         LIST_FOREACH(kz, &uma_kegs, uk_link) {
>                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
>                         count++;
>         }
>         mtx_unlock(&uma_mtx);
>         return (sysctl_handle_int(oidp, &count, 0, req));
> }
>
> static int
> sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
> {
>         struct uma_stream_header ush;
>         struct uma_type_header uth;
>         struct uma_percpu_stat ups;
>         uma_bucket_t bucket;
>         struct sbuf sbuf;
>         uma_cache_t cache;
>         uma_keg_t kz;
>         uma_zone_t z;
>         char *buffer;
>         int buflen, count, error, i;
>
>         mtx_lock(&uma_mtx);
> restart:
>         mtx_assert(&uma_mtx, MA_OWNED);
>         count = 0;
>         LIST_FOREACH(kz, &uma_kegs, uk_link) {
>                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
>                         count++;
>         }
>         mtx_unlock(&uma_mtx);
>
>         buflen = sizeof(ush) + count * (sizeof(uth) + sizeof(ups) *
>             MAXCPU) + 1;
>         buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
>
>         mtx_lock(&uma_mtx);
>         i = 0;
>         LIST_FOREACH(kz, &uma_kegs, uk_link) {
>                 LIST_FOREACH(z, &kz->uk_zones, uz_link)
>                         i++;
>         }
>         if (i > count) {
>                 free(buffer, M_TEMP);
>                 goto restart;
>         }
>         count = i;
>
>         sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);
>
>         /*
>          * Insert stream header.
>          */
>         bzero(&ush, sizeof(ush));
>         ush.ush_version = UMA_STREAM_VERSION;
>         ush.ush_maxcpus = MAXCPU;
>         ush.ush_count = count;
>         if (sbuf_bcat(&sbuf, &ush, sizeof(ush)) < 0) {
>                 mtx_unlock(&uma_mtx);
>                 error = ENOMEM;
>                 goto out;
>         }
>
>         LIST_FOREACH(kz, &uma_kegs, uk_link) {
>                 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
>                         bzero(&uth, sizeof(uth));
>                         ZONE_LOCK(z);
>                         strlcpy(uth.uth_name, z->uz_name, UMA_MAX_NAME);
>                         uth.uth_align = kz->uk_align;
>                         uth.uth_pages = kz->uk_pages;
>                         uth.uth_keg_free = kz->uk_free;
>                         uth.uth_size = kz->uk_size;
>                         uth.uth_rsize = kz->uk_rsize;
>                         uth.uth_maxpages = kz->uk_maxpages;
>                         if (kz->uk_ppera > 1)
>                                 uth.uth_limit = kz->uk_maxpages /
>                                     kz->uk_ppera;
>                         else
>                                 uth.uth_limit = kz->uk_maxpages *
>                                     kz->uk_ipers;
>                         LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
>                                 uth.uth_zone_free += bucket->ub_cnt;
>                         uth.uth_allocs = z->uz_allocs;
>                         uth.uth_frees = z->uz_frees;
>                         ZONE_UNLOCK(z);
>                         if (sbuf_bcat(&sbuf, &uth, sizeof(uth)) < 0) {
>                                 mtx_unlock(&uma_mtx);
>                                 error = ENOMEM;
>                                 goto out;
>                         }
>                         /*
>                          * XXXRW: Should not access bucket fields from
>                          * non-local CPU.  Instead need to modify the caches
>                          * to directly maintain these statistics so we don't
>                          * have to.
>                          */
>                         for (i = 0; i < MAXCPU; i++) {
>                                 bzero(&ups, sizeof(ups));
>                                 if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
>                                         goto skip;
>                                 cache = &z->uz_cpu[i];
>                                 if (cache->uc_allocbucket != NULL)
>                                         ups.ups_cache_free +=
>                                             cache->uc_allocbucket->ub_cnt;
>                                 if (cache->uc_freebucket != NULL)
>                                         ups.ups_cache_free +=
>                                             cache->uc_freebucket->ub_cnt;
>                                 ups.ups_allocs = cache->uc_allocs;
>                                 ups.ups_frees = cache->uc_frees;
> skip:
>                                 if (sbuf_bcat(&sbuf, &ups, sizeof(ups)) < 0) {
>                                         mtx_unlock(&uma_mtx);
>                                         error = ENOMEM;
>                                         goto out;
>                                 }
>                         }
>                 }
>         }
>         mtx_unlock(&uma_mtx);
>         sbuf_finish(&sbuf);
>         error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
> out:
>         free(buffer, M_TEMP);
>         return (error);
> }
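
As laid out by sysctl_vm_zone_stats() above, the vm.zone_stats stream is one uma_stream_header followed, for each of ush_count zones, by a uma_type_header and ush_maxcpus uma_percpu_stat records. A userland consumer might walk it as sketched below; this assumes the stream structures and UMA_STREAM_VERSION are visible to userland (they are defined outside this diff), and everything beyond the field names used by the handler above is illustrative.

/*
 * Sketch of a userland consumer of the vm.zone_stats stream.  Assumes
 * struct uma_stream_header, struct uma_type_header, struct uma_percpu_stat
 * and UMA_STREAM_VERSION are available to userland, e.g. via <vm/uma.h>.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
        struct uma_stream_header ush;
        struct uma_type_header uth;
        struct uma_percpu_stat ups;
        uint64_t cache_free;
        uint32_t i, j;
        size_t len;
        char *buffer, *p;

        /*
         * Probe for the stream size, then fetch it.  A robust consumer
         * would retry if zones are created between the two calls, much as
         * the handler itself re-counts zones after allocating its buffer.
         */
        len = 0;
        if (sysctlbyname("vm.zone_stats", NULL, &len, NULL, 0) == -1)
                err(1, "sysctlbyname(vm.zone_stats)");
        if ((buffer = malloc(len)) == NULL)
                err(1, "malloc");
        if (sysctlbyname("vm.zone_stats", buffer, &len, NULL, 0) == -1)
                err(1, "sysctlbyname(vm.zone_stats)");

        p = buffer;
        memcpy(&ush, p, sizeof(ush));
        p += sizeof(ush);
        if (ush.ush_version != UMA_STREAM_VERSION)
                errx(1, "unknown stream version %u",
                    (unsigned)ush.ush_version);

        /* One type header per zone, then ush_maxcpus per-CPU records. */
        for (i = 0; i < ush.ush_count; i++) {
                memcpy(&uth, p, sizeof(uth));
                p += sizeof(uth);
                cache_free = 0;
                for (j = 0; j < ush.ush_maxcpus; j++) {
                        memcpy(&ups, p, sizeof(ups));
                        p += sizeof(ups);
                        cache_free += ups.ups_cache_free;
                }
                printf("%s: allocs %ju frees %ju cached free items %ju\n",
                    uth.uth_name, (uintmax_t)uth.uth_allocs,
                    (uintmax_t)uth.uth_frees, (uintmax_t)cache_free);
        }
        free(buffer);
        return (0);
}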