Deleted Added
full compact
uma_core.c (147995) uma_core.c (147996)
1/*-
2 * Copyright (c) 2004-2005 Robert N. M. Watson
3 * Copyright (c) 2004, 2005,
4 * Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved.
5 * Copyright (c) 2002, 2003, 2004, 2005,
6 * Jeffrey Roberson <jeff@FreeBSD.org>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 35 unchanged lines hidden (view full) ---

44
45/*
46 * TODO:
47 * - Improve memory usage for large allocations
48 * - Investigate cache size adjustments
49 */
50
51#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2004-2005 Robert N. M. Watson
3 * Copyright (c) 2004, 2005,
4 * Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved.
5 * Copyright (c) 2002, 2003, 2004, 2005,
6 * Jeffrey Roberson <jeff@FreeBSD.org>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 35 unchanged lines hidden (view full) ---

44
45/*
46 * TODO:
47 * - Improve memory usage for large allocations
48 * - Investigate cache size adjustments
49 */
50
51#include <sys/cdefs.h>
52__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 147995 2005-07-14 16:17:21Z rwatson $");
52__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 147996 2005-07-14 16:35:13Z rwatson $");
53
54/* I should really use ktr.. */
55/*
56#define UMA_DEBUG 1
57#define UMA_DEBUG_ALLOC 1
58#define UMA_DEBUG_ALLOC_1 1
59*/
60

--- 4 unchanged lines hidden (view full) ---

65#include <sys/types.h>
66#include <sys/queue.h>
67#include <sys/malloc.h>
68#include <sys/ktr.h>
69#include <sys/lock.h>
70#include <sys/sysctl.h>
71#include <sys/mutex.h>
72#include <sys/proc.h>
53
54/* I should really use ktr.. */
55/*
56#define UMA_DEBUG 1
57#define UMA_DEBUG_ALLOC 1
58#define UMA_DEBUG_ALLOC_1 1
59*/
60

--- 4 unchanged lines hidden (view full) ---

65#include <sys/types.h>
66#include <sys/queue.h>
67#include <sys/malloc.h>
68#include <sys/ktr.h>
69#include <sys/lock.h>
70#include <sys/sysctl.h>
71#include <sys/mutex.h>
72#include <sys/proc.h>
73#include <sys/sbuf.h>
73#include <sys/smp.h>
74#include <sys/vmmeter.h>
75
76#include <vm/vm.h>
77#include <vm/vm_object.h>
78#include <vm/vm_page.h>
79#include <vm/vm_param.h>
80#include <vm/vm_map.h>

--- 148 unchanged lines hidden (view full) ---

229static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
230static void zone_drain(uma_zone_t);
231static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
232 uma_fini fini, int align, u_int16_t flags);
233
234void uma_print_zone(uma_zone_t);
235void uma_print_stats(void);
236static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
74#include <sys/smp.h>
75#include <sys/vmmeter.h>
76
77#include <vm/vm.h>
78#include <vm/vm_object.h>
79#include <vm/vm_page.h>
80#include <vm/vm_param.h>
81#include <vm/vm_map.h>

--- 148 unchanged lines hidden (view full) ---

230static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
231static void zone_drain(uma_zone_t);
232static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
233 uma_fini fini, int align, u_int16_t flags);
234
235void uma_print_zone(uma_zone_t);
236void uma_print_stats(void);
237static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
238static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
239static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
237
238#ifdef WITNESS
239static int nosleepwithlocks = 1;
240SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
241 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
242#else
243static int nosleepwithlocks = 0;
244SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
245 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
246#endif
247SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
248 NULL, 0, sysctl_vm_zone, "A", "Zone Info");
249SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
250
240
241#ifdef WITNESS
242static int nosleepwithlocks = 1;
243SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
244 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
245#else
246static int nosleepwithlocks = 0;
247SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
248 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
249#endif
250SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
251 NULL, 0, sysctl_vm_zone, "A", "Zone Info");
252SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
253
254SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
255 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
256
257SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
258 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
259
251/*
252 * This routine checks to see whether or not it's safe to enable buckets.
253 */
254
255static void
256bucket_enable(void)
257{
258 if (cnt.v_free_count < cnt.v_free_min)

--- 2487 unchanged lines hidden (view full) ---

2746 continue;
2747 cache = &zone->uz_cpu[i];
2748 printf("CPU %d Cache:\n", i);
2749 cache_print(cache);
2750 }
2751}
2752
2753/*
260/*
261 * This routine checks to see whether or not it's safe to enable buckets.
262 */
263
264static void
265bucket_enable(void)
266{
267 if (cnt.v_free_count < cnt.v_free_min)

--- 2487 unchanged lines hidden (view full) ---

2755 continue;
2756 cache = &zone->uz_cpu[i];
2757 printf("CPU %d Cache:\n", i);
2758 cache_print(cache);
2759 }
2760}
2761
2762/*
2763 * Generate statistics across both the zone and its per-cpu cache's. Return
2764 * desired statistics if the pointer is non-NULL for that statistic.
2765 *
2766 * Note: does not update the zone statistics, as it can't safely clear the
2767 * per-CPU cache statistic.
2768 *
2769 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
2770 * safe from off-CPU; we should modify the caches to track this information
2771 * directly so that we don't have to.
2772 */
2773static void
2774uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
2775 u_int64_t *freesp)
2776{
2777 uma_cache_t cache;
2778 u_int64_t allocs, frees;
2779 int cachefree, cpu;
2780
2781 allocs = frees = 0;
2782 cachefree = 0;
2783 for (cpu = 0; cpu <= mp_maxid; cpu++) {
2784 if (CPU_ABSENT(cpu))
2785 continue;
2786 cache = &z->uz_cpu[cpu];
2787 if (cache->uc_allocbucket != NULL)
2788 cachefree += cache->uc_allocbucket->ub_cnt;
2789 if (cache->uc_freebucket != NULL)
2790 cachefree += cache->uc_freebucket->ub_cnt;
2791 allocs += cache->uc_allocs;
2792 frees += cache->uc_frees;
2793 }
2794 allocs += z->uz_allocs;
2795 frees += z->uz_frees;
2796 if (cachefreep != NULL)
2797 *cachefreep = cachefree;
2798 if (allocsp != NULL)
2799 *allocsp = allocs;
2800 if (freesp != NULL)
2801 *freesp = frees;
2802}
2803
2804/*
2754 * Sysctl handler for vm.zone
2755 *
2756 * stolen from vm_zone.c
2757 */
2758static int
2759sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
2760{
2761 int error, len, cnt;
2762 const int linesize = 128; /* conservative */
2763 int totalfree;
2764 char *tmpbuf, *offset;
2765 uma_zone_t z;
2766 uma_keg_t zk;
2767 char *p;
2805 * Sysctl handler for vm.zone
2806 *
2807 * stolen from vm_zone.c
2808 */
2809static int
2810sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
2811{
2812 int error, len, cnt;
2813 const int linesize = 128; /* conservative */
2814 int totalfree;
2815 char *tmpbuf, *offset;
2816 uma_zone_t z;
2817 uma_keg_t zk;
2818 char *p;
2768 int cpu;
2769 int cachefree;
2770 uma_bucket_t bucket;
2819 int cachefree;
2820 uma_bucket_t bucket;
2771 uma_cache_t cache;
2772 u_int64_t alloc;
2821 u_int64_t allocs, frees;
2773
2774 cnt = 0;
2775 mtx_lock(&uma_mtx);
2776 LIST_FOREACH(zk, &uma_kegs, uk_link) {
2777 LIST_FOREACH(z, &zk->uk_zones, uz_link)
2778 cnt++;
2779 }
2780 mtx_unlock(&uma_mtx);

--- 9 unchanged lines hidden (view full) ---

2790 offset = tmpbuf;
2791 mtx_lock(&uma_mtx);
2792 LIST_FOREACH(zk, &uma_kegs, uk_link) {
2793 LIST_FOREACH(z, &zk->uk_zones, uz_link) {
2794 if (cnt == 0) /* list may have changed size */
2795 break;
2796 ZONE_LOCK(z);
2797 cachefree = 0;
2822
2823 cnt = 0;
2824 mtx_lock(&uma_mtx);
2825 LIST_FOREACH(zk, &uma_kegs, uk_link) {
2826 LIST_FOREACH(z, &zk->uk_zones, uz_link)
2827 cnt++;
2828 }
2829 mtx_unlock(&uma_mtx);

--- 9 unchanged lines hidden (view full) ---

2839 offset = tmpbuf;
2840 mtx_lock(&uma_mtx);
2841 LIST_FOREACH(zk, &uma_kegs, uk_link) {
2842 LIST_FOREACH(z, &zk->uk_zones, uz_link) {
2843 if (cnt == 0) /* list may have changed size */
2844 break;
2845 ZONE_LOCK(z);
2846 cachefree = 0;
2798 alloc = 0;
2799 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2847 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2800 for (cpu = 0; cpu <= mp_maxid; cpu++) {
2801 if (CPU_ABSENT(cpu))
2802 continue;
2803 cache = &z->uz_cpu[cpu];
2804 if (cache->uc_allocbucket != NULL)
2805 cachefree += cache->uc_allocbucket->ub_cnt;
2806 if (cache->uc_freebucket != NULL)
2807 cachefree += cache->uc_freebucket->ub_cnt;
2808 alloc += cache->uc_allocs;
2809 }
2848 uma_zone_sumstat(z, &cachefree, &allocs, &frees);
2849 } else {
2850 allocs = z->uz_allocs;
2851 frees = z->uz_frees;
2810 }
2852 }
2811 alloc += z->uz_allocs;
2812
2813 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
2814 cachefree += bucket->ub_cnt;
2815 }
2816 totalfree = zk->uk_free + cachefree;
2817 len = snprintf(offset, linesize,
2818 "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
2819 z->uz_name, zk->uk_size,
2820 zk->uk_maxpages * zk->uk_ipers,
2821 (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree,
2822 totalfree,
2853
2854 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
2855 cachefree += bucket->ub_cnt;
2856 }
2857 totalfree = zk->uk_free + cachefree;
2858 len = snprintf(offset, linesize,
2859 "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
2860 z->uz_name, zk->uk_size,
2861 zk->uk_maxpages * zk->uk_ipers,
2862 (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree,
2863 totalfree,
2823 (unsigned long long)alloc);
2864 (unsigned long long)allocs);
2824 ZONE_UNLOCK(z);
2825 for (p = offset + 12; p > offset && *p == ' '; --p)
2826 /* nothing */ ;
2827 p[1] = ':';
2828 cnt--;
2829 offset += len;
2830 }
2831 }
2832 mtx_unlock(&uma_mtx);
2833 *offset++ = '\0';
2834 error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
2835out:
2836 FREE(tmpbuf, M_TEMP);
2837 return (error);
2838}
2865 ZONE_UNLOCK(z);
2866 for (p = offset + 12; p > offset && *p == ' '; --p)
2867 /* nothing */ ;
2868 p[1] = ':';
2869 cnt--;
2870 offset += len;
2871 }
2872 }
2873 mtx_unlock(&uma_mtx);
2874 *offset++ = '\0';
2875 error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
2876out:
2877 FREE(tmpbuf, M_TEMP);
2878 return (error);
2879}
2880
2881static int
2882sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
2883{
2884 uma_keg_t kz;
2885 uma_zone_t z;
2886 int count;
2887
2888 count = 0;
2889 mtx_lock(&uma_mtx);
2890 LIST_FOREACH(kz, &uma_kegs, uk_link) {
2891 LIST_FOREACH(z, &kz->uk_zones, uz_link)
2892 count++;
2893 }
2894 mtx_unlock(&uma_mtx);
2895 return (sysctl_handle_int(oidp, &count, 0, req));
2896}
2897
/*
 * Sysctl handler for vm.zone_stats: export a binary statistics stream to
 * userland.  The stream is one uma_stream_header followed, per zone, by a
 * uma_type_header and exactly MAXCPU uma_percpu_stat records, assembled
 * with sbuf into a preallocated fixed-size buffer.
 */
 2898static int
 2899sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
 2900{
 2901	struct uma_stream_header ush;
 2902	struct uma_type_header uth;
 2903	struct uma_percpu_stat ups;
 2904	uma_bucket_t bucket;
 2905	struct sbuf sbuf;
 2906	uma_cache_t cache;
 2907	uma_keg_t kz;
 2908	uma_zone_t z;
 2909	char *buffer;
 2910	int buflen, count, error, i;
 2911
	/* The restart label must be entered with uma_mtx held. */
 2912	mtx_lock(&uma_mtx);
 2913restart:
 2914	mtx_assert(&uma_mtx, MA_OWNED);
 2915	count = 0;
 2916	LIST_FOREACH(kz, &uma_kegs, uk_link) {
 2917		LIST_FOREACH(z, &kz->uk_zones, uz_link)
 2918			count++;
 2919	}
	/* Drop the lock so the M_WAITOK allocation below may sleep. */
 2920	mtx_unlock(&uma_mtx);
 2921
 2922	buflen = sizeof(ush) + count * (sizeof(uth) + sizeof(ups) *
 2923	    MAXCPU) + 1;
 2924	buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
 2925
	/*
	 * Re-count under the lock: if zones were created while we slept,
	 * the buffer may now be too small, so free it and start over.  A
	 * shrunken list is fine -- just record the smaller count.
	 */
 2926	mtx_lock(&uma_mtx);
 2927	i = 0;
 2928	LIST_FOREACH(kz, &uma_kegs, uk_link) {
 2929		LIST_FOREACH(z, &kz->uk_zones, uz_link)
 2930			i++;
 2931	}
 2932	if (i > count) {
 2933		free(buffer, M_TEMP);
 2934		goto restart;
 2935	}
 2936	count = i;
 2937
	/* Fixed-length sbuf over our buffer; sbuf_bcat fails when full. */
 2938	sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);
 2939
 2940	/*
 2941	 * Insert stream header.
 2942	 */
 2943	bzero(&ush, sizeof(ush));
 2944	ush.ush_version = UMA_STREAM_VERSION;
 2945	ush.ush_maxcpus = MAXCPU;
 2946	ush.ush_count = count;
 2947	if (sbuf_bcat(&sbuf, &ush, sizeof(ush)) < 0) {
 2948		mtx_unlock(&uma_mtx);
 2949		error = ENOMEM;
 2950		goto out;
 2951	}
 2952
	/* One uma_type_header plus MAXCPU uma_percpu_stat per zone. */
 2953	LIST_FOREACH(kz, &uma_kegs, uk_link) {
 2954		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
 2955			bzero(&uth, sizeof(uth));
 2956			ZONE_LOCK(z);
 2957			strlcpy(uth.uth_name, z->uz_name, UMA_MAX_NAME);
 2958			uth.uth_align = kz->uk_align;
 2959			uth.uth_pages = kz->uk_pages;
 2960			uth.uth_keg_free = kz->uk_free;
 2961			uth.uth_size = kz->uk_size;
 2962			uth.uth_rsize = kz->uk_rsize;
 2963			uth.uth_maxpages = kz->uk_maxpages;
			/*
			 * Item limit: for multi-page allocations express it
			 * in allocations, otherwise in items per page.
			 */
 2964			if (kz->uk_ppera > 1)
 2965				uth.uth_limit = kz->uk_maxpages /
 2966				    kz->uk_ppera;
 2967			else
 2968				uth.uth_limit = kz->uk_maxpages *
 2969				    kz->uk_ipers;
 2970			LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
 2971				uth.uth_zone_free += bucket->ub_cnt;
 2972			uth.uth_allocs = z->uz_allocs;
 2973			uth.uth_frees = z->uz_frees;
 2974			ZONE_UNLOCK(z);
 2975			if (sbuf_bcat(&sbuf, &uth, sizeof(uth)) < 0) {
 2976				mtx_unlock(&uma_mtx);
 2977				error = ENOMEM;
 2978				goto out;
 2979			}
 2980			/*
 2981			 * XXXRW: Should not access bucket fields from
 2982			 * non-local CPU.  Instead need to modify the caches
 2983			 * to directly maintain these statistics so we don't
 2984			 * have to.
 2985			 */
			/*
			 * Internal zones emit zeroed per-CPU records so the
			 * stream layout stays fixed at MAXCPU entries.
			 */
 2986			for (i = 0; i < MAXCPU; i++) {
 2987				bzero(&ups, sizeof(ups));
 2988				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
 2989					goto skip;
 2990				cache = &z->uz_cpu[i];
 2991				if (cache->uc_allocbucket != NULL)
 2992					ups.ups_cache_free +=
 2993					    cache->uc_allocbucket->ub_cnt;
 2994				if (cache->uc_freebucket != NULL)
 2995					ups.ups_cache_free +=
 2996					    cache->uc_freebucket->ub_cnt;
 2997				ups.ups_allocs = cache->uc_allocs;
 2998				ups.ups_frees = cache->uc_frees;
 2999skip:
 3000				if (sbuf_bcat(&sbuf, &ups, sizeof(ups)) < 0) {
 3001					mtx_unlock(&uma_mtx);
 3002					error = ENOMEM;
 3003					goto out;
 3004				}
 3005			}
 3006		}
 3007	}
 3008	mtx_unlock(&uma_mtx);
 3009	sbuf_finish(&sbuf);
 3010	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
	/* uma_mtx is already dropped on every path that reaches out. */
 3011out:
 3012	free(buffer, M_TEMP);
 3013	return (error);
 3014}