/* opensolaris_kmem.c revision 270861 */
/*-
 * Copyright (c) 2006-2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * OpenSolaris compatibility shim: maps the Solaris kmem_* allocation API
 * (kmem_alloc/kmem_free, kmem_cache_*) onto FreeBSD's malloc(9)/uma(9)
 * allocators for ZFS.  With KMEM_DEBUG defined, every allocation is
 * prefixed with a tracking header so leaks can be reported at shutdown.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c 270861 2014-08-30 21:44:32Z smh $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/mutex.h>

#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#ifdef KMEM_DEBUG
#include <sys/queue.h>
#include <sys/stack.h>
#endif

#ifdef _KERNEL
MALLOC_DEFINE(M_SOLARIS, "solaris", "Solaris");
#else
/* Userland build: strip the kernel malloc(9) type/flags arguments. */
#define	malloc(size, type, flags)	malloc(size)
#define	free(addr, type)		free(addr)
#endif

#ifdef KMEM_DEBUG
/*
 * Per-allocation tracking header, prepended to every zfs_kmem_alloc()
 * allocation so the allocating stack trace can be dumped for leaks.
 */
struct kmem_item {
	struct stack	stack;		/* stack trace captured at alloc time */
	LIST_ENTRY(kmem_item) next;	/* linkage on the global kmem_items list */
};
/* All live (not-yet-freed) allocations, protected by kmem_items_mtx. */
static LIST_HEAD(, kmem_item) kmem_items;
static struct mtx kmem_items_mtx;
MTX_SYSINIT(kmem_items_mtx, &kmem_items_mtx, "kmem_items", MTX_DEF);
#endif	/* KMEM_DEBUG */

#include <sys/vmem.h>

/*
 * Solaris kmem_alloc() replacement: allocate 'size' bytes from the
 * M_SOLARIS malloc type.  Under KMEM_DEBUG the allocation is enlarged
 * by a struct kmem_item header, the current stack is saved in it, and
 * the item is linked onto kmem_items; the caller receives the pointer
 * just past the header.
 */
void *
zfs_kmem_alloc(size_t size, int kmflags)
{
	void *p;
#ifdef KMEM_DEBUG
	struct kmem_item *i;

	size += sizeof(struct kmem_item);
#endif
	p = malloc(size, M_SOLARIS, kmflags);
#ifndef _KERNEL
	/* Userland malloc() cannot sleep; enforce KM_SLEEP's no-fail contract. */
	if (kmflags & KM_SLEEP)
		assert(p != NULL);
#endif
#ifdef KMEM_DEBUG
	if (p != NULL) {
		i = p;
		/* Hand the caller the region after the tracking header. */
		p = (u_char *)p + sizeof(struct kmem_item);
		stack_save(&i->stack);
		mtx_lock(&kmem_items_mtx);
		LIST_INSERT_HEAD(&kmem_items, i, next);
		mtx_unlock(&kmem_items_mtx);
	}
#endif
	return (p);
}

/*
 * Solaris kmem_free() replacement.  The Solaris API passes the size but
 * malloc(9) does not need it.  Under KMEM_DEBUG, step back to the
 * tracking header, verify the pointer is on the live list (ASSERT fires
 * on a free of an untracked/corrupted pointer), and unlink it before
 * freeing the original header address.
 */
void
zfs_kmem_free(void *buf, size_t size __unused)
{
#ifdef KMEM_DEBUG
	if (buf == NULL) {
		printf("%s: attempt to free NULL\n", __func__);
		return;
	}
	struct kmem_item *i;

	buf = (u_char *)buf - sizeof(struct kmem_item);
	mtx_lock(&kmem_items_mtx);
	LIST_FOREACH(i, &kmem_items, next) {
		if (i == buf)
			break;
	}
	ASSERT(i != NULL);
	LIST_REMOVE(i, next);
	mtx_unlock(&kmem_items_mtx);
#endif
	free(buf, M_SOLARIS);
}

/* Cached result for kmem_size(), computed once at SI_SUB_KMEM time. */
static uint64_t kmem_size_val;

/*
 * Compute the value reported by kmem_size(): physical memory
 * (v_page_count pages) clamped to the kernel's vm_kmem_size limit.
 */
static void
kmem_size_init(void *unused __unused)
{

	kmem_size_val = (uint64_t)vm_cnt.v_page_count * PAGE_SIZE;
	if (kmem_size_val > vm_kmem_size)
		kmem_size_val = vm_kmem_size;
}
SYSINIT(kmem_size_init, SI_SUB_KMEM, SI_ORDER_ANY, kmem_size_init, NULL);

/*
 * The return values from kmem_free_* are only valid once the pagedaemon
 * has been initialised, before then they return 0.
 *
 * To ensure the returns are valid the caller can use a SYSINIT with
 * subsystem set to SI_SUB_KTHREAD_PAGE and an order of at least
 * SI_ORDER_SECOND.
 */
u_int
kmem_free_target(void)
{

	return (vm_cnt.v_free_target);
}

u_int
kmem_free_min(void)
{

	return (vm_cnt.v_free_min);
}

/* Free pages currently available: truly free plus cached pages. */
u_int
kmem_free_count(void)
{

	return (vm_cnt.v_free_count + vm_cnt.v_cache_count);
}

u_int
kmem_page_count(void)
{

	return (vm_cnt.v_page_count);
}

/* Total usable kernel memory, as precomputed by kmem_size_init(). */
uint64_t
kmem_size(void)
{

	return (kmem_size_val);
}

/* Bytes currently allocated from the kernel's kmem arena. */
uint64_t
kmem_used(void)
{

	return (vmem_size(kmem_arena, VMEM_ALLOC));
}

/*
 * uma(9) ctor adapter: forward to the Solaris-style constructor stored
 * in the kmem_cache, which takes (buf, private, flags).
 */
static int
kmem_std_constructor(void *mem, int size __unused, void *private, int flags)
{
	struct kmem_cache *cache = private;

	return (cache->kc_constructor(mem, cache->kc_private, flags));
}

/* uma(9) dtor adapter: forward to the Solaris-style destructor. */
static void
kmem_std_destructor(void *mem, int size __unused, void *private)
{
	struct kmem_cache *cache = private;

	cache->kc_destructor(mem, cache->kc_private);
}

/*
 * Solaris kmem_cache_create() replacement.  In a normal kernel build
 * the cache is backed by a uma(9) zone; with KMEM_DEBUG (or in
 * userland) it degrades to plain kmem_alloc() per object so the
 * leak-tracking header wraps every allocation.  'reclaim' and 'vmp'
 * are not supported (vmp must be NULL).
 */
kmem_cache_t *
kmem_cache_create(char *name, size_t bufsize, size_t align,
    int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
    void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags)
{
	kmem_cache_t *cache;

	ASSERT(vmp == NULL);

	cache = kmem_alloc(sizeof(*cache), KM_SLEEP);
	strlcpy(cache->kc_name, name, sizeof(cache->kc_name));
	cache->kc_constructor = constructor;
	cache->kc_destructor = destructor;
	cache->kc_private = private;
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
	/* uma takes alignment as a mask, hence align - 1. */
	cache->kc_zone = uma_zcreate(cache->kc_name, bufsize,
	    constructor != NULL ? kmem_std_constructor : NULL,
	    destructor != NULL ? kmem_std_destructor : NULL,
	    NULL, NULL, align > 0 ? align - 1 : 0, cflags);
#else
	cache->kc_size = bufsize;
#endif

	return (cache);
}

void
kmem_cache_destroy(kmem_cache_t *cache)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
	uma_zdestroy(cache->kc_zone);
#endif
	kmem_free(cache, sizeof(*cache));
}

/*
 * Allocate one object from the cache.  The KMEM_DEBUG/userland path
 * emulates uma's behavior by running the constructor manually.
 */
void *
kmem_cache_alloc(kmem_cache_t *cache, int flags)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
	return (uma_zalloc_arg(cache->kc_zone, cache, flags));
#else
	void *p;

	p = kmem_alloc(cache->kc_size, flags);
	if (p != NULL && cache->kc_constructor != NULL)
		kmem_std_constructor(p, cache->kc_size, cache, flags);
	return (p);
#endif
}

/* Return one object to the cache, running the destructor if present. */
void
kmem_cache_free(kmem_cache_t *cache, void *buf)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
	uma_zfree_arg(cache->kc_zone, buf, cache);
#else
	if (cache->kc_destructor != NULL)
		kmem_std_destructor(buf, cache->kc_size, cache);
	kmem_free(buf, cache->kc_size);
#endif
}

#ifdef _KERNEL
/* Drain the cache's uma zone (no-op under KMEM_DEBUG: no zone exists). */
void
kmem_cache_reap_now(kmem_cache_t *cache)
{
#ifndef KMEM_DEBUG
	zone_drain(cache->kc_zone);
#endif
}

/* Global memory-pressure reclaim: ask uma to give pages back. */
void
kmem_reap(void)
{
	uma_reclaim();
}
#else
void
kmem_cache_reap_now(kmem_cache_t *cache __unused)
{
}

void
kmem_reap(void)
{
}
#endif

/* Solaris kmem_debugging(): this port never reports debug allocators. */
int
kmem_debugging(void)
{
	return (0);
}

/*
 * Minimal calloc() for consumers expecting the libc interface.
 * NOTE(review): n * s is not checked for overflow before allocation —
 * callers must ensure the product fits in size_t.
 */
void *
calloc(size_t n, size_t s)
{
	return (kmem_zalloc(n * s, KM_NOSLEEP));
}

#ifdef KMEM_DEBUG
void	kmem_show(void *);

/*
 * Shutdown-time leak report: dump the address and allocating stack
 * trace of every allocation still on the kmem_items list.
 */
void
kmem_show(void *dummy __unused)
{
	struct kmem_item *i;

	mtx_lock(&kmem_items_mtx);
	if (LIST_EMPTY(&kmem_items))
		printf("KMEM_DEBUG: No leaked elements.\n");
	else {
		printf("KMEM_DEBUG: Leaked elements:\n\n");
		LIST_FOREACH(i, &kmem_items, next) {
			printf("address=%p\n", i);
			stack_print_ddb(&i->stack);
			printf("\n");
		}
	}
	mtx_unlock(&kmem_items_mtx);
}

SYSUNINIT(sol_kmem, SI_SUB_CPU, SI_ORDER_FIRST, kmem_show, NULL);
#endif	/* KMEM_DEBUG */