/*-
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
--- 12 unchanged lines hidden ---
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/memguard.c 254025 2013-08-07 06:21:20Z jeff $");

/*
 * MemGuard is a simple replacement allocator for debugging only
 * which provides ElectricFence-style memory barrier protection on
 * objects being allocated, and is used to detect tampering-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
--- 5 unchanged lines hidden ---
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
--- 35 unchanged lines hidden ---
        strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
        mtx_unlock(&malloc_mtx);
        return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");

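/*
 * Typical usage, per memguard(9): build the kernel with
 * "options DEBUG_MEMGUARD", then name the malloc(9) type to guard by its
 * short description via the sysctl above (or the matching loader
 * tunable), e.g.:
 *
 *      # sysctl vm.memguard.desc=ifnet
 *
 * Allocations of that type are then routed through memguard_alloc() and
 * memguard_free() below instead of the regular malloc(9) path.
 */
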
static vmem_t *memguard_map = NULL;
static vm_offset_t memguard_cursor;
static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
--- 71 unchanged lines hidden ---
        return (km_size + memguard_mapsize);
}

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single vmem arena (a contiguous chunk of kernel address space).
 */
void
memguard_init(vmem_t *parent)
{
        vm_offset_t base;

        vmem_alloc(parent, memguard_mapsize, M_WAITOK, &base);
        memguard_map = vmem_create("memguard arena", base, memguard_mapsize,
            PAGE_SIZE, 0, M_WAITOK);
        memguard_cursor = base;
        memguard_base = base;

        printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
        printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
        printf("\tMEMGUARD map size: %ju KBytes\n",
            (uintmax_t)memguard_mapsize >> 10);
}

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
        struct sysctl_oid_list *parent;

        parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

        SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
            &memguard_base, "MemGuard KVA base");
        SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
            &memguard_mapsize, "MemGuard KVA size");
#if 0
        SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
            &memguard_map->size, "MemGuard KVA used");
#endif
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
--- 12 unchanged lines hidden ---
        if (pa == 0)
                panic("MemGuard detected double-free of %p", (void *)va);
        p = PHYS_TO_VM_PAGE(pa);
        KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
            ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
        return ((u_long *)&p->pageq.tqe_next);
}

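/*
 * v2sizev() is v2sizep()'s companion: it returns a pointer to the u_long
 * recording the size of the vmem span backing the allocation (size_v,
 * i.e. the page-rounded size plus any guard pages).  Both values live in
 * the otherwise-unused pageq linkage of the wired first page: tqe_next
 * holds the request size, tqe_prev the span size.
 */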
static u_long *
v2sizev(vm_offset_t va)
{
        vm_paddr_t pa;
        struct vm_page *p;

        pa = pmap_kextract(va);
        if (pa == 0)
                panic("MemGuard detected double-free of %p", (void *)va);
        p = PHYS_TO_VM_PAGE(pa);
        KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
            ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
        return ((u_long *)&p->pageq.tqe_prev);
}

/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
        vm_offset_t addr;
--- 10 unchanged lines hidden ---
         * the value of memguard_options so we have a consistent
         * value.
         */
        size_v = size_p;
        do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
        if (do_guard)
                size_v += 2 * PAGE_SIZE;

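        /*
         * With MG_GUARD_AROUND the reservation is laid out as
         *
         *      | guard page | size_p bytes, backed | guard page |
         *
         * Only the middle size_p bytes are ever backed by physical
         * pages; the guard pages on either side stay unmapped, so any
         * access beyond the object faults immediately.
         */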
        /*
         * When we pass our memory limit, reject sub-page allocations.
         * Page-size and larger allocations will use the same amount
         * of physical memory whether we allocate or hand off to
         * uma_large_alloc(), so keep those.
         */
        if (vmem_size(memguard_map, VMEM_ALLOC) >= memguard_physlimit &&
            req_size < PAGE_SIZE) {
                addr = (vm_offset_t)NULL;
                memguard_fail_pgs++;
                goto out;
        }
        /*
         * Keep a moving cursor so we don't recycle KVA as long as
         * possible.  It's not perfect, since we don't know in what
         * order previous allocations will be freed, but it's simple
         * and fast, and requires O(1) additional storage if guard
         * pages are not used.
         *
         * XXX This scheme will lead to greater fragmentation of the
         * map, unless vm_map_findspace() is tweaked.
         */
        for (;;) {
                if (vmem_xalloc(memguard_map, size_v, 0, 0, 0, memguard_cursor,
                    VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr) == 0)
                        break;
                /*
                 * The map has no space.  This may be due to
                 * fragmentation, or because the cursor is near the
                 * end of the map.
                 */
                if (memguard_cursor == memguard_base) {
                        memguard_fail_kva++;
                        addr = (vm_offset_t)NULL;
                        goto out;
                }
                memguard_wrap++;
                memguard_cursor = memguard_base;
        }
        if (do_guard)
                addr += PAGE_SIZE;
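        /*
         * Back only the interior size_p bytes of the span with physical
         * pages; the surrounding guard pages (when enabled) are left
         * unmapped so that stray accesses fault.
         */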
        rv = kmem_back(kmem_object, addr, size_p, flags);
        if (rv != KERN_SUCCESS) {
                /*
                 * Free the span at its original address: addr was
                 * advanced past the leading guard page above, and
                 * vmem_xfree() must be given the address the span was
                 * allocated at.
                 */
                vmem_xfree(memguard_map, addr - (do_guard ? PAGE_SIZE : 0),
                    size_v);
                memguard_fail_pgs++;
                addr = (vm_offset_t)NULL;
                goto out;
        }
        memguard_cursor = addr + size_v;
        *v2sizep(trunc_page(addr)) = req_size;
        *v2sizev(trunc_page(addr)) = size_v;
        memguard_succ++;
        if (req_size < PAGE_SIZE) {
                memguard_wasted += (PAGE_SIZE - req_size);
                if (do_guard) {
                        /*
                         * Align the request to 16 bytes, and return
                         * an address near the end of the page, to
                         * better detect array overrun.
                         */
                        req_size = roundup2(req_size, 16);
                        addr += (PAGE_SIZE - req_size);
                }
        }
out:
        return ((void *)addr);
}

int
is_memguard_addr(void *addr)
{
        vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

        return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}

/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
        vm_offset_t addr;
        u_long req_size, size, sizev;
        char *temp;
        int i;

        addr = trunc_page((uintptr_t)ptr);
        req_size = *v2sizep(addr);
        sizev = *v2sizev(addr);
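        /*
         * Three sizes are in play: req_size is the caller's original
         * request, size is that rounded up to whole pages (the backed
         * range), and sizev is the full vmem span, which also covers
         * the guard pages when they were configured at allocation time
         * (in which case sizev > size).
         */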
        size = round_page(req_size);

        /*
         * Page should not be guarded right now, so force a write.
         * The purpose of this is to increase the likelihood of
         * catching a double-free, but not necessarily a
         * tamper-after-free (the second thread freeing might not
         * write before freeing, so this forces it to and,
--- 5 unchanged lines hidden ---

        /*
         * This requires carnal knowledge of the implementation of
         * kmem_free(), but since we've already replaced kmem_malloc()
         * above, it's not really any worse.  We want to use the
         * vm_map lock to serialize updates to memguard_wasted, since
         * we had the lock at increment.
         */
        kmem_unback(kmem_object, addr, size);
        if (sizev > size)
                addr -= PAGE_SIZE;
        vmem_xfree(memguard_map, addr, sizev);
        if (req_size < PAGE_SIZE)
                memguard_wasted -= (PAGE_SIZE - req_size);
}

/*
 * Re-allocate an allocation that was originally guarded.
 */
void *
memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
    int flags)
--- 86 unchanged lines hidden ---