1/*- 2 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>. 3 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/) 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: --- 12 unchanged lines hidden (view full) --- 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> |
29__FBSDID("$FreeBSD: head/sys/vm/memguard.c 226313 2011-10-12 18:08:28Z glebius $"); |
30 31/* 32 * MemGuard is a simple replacement allocator for debugging only 33 * which provides ElectricFence-style memory barrier protection on 34 * objects being allocated, and is used to detect tampering-after-free 35 * scenarios. 36 * 37 * See the memguard(9) man page for more information on using MemGuard. --- 13 unchanged lines hidden (view full) --- 51 52#include <vm/vm.h> 53#include <vm/uma.h> 54#include <vm/vm_param.h> 55#include <vm/vm_page.h> 56#include <vm/vm_map.h> 57#include <vm/vm_object.h> 58#include <vm/vm_extern.h> |
59#include <vm/uma_int.h> |
60#include <vm/memguard.h> 61 62SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data"); 63/* 64 * The vm_memguard_divisor variable controls how much of kmem_map should be 65 * reserved for MemGuard. 66 */ 67static u_int vm_memguard_divisor; --- 53 unchanged lines hidden (view full) --- 121 &memguard_wrap, 0, "MemGuard cursor wrap count"); 122SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD, 123 &memguard_succ, 0, "Count of successful MemGuard allocations"); 124SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD, 125 &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA"); 126SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD, 127 &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages"); 128 |
/*
 * Option bits for the vm.memguard.options tunable/sysctl; the default
 * enables only the guard pages around each allocation.  The per-bit
 * meanings are spelled out in the SYSCTL description string below.
 */
129#define MG_GUARD_AROUND 0x001 130#define MG_GUARD_ALLLARGE 0x002 131#define MG_GUARD_NOFREE 0x004 132static int memguard_options = MG_GUARD_AROUND; |
133TUNABLE_INT("vm.memguard.options", &memguard_options); 134SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW, 135 &memguard_options, 0, 136 "MemGuard options:\n" 137 "\t0x001 - add guard pages around each allocation\n" |
138 "\t0x002 - always use MemGuard for allocations over a page\n" 139 "\t0x004 - guard uma(9) zones with UMA_ZONE_NOFREE flag"); |
140 141static u_int memguard_minsize; 142static u_long memguard_minsize_reject; 143SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW, 144 &memguard_minsize, 0, "Minimum size for page promotion"); 145SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD, 146 &memguard_minsize_reject, 0, "# times rejected for size"); 147 --- 132 unchanged lines hidden (view full) --- 280 /* 281 * To ensure there are holes on both sides of the allocation, 282 * request 2 extra pages of KVA. We will only actually add a 283 * vm_map_entry and get pages for the original request. Save 284 * the value of memguard_options so we have a consistent 285 * value. 286 */ 287 size_v = size_p; |
288 do_guard = (memguard_options & MG_GUARD_AROUND) != 0; |
289 if (do_guard) 290 size_v += 2 * PAGE_SIZE; 291 292 vm_map_lock(memguard_map); 293 /* 294 * When we pass our memory limit, reject sub-page allocations. 295 * Page-size and larger allocations will use the same amount 296 * of physical memory whether we allocate or hand off to --- 130 unchanged lines hidden (view full) --- 427 428 /* Copy over original contents. */ 429 old_size = *v2sizep(trunc_page((uintptr_t)addr)); 430 bcopy(addr, newaddr, min(size, old_size)); 431 memguard_free(addr); 432 return (newaddr); 433} 434 |
/*
 * memguard_cmp: size-based policy test — returns nonzero when an
 * allocation of "size" bytes should be diverted to MemGuard.
 * Sizes below vm.memguard.minsize are rejected (and counted in
 * memguard_minsize_reject); with MG_GUARD_ALLLARGE set, anything of at
 * least a page is accepted; otherwise allocations are sampled randomly
 * at a rate of memguard_frequency hits per 100000.
 */
435static int 436memguard_cmp(unsigned long size) |
437{ 438 439 if (size < memguard_minsize) { 440 memguard_minsize_reject++; 441 return (0); 442 } |
443 if ((memguard_options & MG_GUARD_ALLLARGE) != 0 && size >= PAGE_SIZE) |
444 return (1); 445 if (memguard_frequency > 0 && 446 (random() % 100000) < memguard_frequency) { 447 memguard_frequency_hits++; 448 return (1); 449 } |
450 451 return (0); 452} 453 /* memguard_cmp_mtp: decide whether allocations of malloc type "mtp" go to MemGuard — first by size policy, then by matching the type's short description against vm.memguard.desc. */ 454int 455memguard_cmp_mtp(struct malloc_type *mtp, unsigned long size) 456{ 457 458 if (memguard_cmp(size)) 459 return(1); 460 |
461#if 1 462 /* 463 * The safest way of comparsion is to always compare short description 464 * string of memory type, but it is also the slowest way. 465 */ 466 return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0); 467#else 468 /* --- 7 unchanged lines hidden (view full) --- 476 return (mtp == vm_memguard_mtype); 477 if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) { 478 vm_memguard_mtype = mtp; 479 return (1); 480 } 481 return (0); 482#endif 483} |
484 485int 486memguard_cmp_zone(uma_zone_t zone) 487{ 488 489 if ((memguard_options & MG_GUARD_NOFREE) == 0 && 490 zone->uz_flags & UMA_ZONE_NOFREE) 491 return (0); 492 493 if (memguard_cmp(zone->uz_size)) 494 return (1); 495 496 /* 497 * The safest way of comparsion is to always compare zone name, 498 * but it is also the slowest way. 499 */ 500 return (strcmp(zone->uz_name, vm_memguard_desc) == 0); 501} |