#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma_int.h>
#include <vm/memguard.h>

static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	char desc[sizeof(vm_memguard_desc)];
	int error;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&malloc_mtx);
	/* If mtp is NULL, it will be initialized in memguard_cmp() */
	vm_memguard_mtype = malloc_desc2type(desc);
	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");

static vm_offset_t memguard_cursor;
static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define	MG_GUARD_AROUND		0x001
#define	MG_GUARD_ALLLARGE	0x002
#define	MG_GUARD_NOFREE		0x004
static int memguard_options = MG_GUARD_AROUND;
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RWTUN,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page\n"
    "\t0x004 - guard uma(9) zones with UMA_ZONE_NOFREE flag");
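
/*
 * Example use (illustrative, after memguard(9); not part of this file):
 * with "options DEBUG_MEMGUARD" in the kernel config, a malloc(9) type
 * is guarded at runtime by its short description, and the option bits
 * above can be preset from loader.conf(5):
 *
 *	sysctl vm.memguard.desc=ithread
 *
 *	# /boot/loader.conf
 *	vm.memguard.desc="ithread"
 *	vm.memguard.options="3"	# guard pages + all page-or-larger allocations
 */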
static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RWTUN,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");


/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kmem_map.  The memguard memory will be a submap.
 */
unsigned long
memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
{
	u_long mem_pgs, parent_size;

	vm_memguard_divisor = 10;
	/* CTLFLAG_RDTUN doesn't work during the early boot process. */
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
	    PAGE_SIZE;
	/* Pick a conservative value if the provided value is unusable. */
	if ((vm_memguard_divisor <= 0) ||
	    ((parent_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	/*
	 * Limit consumption of physical pages to
	 * 1/vm_memguard_divisor of system memory.  If the KVA is
	 * smaller than this then the KVA limit comes into play first.
	 * This prevents memguard's page promotions from completely
	 * using up memory, since most malloc(9) calls are sub-page.
	 */
	mem_pgs = vm_cnt.v_page_count;
	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
	/*
	 * We want as much KVA as we can take safely.  Use at most our
	 * allotted fraction of the parent map's size.  Limit this to
	 * twice the physical memory to avoid using too much memory as
	 * pagetable pages (size must be a multiple of PAGE_SIZE).
	 */
	memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_size + memguard_mapsize > parent_size)
		memguard_mapsize = 0;
	return (km_size + memguard_mapsize);
}

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vmem_t *parent)
{
	vm_offset_t base;

	vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
	vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
	    PAGE_SIZE, 0, M_WAITOK);
	memguard_cursor = base;
	memguard_base = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map size: %ju KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}
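
/*
 * Worked sizing example for memguard_fudge() above (hypothetical
 * numbers): with 4 GB of RAM (1,048,576 4 KB pages), a 16 GB parent
 * map and the default divisor of 10, memguard_physlimit becomes
 * (1048576 / 10) * 4096 bytes (about 410 MB) and memguard_mapsize
 * becomes round_page(16 GB / 10) (about 1.6 GB), well under the
 * 2 * physmem cap on pagetable overhead.
 */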

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

	SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
	    &memguard_base, "MemGuard KVA base");
	SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
	    &memguard_mapsize, "MemGuard KVA size");
#if 0
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
	    &memguard_map->size, "MemGuard KVA used");
#endif
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused when the page is
 * wired, rather than the object field, which is in use.
 */
static u_long *
v2sizep(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.p);
}

static u_long *
v2sizev(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.v);
}
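
/*
 * KVA layout produced by memguard_alloc() below when MG_GUARD_AROUND
 * is set (illustrative):
 *
 *	+------------+-----------------------------+------------+
 *	| guard page | size_p bytes of wired pages | guard page |
 *	+------------+-----------------------------+------------+
 *	  unbacked     kmem_back()ed, returned       unbacked
 *
 * Only the middle size_p bytes are ever backed by physical pages;
 * touching either guard page faults immediately.
 */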

/*
 * Allocate a single object of the specified size with the specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr;
	u_long size_p, size_v;
	int do_guard, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);
	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  We will only actually add a
	 * vm_map_entry and get pages for the original request.  Save
	 * the value of memguard_options so we have a consistent
	 * value.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;

	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * uma_large_alloc(), so keep those.
	 */
	if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}
	/*
	 * Keep a moving cursor so that we avoid recycling KVA for as
	 * long as possible.  This is not perfect, since we do not know
	 * in what order previous allocations will be freed, but it is
	 * simple and fast, and requires O(1) additional storage if
	 * guard pages are not used.
	 *
	 * XXX This scheme will lead to greater fragmentation of the
	 * map, unless vm_map_findspace() is tweaked.
	 */
	for (;;) {
		if (vmem_xalloc(memguard_arena, size_v, 0, 0, 0,
		    memguard_cursor, VMEM_ADDR_MAX,
		    M_BESTFIT | M_NOWAIT, &addr) == 0)
			break;
		/*
		 * The map has no space.  This may be due to
		 * fragmentation, or because the cursor is near the
		 * end of the map.
		 */
		if (memguard_cursor == memguard_base) {
			memguard_fail_kva++;
			addr = (vm_offset_t)NULL;
			goto out;
		}
		memguard_wrap++;
		memguard_cursor = memguard_base;
	}
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(kmem_object, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		vmem_xfree(memguard_arena, addr, size_v);
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	memguard_cursor = addr + size_v;
	*v2sizep(trunc_page(addr)) = req_size;
	*v2sizev(trunc_page(addr)) = size_v;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	return ((void *)addr);
}

int
is_memguard_addr(void *addr)
{
	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

	return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}

/*
 * Free the specified single object.
 */
void
memguard_free(void *ptr)
{
	vm_offset_t addr;
	u_long req_size, size, sizev;
	char *temp;
	int i;

	addr = trunc_page((uintptr_t)ptr);
	req_size = *v2sizep(addr);
	sizev = *v2sizev(addr);
	size = round_page(req_size);

	/*
	 * The page is not guarded right now, so force a write.  This
	 * increases the likelihood of catching a double-free, though
	 * not necessarily a tamper-after-free: a second thread freeing
	 * the same memory might not otherwise write to it before
	 * freeing, so this forces a write that will subsequently
	 * trigger a fault.
	 */
	temp = ptr;
	for (i = 0; i < size; i += PAGE_SIZE)
		temp[i] = 'M';

	/*
	 * This requires carnal knowledge of the implementation of
	 * kmem_free(), but since we've already replaced kmem_malloc()
	 * above, it's not really any worse.  We want to use the
	 * vm_map lock to serialize updates to memguard_wasted, since
	 * we had the lock at increment.
	 */
	kmem_unback(kmem_object, addr, size);
	if (sizev > size)
		addr -= PAGE_SIZE;
	vmem_xfree(memguard_arena, addr, sizev);
	if (req_size < PAGE_SIZE)
		memguard_wasted -= (PAGE_SIZE - req_size);
}
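
/*
 * Illustrative failure modes after memguard_free() (sketch only; it
 * assumes free(9) routes guarded addresses here via is_memguard_addr()):
 *
 *	p = malloc(64, M_SUBPROC, M_WAITOK);
 *	free(p, M_SUBPROC);
 *	*(char *)p = 0;		// faults: kmem_unback() removed the pages
 *	free(p, M_SUBPROC);	// panics in v2sizep(): pmap_kextract() == 0
 *
 * The freed KVA also stays unmapped until the cursor wraps around,
 * keeping the window for catching stale accesses as large as possible.
 */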

/*
 * Re-allocate an allocation that was originally guarded.
 */
void *
memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
    int flags)
{
	void *newaddr;
	u_long old_size;

	/*
	 * Allocate the new block.  Force the allocation to be guarded
	 * as the original may have been guarded through random
	 * chance, and that should be preserved.
	 */
	if ((newaddr = memguard_alloc(size, flags)) == NULL)
		return (NULL);

	/* Copy over original contents. */
	old_size = *v2sizep(trunc_page((uintptr_t)addr));
	bcopy(addr, newaddr, min(size, old_size));
	memguard_free(addr);
	return (newaddr);
}

static int
memguard_cmp(unsigned long size)
{

	if (size < memguard_minsize) {
		memguard_minsize_reject++;
		return (0);
	}
	if ((memguard_options & MG_GUARD_ALLLARGE) != 0 && size >= PAGE_SIZE)
		return (1);
	if (memguard_frequency > 0 &&
	    (random() % 100000) < memguard_frequency) {
		memguard_frequency_hits++;
		return (1);
	}

	return (0);
}

int
memguard_cmp_mtp(struct malloc_type *mtp, unsigned long size)
{

	if (memguard_cmp(size))
		return (1);

#if 1
	/*
	 * The safest way of comparison is to always compare the short
	 * description string of the memory type, but it is also the
	 * slowest way.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. The memory type was unloaded and a new memory type was
	 *    allocated at the same address.
	 * 2. The memory type was unloaded and loaded again, but
	 *    allocated at a different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}

int
memguard_cmp_zone(uma_zone_t zone)
{

	if ((memguard_options & MG_GUARD_NOFREE) == 0 &&
	    zone->uz_flags & UMA_ZONE_NOFREE)
		return (0);

	if (memguard_cmp(zone->uz_size))
		return (1);

	/*
	 * The safest way of comparison is to always compare the zone
	 * name, but it is also the slowest way.
	 */
	return (strcmp(zone->uz_name, vm_memguard_desc) == 0);
}
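
/*
 * Example (illustrative): uma(9) zones are matched by name through the
 * same vm.memguard.desc knob, e.g. setting it to "mbuf" guards the mbuf
 * zone.  Zones created with UMA_ZONE_NOFREE are skipped unless the
 * 0x004 option bit is set.
 */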