memguard.c (280665) → memguard.c (295222)
/*-
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
Deleted:
__FBSDID("$FreeBSD: head/sys/vm/memguard.c 280665 2015-03-26 05:20:18Z rpaulo $");
Added:
__FBSDID("$FreeBSD: head/sys/vm/memguard.c 295222 2016-02-03 23:30:17Z glebius $");

/*
 * MemGuard is a simple replacement allocator, intended for debugging
 * use only, which provides ElectricFence-style memory barrier
 * protection on objects being allocated, and is used to detect
 * tampering-after-free scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
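
/*
 * A minimal usage sketch, assembled from the tunables and sysctls
 * defined below (see memguard(9) for the authoritative procedure;
 * "subproc" is just an illustrative malloc(9) type name):
 *
 *     options DEBUG_MEMGUARD          (kernel configuration file)
 *     vm.memguard.divisor=10          (loader tunable: submap fraction)
 *     # sysctl vm.memguard.desc=subproc
 *
 * The last step selects a malloc(9) type by its short description so
 * that subsequent allocations of that type are guarded.
 */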

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
Deleted:
#include <sys/taskqueue.h>
Added:
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma_int.h>
#include <vm/memguard.h>

static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
        char desc[sizeof(vm_memguard_desc)];
        int error;

        strlcpy(desc, vm_memguard_desc, sizeof(desc));
        error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
        if (error != 0 || req->newptr == NULL)
                return (error);

        mtx_lock(&malloc_mtx);
        /* If mtp is NULL, it will be initialized in memguard_cmp() */
        vm_memguard_mtype = malloc_desc2type(desc);
        strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
        mtx_unlock(&malloc_mtx);
        return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");
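
/*
 * Example interaction with the handler above ("devbuf" is an
 * illustrative type description):
 *
 *     # sysctl vm.memguard.desc=devbuf
 *
 * sysctl_handle_string() copies the new string in, then
 * malloc_desc2type() resolves it to a struct malloc_type under
 * malloc_mtx; an unknown description simply leaves the type NULL.
 */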

static vm_offset_t memguard_cursor;
static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define MG_GUARD_AROUND         0x001
#define MG_GUARD_ALLLARGE       0x002
#define MG_GUARD_NOFREE         0x004
static int memguard_options = MG_GUARD_AROUND;
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RWTUN,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page\n"
    "\t0x004 - guard uma(9) zones with UMA_ZONE_NOFREE flag");

static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RWTUN,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");

/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kmem_map.  The memguard memory will be a submap.
 */
unsigned long
memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
{
        u_long mem_pgs, parent_size;

        vm_memguard_divisor = 10;
        /* CTLFLAG_RDTUN doesn't work during the early boot process. */
        TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

        parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
            PAGE_SIZE;
        /* Pick a conservative value if the provided value is unusable. */
        if ((vm_memguard_divisor <= 0) ||
            ((parent_size / vm_memguard_divisor) == 0))
                vm_memguard_divisor = 10;
        /*
         * Limit consumption of physical pages to
         * 1/vm_memguard_divisor of system memory.  If the KVA is
         * smaller than this then the KVA limit comes into play first.
         * This prevents memguard's page promotions from completely
         * using up memory, since most malloc(9) calls are sub-page.
         */
        mem_pgs = vm_cnt.v_page_count;
        memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
        /*
         * We want as much KVA as we can take safely.  Use at most our
         * allotted fraction of the parent map's size.  Limit this to
         * twice the physical memory to avoid using too much memory as
         * pagetable pages (size must be multiple of PAGE_SIZE).
         */
        memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
        if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
                memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
        if (km_size + memguard_mapsize > parent_size)
                memguard_mapsize = 0;
        return (km_size + memguard_mapsize);
}
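
/*
 * A worked example under assumed, illustrative numbers: with a 4 GB
 * kmem parent map, 4 GB of RAM (1 M pages of 4 KB), and the default
 * divisor of 10, memguard_physlimit becomes (1 M / 10) * 4 KB, about
 * 410 MB, and memguard_mapsize becomes round_page(4 GB / 10), also
 * about 410 MB of KVA.  The twice-physical-memory cap does not bite
 * here (410 MB / 8 KB is roughly 51 K, far below 1 M pages), so
 * memguard_fudge() returns km_size plus roughly 410 MB.
 */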

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vmem_t *parent)
{
        vm_offset_t base;

        vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
        vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
            PAGE_SIZE, 0, M_WAITOK);
        memguard_cursor = base;
        memguard_base = base;

        printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
        printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
        printf("\tMEMGUARD map size: %ju KBytes\n",
            (uintmax_t)memguard_mapsize >> 10);
}

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
        struct sysctl_oid_list *parent;

        parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

        SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
            &memguard_base, "MemGuard KVA base");
        SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
            &memguard_mapsize, "MemGuard KVA size");
#if 0
        SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
            &memguard_map->size, "MemGuard KVA used");
#endif
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused when the page is
 * wired, rather than the object field, which is in use.
 */
static u_long *
v2sizep(vm_offset_t va)
{
        vm_paddr_t pa;
        struct vm_page *p;

        pa = pmap_kextract(va);
        if (pa == 0)
                panic("MemGuard detected double-free of %p", (void *)va);
        p = PHYS_TO_VM_PAGE(pa);
        KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
            ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
        return (&p->plinks.memguard.p);
}

static u_long *
v2sizev(vm_offset_t va)
{
        vm_paddr_t pa;
        struct vm_page *p;

        pa = pmap_kextract(va);
        if (pa == 0)
                panic("MemGuard detected double-free of %p", (void *)va);
        p = PHYS_TO_VM_PAGE(pa);
        KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
            ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
        return (&p->plinks.memguard.v);
}
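
/*
 * Restating the invariant that memguard_alloc() below establishes: for
 * a live guarded allocation, the first backed page's plinks.memguard.p
 * field holds the caller's req_size and plinks.memguard.v holds the
 * full KVA reservation size_v, including any guard pages.
 */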

/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
        vm_offset_t addr;
        u_long size_p, size_v;
        int do_guard, rv;

        size_p = round_page(req_size);
        if (size_p == 0)
                return (NULL);
        /*
         * To ensure there are holes on both sides of the allocation,
         * request 2 extra pages of KVA.  We will only actually add a
         * vm_map_entry and get pages for the original request.  Save
         * the value of memguard_options so we have a consistent
         * value.
         */
        size_v = size_p;
        do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
        if (do_guard)
                size_v += 2 * PAGE_SIZE;

        /*
         * When we pass our memory limit, reject sub-page allocations.
         * Page-size and larger allocations will use the same amount
         * of physical memory whether we allocate or hand off to
         * uma_large_alloc(), so keep those.
         */
        if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
            req_size < PAGE_SIZE) {
                addr = (vm_offset_t)NULL;
                memguard_fail_pgs++;
                goto out;
        }
        /*
         * Keep a moving cursor so that we avoid recycling KVA for as
         * long as possible.  It's not perfect, since we don't know in
         * what order previous allocations will be freed, but it's
         * simple and fast, and requires O(1) additional storage if
         * guard pages are not used.
         *
         * XXX This scheme will lead to greater fragmentation of the
         * map, unless vm_map_findspace() is tweaked.
         */
        for (;;) {
                if (vmem_xalloc(memguard_arena, size_v, 0, 0, 0,
                    memguard_cursor, VMEM_ADDR_MAX,
                    M_BESTFIT | M_NOWAIT, &addr) == 0)
                        break;
                /*
                 * The map has no space.  This may be due to
                 * fragmentation, or because the cursor is near the
                 * end of the map.
                 */
                if (memguard_cursor == memguard_base) {
                        memguard_fail_kva++;
                        addr = (vm_offset_t)NULL;
                        goto out;
                }
                memguard_wrap++;
                memguard_cursor = memguard_base;
        }
        if (do_guard)
                addr += PAGE_SIZE;
        rv = kmem_back(kmem_object, addr, size_p, flags);
        if (rv != KERN_SUCCESS) {
                vmem_xfree(memguard_arena, addr, size_v);
                memguard_fail_pgs++;
                addr = (vm_offset_t)NULL;
                goto out;
        }
        memguard_cursor = addr + size_v;
        *v2sizep(trunc_page(addr)) = req_size;
        *v2sizev(trunc_page(addr)) = size_v;
        memguard_succ++;
        if (req_size < PAGE_SIZE) {
                memguard_wasted += (PAGE_SIZE - req_size);
                if (do_guard) {
                        /*
                         * Align the request to 16 bytes, and return
                         * an address near the end of the page, to
                         * better detect array overrun.
                         */
                        req_size = roundup2(req_size, 16);
                        addr += (PAGE_SIZE - req_size);
                }
        }
out:
        return ((void *)addr);
}
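
/*
 * Resulting KVA layout for a guarded sub-page request (sketch; it
 * assumes MG_GUARD_AROUND is set):
 *
 *     +------------+----------------------------+------------+
 *     | guard page | size_p backed page(s)      | guard page |
 *     | (unbacked) | ... [16-aligned req_size]  | (unbacked) |
 *     +------------+----------------------------+------------+
 *
 * The returned pointer sits near the end of the backed region, so an
 * overrun walks off the last backed page into the trailing unbacked
 * guard page and faults immediately.
 */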

int
is_memguard_addr(void *addr)
{
        vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

        return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}
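
/*
 * A sketch of how a caller distinguishes MemGuard pointers on the free
 * path (hypothetical fragment; the real DEBUG_MEMGUARD hook lives in
 * the malloc(9) implementation):
 *
 *     if (is_memguard_addr(addr)) {
 *             memguard_free(addr);
 *             return;
 *     }
 *     ... fall through to the normal UMA/malloc free path ...
 */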

/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
        vm_offset_t addr;
        u_long req_size, size, sizev;
        char *temp;
        int i;

        addr = trunc_page((uintptr_t)ptr);
        req_size = *v2sizep(addr);
        sizev = *v2sizev(addr);
        size = round_page(req_size);

        /*
         * Page should not be guarded right now, so force a write.
         * The purpose of this is to increase the likelihood of
         * catching a double-free, but not necessarily a
         * tamper-after-free (the second thread freeing might not
         * write before freeing, so this forces it to and,
         * subsequently, trigger a fault).
         */
        temp = ptr;
        for (i = 0; i < size; i += PAGE_SIZE)
                temp[i] = 'M';

        /*
         * This requires carnal knowledge of the implementation of
         * kmem_free(), but since we've already replaced kmem_malloc()
         * above, it's not really any worse.  We want to use the
         * vm_map lock to serialize updates to memguard_wasted, since
         * we had the lock at increment.
         */
        kmem_unback(kmem_object, addr, size);
        if (sizev > size)
                addr -= PAGE_SIZE;
        vmem_xfree(memguard_arena, addr, sizev);
        if (req_size < PAGE_SIZE)
                memguard_wasted -= (PAGE_SIZE - req_size);
}

/*
 * Re-allocate an allocation that was originally guarded.
 */
void *
memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
    int flags)
{
        void *newaddr;
        u_long old_size;

        /*
         * Allocate the new block.  Force the allocation to be guarded
         * as the original may have been guarded through random
         * chance, and that should be preserved.
         */
        if ((newaddr = memguard_alloc(size, flags)) == NULL)
                return (NULL);

        /* Copy over original contents. */
        old_size = *v2sizep(trunc_page((uintptr_t)addr));
        bcopy(addr, newaddr, min(size, old_size));
        memguard_free(addr);
        return (newaddr);
}

static int
memguard_cmp(unsigned long size)
{

        if (size < memguard_minsize) {
                memguard_minsize_reject++;
                return (0);
        }
        if ((memguard_options & MG_GUARD_ALLLARGE) != 0 && size >= PAGE_SIZE)
                return (1);
        if (memguard_frequency > 0 &&
            (random() % 100000) < memguard_frequency) {
                memguard_frequency_hits++;
                return (1);
        }

        return (0);
}
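
/*
 * For example (assumed setting, not a default): with
 * vm.memguard.frequency=1000, the (random() % 100000) test above
 * succeeds for roughly 1000 out of every 100000 candidate allocations,
 * i.e. about 1% of them are randomly guarded.
 */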

int
memguard_cmp_mtp(struct malloc_type *mtp, unsigned long size)
{

        if (memguard_cmp(size))
                return (1);

#if 1
        /*
         * The safest way of comparison is to always compare the short
         * description string of the memory type, but it is also the
         * slowest way.
         */
        return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
        /*
         * If we compare pointers, there are two possible problems:
         * 1. Memory type was unloaded and new memory type was allocated at the
         *    same address.
         * 2. Memory type was unloaded and loaded again, but allocated at a
         *    different address.
         */
        if (vm_memguard_mtype != NULL)
                return (mtp == vm_memguard_mtype);
        if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
                vm_memguard_mtype = mtp;
                return (1);
        }
        return (0);
#endif
}

int
memguard_cmp_zone(uma_zone_t zone)
{

        if ((memguard_options & MG_GUARD_NOFREE) == 0 &&
            zone->uz_flags & UMA_ZONE_NOFREE)
                return (0);

        if (memguard_cmp(zone->uz_size))
                return (1);

        /*
         * The safest way of comparison is to always compare the zone
         * name, but it is also the slowest way.
         */
        return (strcmp(zone->uz_name, vm_memguard_desc) == 0);
}