/*-
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MemGuard is a simple replacement allocator, intended for debugging use
 * only, which provides ElectricFence-style memory barrier protection on
 * allocated objects and is used to detect tamper-after-free scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
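
/*
 * Example usage (a sketch; these are the tunables and sysctls defined
 * below in this file): to guard every allocation of the malloc(9) type
 * whose short description is "ifnet" (an arbitrary example), with guard
 * pages on both sides of each object, set in loader.conf(5) or at
 * runtime via sysctl(8):
 *
 *	vm.memguard.desc="ifnet"
 *	vm.memguard.options="1"	(MG_GUARD_AROUND, the default)
 */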

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma_int.h>
#include <vm/memguard.h>

static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of the memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));

static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	char desc[sizeof(vm_memguard_desc)];
	int error;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&malloc_mtx);
	/*
	 * If mtp is NULL, it will be initialized in memguard_cmp_mtp().
	 */
	vm_memguard_mtype = malloc_desc2type(desc);
	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");

static vm_offset_t memguard_cursor;
static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define MG_GUARD_AROUND		0x001
#define MG_GUARD_ALLLARGE	0x002
#define MG_GUARD_NOFREE		0x004
static int memguard_options = MG_GUARD_AROUND;
TUNABLE_INT("vm.memguard.options", &memguard_options);
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page\n"
    "\t0x004 - guard uma(9) zones with UMA_ZONE_NOFREE flag");

static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
TUNABLE_INT("vm.memguard.frequency", &memguard_frequency);
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RW,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");

/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kmem_map.  The memguard memory will be a submap.
 */
unsigned long
memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
{
	u_long mem_pgs, parent_size;

	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
	    PAGE_SIZE;
	/* Pick a conservative value if the provided value is unreasonable. */
	if ((vm_memguard_divisor <= 0) ||
	    ((parent_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	/*
	 * Limit consumption of physical pages to
	 * 1/vm_memguard_divisor of system memory.  If the KVA is
	 * smaller than this then the KVA limit comes into play first.
	 * This prevents memguard's page promotions from completely
	 * using up memory, since most malloc(9) calls are sub-page.
	 */
	mem_pgs = cnt.v_page_count;
	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
	/*
	 * We want as much KVA as we can take safely.  Use at most our
	 * allotted fraction of the parent map's size.  Limit this to
	 * twice the physical memory to avoid using too much memory as
	 * pagetable pages (size must be a multiple of PAGE_SIZE).
	 */
	memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_size + memguard_mapsize > parent_size)
		memguard_mapsize = 0;
	return (km_size + memguard_mapsize);
}
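
/*
 * For example (illustrative numbers, not measured values): with the
 * default vm.memguard.divisor of 10, a 4 GB parent map, and 1 GB of
 * physical memory, the code above reserves roughly 410 MB of KVA for
 * the MemGuard arena and caps MemGuard's physical page consumption at
 * roughly 100 MB.
 */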

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single contiguous chunk of kernel address space, managed by a
 * private vmem arena.
 */
void
memguard_init(vmem_t *parent)
{
	vm_offset_t base;

	vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
	vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
	    PAGE_SIZE, 0, M_WAITOK);
	memguard_cursor = base;
	memguard_base = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map size: %ju KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
	    &memguard_base, "MemGuard KVA base");
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
	    &memguard_mapsize, "MemGuard KVA size");
#if 0
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
	    &memguard_map->size, "MemGuard KVA used");
#endif
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused when the page is
 * wired, rather than in the object field, which is in use.
 */
static u_long *
v2sizep(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.p);
}

static u_long *
v2sizev(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.v);
}

/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr;
	u_long size_p, size_v;
	int do_guard, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);
	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  Only the original request is
	 * actually backed with pages.  Save the value of
	 * memguard_options so we have a consistent value.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;
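
	/*
	 * Resulting layout when guard pages are in use: the extra
	 * pages are reserved in the arena but never backed by
	 * physical memory, so any access to them faults.
	 *
	 *	+------------+-------------------+------------+
	 *	| guard page | size_p data pages | guard page |
	 *	+------------+-------------------+------------+
	 *	  unmapped     backed and wired     unmapped
	 */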

	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * uma_large_alloc(), so keep those.
	 */
	if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}
	/*
	 * Keep a moving cursor so that we avoid recycling KVA for as
	 * long as possible.  This is not perfect, since we don't know
	 * in what order previous allocations will be freed, but it is
	 * simple and fast, and requires O(1) additional storage if
	 * guard pages are not used.
	 *
	 * XXX This scheme will lead to greater fragmentation of the
	 * arena, unless the allocation strategy is tweaked.
	 */
	for (;;) {
		if (vmem_xalloc(memguard_arena, size_v, 0, 0, 0,
		    memguard_cursor, VMEM_ADDR_MAX,
		    M_BESTFIT | M_NOWAIT, &addr) == 0)
			break;
		/*
		 * The arena has no space.  This may be due to
		 * fragmentation, or because the cursor is near the
		 * end of the arena.
		 */
		if (memguard_cursor == memguard_base) {
			memguard_fail_kva++;
			addr = (vm_offset_t)NULL;
			goto out;
		}
		memguard_wrap++;
		memguard_cursor = memguard_base;
	}
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(kmem_object, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		/* Release the whole reservation, including guard pages. */
		vmem_xfree(memguard_arena,
		    do_guard ? addr - PAGE_SIZE : addr, size_v);
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	memguard_cursor = addr + size_v;
	*v2sizep(trunc_page(addr)) = req_size;
	*v2sizev(trunc_page(addr)) = size_v;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	return ((void *)addr);
}

int
is_memguard_addr(void *addr)
{
	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

	return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}

/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
	vm_offset_t addr;
	u_long req_size, size, sizev;
	char *temp;
	int i;

	addr = trunc_page((uintptr_t)ptr);
	req_size = *v2sizep(addr);
	sizev = *v2sizev(addr);
	size = round_page(req_size);

	/*
	 * The page should not be guarded right now, so force a write.
	 * The purpose of this is to increase the likelihood of
	 * catching a double-free, but not necessarily a
	 * tamper-after-free (the second thread freeing might not
	 * write before freeing, so this forces it to, and
	 * subsequently triggers a fault).
	 */
	temp = ptr;
	for (i = 0; i < size; i += PAGE_SIZE)
		temp[i] = 'M';

	/*
	 * This requires carnal knowledge of the implementation of
	 * kmem_unback(), but since we've already replaced kmem_malloc()
	 * above, it's not really any worse.  We want to use the
	 * vm_map lock to serialize updates to memguard_wasted, since
	 * we had the lock at increment.
	 */
	kmem_unback(kmem_object, addr, size);
	if (sizev > size)
		addr -= PAGE_SIZE;
	vmem_xfree(memguard_arena, addr, sizev);
	if (req_size < PAGE_SIZE)
		memguard_wasted -= (PAGE_SIZE - req_size);
}
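
/*
 * Note on double-free detection: once kmem_unback() above has removed
 * the backing pages, pmap_kextract() on the same address returns 0, so
 * a second memguard_free() of the pointer panics in v2sizep().
 */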

/*
 * Re-allocate an allocation that was originally guarded.
 */
void *
memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
    int flags)
{
	void *newaddr;
	u_long old_size;

	/*
	 * Allocate the new block.  Force the allocation to be guarded
	 * as the original may have been guarded through random
	 * chance, and that should be preserved.
	 */
	if ((newaddr = memguard_alloc(size, flags)) == NULL)
		return (NULL);

	/* Copy over original contents. */
	old_size = *v2sizep(trunc_page((uintptr_t)addr));
	bcopy(addr, newaddr, min(size, old_size));
	memguard_free(addr);
	return (newaddr);
}

static int
memguard_cmp(unsigned long size)
{

	if (size < memguard_minsize) {
		memguard_minsize_reject++;
		return (0);
	}
	if ((memguard_options & MG_GUARD_ALLLARGE) != 0 && size >= PAGE_SIZE)
		return (1);
	if (memguard_frequency > 0 &&
	    (random() % 100000) < memguard_frequency) {
		memguard_frequency_hits++;
		return (1);
	}

	return (0);
}

int
memguard_cmp_mtp(struct malloc_type *mtp, unsigned long size)
{

	if (memguard_cmp(size))
		return (1);

#if 1
	/*
	 * The safest way of comparison is to always compare the short
	 * description string of the memory type, but it is also the
	 * slowest way.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. Memory type was unloaded and a new memory type was
	 *    allocated at the same address.
	 * 2. Memory type was unloaded and loaded again, but allocated
	 *    at a different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}
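
/*
 * A simplified sketch, for illustration, of how a caller such as
 * malloc(9) is expected to drive this interface; the real hooks live
 * in kern_malloc.c under the DEBUG_MEMGUARD kernel option:
 *
 *	if (memguard_cmp_mtp(mtp, size)) {
 *		va = memguard_alloc(size, flags);
 *		if (va != NULL)
 *			return (va);
 *	}
 *
 * and, correspondingly, free(9) routes guarded pointers back here:
 *
 *	if (is_memguard_addr(addr)) {
 *		memguard_free(addr);
 *		return;
 *	}
 */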

int
memguard_cmp_zone(uma_zone_t zone)
{

	if ((memguard_options & MG_GUARD_NOFREE) == 0 &&
	    (zone->uz_flags & UMA_ZONE_NOFREE) != 0)
		return (0);

	if (memguard_cmp(zone->uz_size))
		return (1);

	/*
	 * The safest way of comparison is to always compare the zone
	 * name, but it is also the slowest way.
	 */
	return (strcmp(zone->uz_name, vm_memguard_desc) == 0);
}