/* memguard.c revision 140605 */
1/*
2 * Copyright (c) 2005,
3 *     Bosko Milekic <bmilekic@freebsd.org>
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/vm/memguard.c 140605 2005-01-22 00:09:34Z bmilekic $");
29
30/*
31 * MemGuard is a simple replacement allocator for debugging only
32 * which provides ElectricFence-style memory barrier protection on
33 * objects being allocated, and is used to detect tampering-after-free
34 * scenarios.
35 *
36 * See the memguard(9) man page for more information on using MemGuard.
37 */
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/types.h>
43#include <sys/queue.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/malloc.h>
47
48#include <vm/vm.h>
49#include <vm/vm_page.h>
50#include <vm/vm_map.h>
51#include <vm/vm_extern.h>
52#include <vm/memguard.h>
53
/*
 * Global MemGuard data.  All of it is protected by memguard_mtx (declared
 * below via MEMGUARD_CRIT_SECTION_DECLARE) except memguard_map/mapsize,
 * which are written once during memguard_init().
 */
static vm_map_t memguard_map;		/* submap all MemGuard pages come from */
static unsigned long memguard_mapsize;	/* total size of the submap, bytes */
static unsigned long memguard_mapused;	/* bytes currently handed out */
/* One FIFO entry per freed (and still page-protected) object. */
struct memguard_entry {
	STAILQ_ENTRY(memguard_entry) entries;
	void *ptr;			/* page-aligned address of freed object */
};
/* Freed pages queue here, oldest first, awaiting recycling. */
static STAILQ_HEAD(memguard_fifo, memguard_entry) memguard_fifo_pool;

/*
 * Local prototypes.
 */
static void	memguard_guard(void *addr);
static void	memguard_unguard(void *addr);

/*
 * Local macros.  MemGuard data is global, so replace these with whatever
 * your system uses to protect global data (if it is kernel-level
 * parallelized).  This is for porting among BSDs.
 */
#define	MEMGUARD_CRIT_SECTION_DECLARE	static struct mtx memguard_mtx
#define	MEMGUARD_CRIT_SECTION_INIT				\
	mtx_init(&memguard_mtx, "MemGuard mtx", NULL, MTX_DEF)
#define	MEMGUARD_CRIT_SECTION_ENTER	mtx_lock(&memguard_mtx)
#define	MEMGUARD_CRIT_SECTION_EXIT	mtx_unlock(&memguard_mtx)
MEMGUARD_CRIT_SECTION_DECLARE;
83
84/*
85 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
86 * out of a single VM map (contiguous chunk of address space).
87 */
88void
89memguard_init(vm_map_t parent_map, unsigned long size)
90{
91	char *base, *limit;
92
93	/* size must be multiple of PAGE_SIZE */
94	size /= PAGE_SIZE;
95	size++;
96	size *= PAGE_SIZE;
97
98	memguard_map = kmem_suballoc(parent_map, (vm_offset_t *)&base,
99	    (vm_offset_t *)&limit, (vm_size_t)size);
100	memguard_map->system_map = 1;
101	memguard_mapsize = size;
102	memguard_mapused = 0;
103
104	MEMGUARD_CRIT_SECTION_INIT;
105	MEMGUARD_CRIT_SECTION_ENTER;
106	STAILQ_INIT(&memguard_fifo_pool);
107	MEMGUARD_CRIT_SECTION_EXIT;
108
109	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
110	printf("\tMEMGUARD map base: %p\n", base);
111	printf("\tMEMGUARD map limit: %p\n", limit);
112	printf("\tMEMGUARD map size: %ld (Bytes)\n", size);
113}
114
/*
 * Allocate a single object of specified size with specified flags (either
 * M_WAITOK or M_NOWAIT).  Returns a page-backed object whose surrounding
 * page protections catch tamper-after-free, or NULL on M_NOWAIT failure.
 */
void *
memguard_alloc(unsigned long size, int flags)
{
	void *obj = NULL;
	struct memguard_entry *e = NULL;

	/* XXX: MemGuard does not handle > PAGE_SIZE objects. */
	if (size > PAGE_SIZE)
		panic("MEMGUARD: Cannot handle objects > PAGE_SIZE");

	/*
	 * If we haven't exhausted the memguard_map yet, allocate from
	 * it and grab a new page, even if we have recycled pages in our
	 * FIFO.  This is because we wish to allow recycled pages to live
	 * guarded in the FIFO for as long as possible in order to catch
	 * even very late tamper-after-frees, even though it means that
	 * we end up wasting more memory, this is only a DEBUGGING allocator
	 * after all.
	 */
	MEMGUARD_CRIT_SECTION_ENTER;
	if (memguard_mapused >= memguard_mapsize) {
		/*
		 * Map exhausted: try to recycle the oldest freed page.
		 * memguard_mapused stays as-is since the page is reused,
		 * not newly mapped.
		 */
		e = STAILQ_FIRST(&memguard_fifo_pool);
		if (e != NULL) {
			STAILQ_REMOVE(&memguard_fifo_pool, e,
			    memguard_entry, entries);
			/* Drop the lock before free()/unguard; e is ours. */
			MEMGUARD_CRIT_SECTION_EXIT;
			obj = e->ptr;
			free(e, M_TEMP);
			/* Re-enable write access before handing it out. */
			memguard_unguard(obj);
			if (flags & M_ZERO)
				bzero(obj, PAGE_SIZE);
			return obj;
		}
		MEMGUARD_CRIT_SECTION_EXIT;
		/* Nothing to recycle either; M_WAITOK cannot block here. */
		if (flags & M_WAITOK)
			panic("MEMGUARD: Failed with M_WAITOK: " \
			    "memguard_map too small");
		return NULL;
	} else
		/* Reserve the page under the lock; undone below on failure. */
		memguard_mapused += PAGE_SIZE;
	MEMGUARD_CRIT_SECTION_EXIT;

	/* NOTE(review): obj is always NULL on this path; check is redundant. */
	if (obj == NULL)
		obj = (void *)kmem_malloc(memguard_map, PAGE_SIZE, flags);
	if (obj != NULL) {
		memguard_unguard(obj);
		if (flags & M_ZERO)
			bzero(obj, PAGE_SIZE);
	} else {
		/* kmem_malloc failed: give back the reservation. */
		MEMGUARD_CRIT_SECTION_ENTER;
		memguard_mapused -= PAGE_SIZE;
		MEMGUARD_CRIT_SECTION_EXIT;
	}
	return obj;
}
174
175/*
176 * Free specified single object.
177 */
178void
179memguard_free(void *addr)
180{
181	struct memguard_entry *e;
182
183	memguard_guard(addr);
184	e = malloc(sizeof(struct memguard_entry), M_TEMP, M_NOWAIT);
185	if (e == NULL) {
186		MEMGUARD_CRIT_SECTION_ENTER;
187		memguard_mapused -= PAGE_SIZE;
188		MEMGUARD_CRIT_SECTION_EXIT;
189		kmem_free(memguard_map, (vm_offset_t)trunc_page(
190		    (unsigned long)addr), PAGE_SIZE);
191		return;
192	}
193	e->ptr = (void *)trunc_page((unsigned long)addr);
194	MEMGUARD_CRIT_SECTION_ENTER;
195	STAILQ_INSERT_TAIL(&memguard_fifo_pool, e, entries);
196	MEMGUARD_CRIT_SECTION_EXIT;
197}
198
199/*
200 * Guard a page containing specified object (make it read-only so that
201 * future writes to it fail).
202 */
203static void
204memguard_guard(void *addr)
205{
206	void *a = (void *)trunc_page((unsigned long)addr);
207	(void)vm_map_protect(memguard_map, (vm_offset_t)a,
208	    (vm_offset_t)((unsigned long)a + PAGE_SIZE), VM_PROT_READ, 0);
209}
210
211/*
212 * Unguard a page containing specified object (make it read-and-write to
213 * allow full data access).
214 */
215static void
216memguard_unguard(void *addr)
217{
218	void *a = (void *)trunc_page((unsigned long)addr);
219	(void)vm_map_protect(memguard_map, (vm_offset_t)a,
220	    (vm_offset_t)((unsigned long)a + PAGE_SIZE),
221	    VM_PROT_READ | VM_PROT_WRITE, 0);
222}
223