/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */


#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  They must be defined before
   the first system header is included.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <time.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif


/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */
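
/* For reference: with GNU ld, linker wrapping is requested with
   -Wl,--wrap,malloc, which diverts calls to malloc into __wrap_malloc
   and makes the original definition reachable as __real_malloc.  The
   WRAPPER/DECLARE/CALL_REAL macros (see mf-impl.h) abstract over this
   mechanism and over dlsym-based symbol interposition.  */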


#if PIC
/* A special bootstrap variant. */
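/* In a shared-library (PIC) build, the dynamic linker may call malloc
   while mudflap is still looking up the real allocation functions via
   dlsym, and dlsym itself can allocate.  These __mf_0fn_* variants
   satisfy such early requests from a small static arena, breaking the
   recursion.  */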
void *
__mf_0fn_malloc (size_t c)
{
  enum foo { BS = 4096, NB = 10 };
  static char bufs[NB][BS];
  static unsigned bufs_used[NB];
  unsigned i;

  for (i=0; i<NB; i++)
    {
      if (! bufs_used[i] && c < BS)
	{
	  bufs_used[i] = 1;
	  return & bufs[i][0];
	}
    }
  return NULL;
}
#endif


#undef malloc
WRAPPER(void *, malloc, size_t c)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, malloc, size_t);
  char *result;
  BEGIN_PROTECT (malloc, c);

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
			 __mf_opts.crumple_zone));
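  /* The underlying block is laid out as
     [crumple zone | user data | crumple zone]; only the user region
     is registered below, so accesses that stray into either padding
     zone fall outside any registered object and are reported.  */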
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}


#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
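  /* The static bootstrap buffers are zero-initialized, so no memset
     is needed here.  The c * n multiply could overflow, but bootstrap
     requests are tiny.  */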
  return __mf_0fn_malloc (c * n);
}
#endif


#undef calloc
WRAPPER(void *, calloc, size_t c, size_t n)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, calloc, size_t, size_t);
  DECLARE(void *, malloc, size_t);
  DECLARE(void *, memset, void *, int, size_t);
  char *result;
  BEGIN_PROTECT (calloc, c, n);

  size_with_crumple_zones =
    CLAMPADD((c * n), /* XXX: CLAMPMUL */
	     CLAMPADD(__mf_opts.crumple_zone,
		      __mf_opts.crumple_zone));
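  /* A saturating multiply in the spirit of CLAMPADD might look like
     the following (hypothetical sketch; mf-impl.h defines no such
     macro):

       #define CLAMPMUL(a,b) \
	 (((a) != 0 && (b) > (SIZE_MAX / (a))) ? SIZE_MAX : (a) * (b))  */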
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    {
      memset (result, 0, size_with_crumple_zones);
      result += __mf_opts.crumple_zone;
      __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}


#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
#endif


#undef realloc
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void *, realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
			 __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  */
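  /* (With wipe_heap enabled, unregistering would memset the old
     region, whose storage the real realloc may already have released
     or reused for the new block, so wiping here could corrupt live
     data.)  */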
  LOCKTH ();
  __mf_set_state (reentrant);
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  if (LIKELY(buf))
    __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  /* Restore previous setting.  */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_set_state (active);
  UNLOCKTH ();

  return result;
}


#if PIC
/* A special bootstrap variant. */
void
__mf_0fn_free (void *buf)
{
  return;
}
#endif

#undef free
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number
     (__mf_opts.free_queue_length) of free()s.  */
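  /* Deferring the underlying free keeps freed memory from being
     recycled into new live objects right away, so stale accesses
     through dangling pointers still land in unregistered space and
     are reported.  */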
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  if (UNLIKELY(buf == NULL))
    return;

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0, __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
	{
	  freeme = free_queue [free_ptr];
	  freeme -= __mf_opts.crumple_zone;
	}
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
	{
	  if (__mf_opts.trace_mf_calls)
	    {
	      VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
			     (void *) freeme,
			     __mf_opts.crumple_zone);
	    }
	  BEGIN_MALLOC_PROTECT ();
	  CALL_REAL (free, freeme);
	  END_MALLOC_PROTECT ();
	}
    }
  else
    {
      /* Back the pointer up a bit, to the beginning of the crumple zone.  */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
	{
	  VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
			 (void *) base,
			 (void *) buf,
			 __mf_opts.crumple_zone);
	}
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}


/* We can only wrap mmap if the target supports it.  Likewise for munmap.
   We assume we have both if we have mmap.  */
#ifdef HAVE_MMAP

#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
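  /* Report failure (MAP_FAILED); early startup code cannot mmap yet.  */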
  return (void *) -1;
}
#endif


#undef mmap
WRAPPER(void *, mmap,
	void *start, size_t length, int prot,
	int flags, int fd, off_t offset)
{
  DECLARE(void *, mmap, void *, size_t, int,
	  int, int, off_t);
  void *result;
  BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap, start, length, prot,
		      flags, fd, offset);

  /*
  VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
		 (uintptr_t) start, (uintptr_t) length,
		 (uintptr_t) result);
  */

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
	 as a single segment?  That's so that a later munmap() call
	 can unmap individual pages.  XXX: would __MF_TYPE_GUESS make
	 this more automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
	{
	  /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
	  /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
	     appropriate for unaccessed mmap pages? */
	  __mf_register ((void *) CLAMPADD (base, offset), ps,
			 __MF_TYPE_HEAP_I, "mmap page");
	}
    }

  return result;
}


#if PIC
/* A special bootstrap variant. */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
#endif


#undef munmap
WRAPPER(int, munmap, void *start, size_t length)
{
  DECLARE(int, munmap, void *, size_t);
  int result;
  BEGIN_PROTECT (munmap, start, length);

  result = CALL_REAL (munmap, start, length);

  /*
  VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
		 (uintptr_t) start, (uintptr_t) length,
		 (uintptr_t) result);
  */

  if (result == 0)
    {
      /* Unregister each page as a heap object.  */
      size_t ps = getpagesize ();
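      /* Round start down to a page boundary; assumes ps is a power of two.  */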
      uintptr_t base = (uintptr_t) start & (~ (ps - 1));
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
	__mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I);
    }
  return result;
}
#endif /* HAVE_MMAP */


/* This wrapper is a little different: besides serving the alloca
   wrapper below, it is also called indirectly from __mf_fini, to
   clean up any still-pending alloca allocations.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.
  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking *next; };
  static struct alloca_tracking *alloca_history = NULL;

  void *stack = __builtin_frame_address (0);
  void *result;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking! */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

#define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */
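/* On targets whose stack grows downward (such as x86), a deeper frame
   sits at a lower address, hence "<" above.  A runtime probe in the
   style of libiberty's find_stack_direction() would compare the
   address of a local variable with that of a local in a nested call:
   if the nested one is lower, the stack grows downward.  */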

  while (alloca_history &&
	 ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      END_MALLOC_PROTECT ();
      alloca_history = next;
    }

  /* Allocate new block.  */
  result = NULL;
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      BEGIN_MALLOC_PROTECT ();
      track = (struct alloca_tracking *) CALL_REAL (malloc,
						    sizeof (struct alloca_tracking));
      END_MALLOC_PROTECT ();
      if (LIKELY (track != NULL))
	{
	  BEGIN_MALLOC_PROTECT ();
	  result = CALL_REAL (malloc, c);
	  END_MALLOC_PROTECT ();
	  if (UNLIKELY (result == NULL))
	    {
	      BEGIN_MALLOC_PROTECT ();
	      CALL_REAL (free, track);
	      END_MALLOC_PROTECT ();
	      /* Too bad.  XXX: What about errno?  */
	    }
	  else
	    {
	      __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
	      track->ptr = result;
	      track->stack = stack;
	      track->next = alloca_history;
	      alloca_history = track;
	    }
	}
    }

  return result;
}


#undef alloca
WRAPPER(void *, alloca, size_t c)
{
  return __mf_wrap_alloca_indirect (c);
}