/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2009 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif


/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <time.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif


/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */


#if PIC

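/* Statically allocated buffers for the bootstrap allocator below:
   NB buffers of BS bytes each, handed out before the real malloc is
   available.  */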
enum { BS = 4096, NB=10 };
static char __mf_0fn_bufs[NB][BS];
static unsigned __mf_0fn_bufs_used[NB];


/* A special bootstrap variant. */
void *
__mf_0fn_malloc (size_t c)
{
  unsigned i;

  for (i=0; i<NB; i++)
    {
      if (! __mf_0fn_bufs_used[i] && c < BS)
        {
          __mf_0fn_bufs_used[i] = 1;
          return & __mf_0fn_bufs[i][0];
        }
    }
  return NULL;
}
#endif


#undef malloc
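/* Wrap malloc: ask the real malloc for c plus two crumple zones of
   padding, return a pointer just past the leading zone, and register
   only the user-visible c bytes with the runtime.  */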
WRAPPER(void *, malloc, size_t c)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, malloc, size_t c);
  char *result;
  BEGIN_PROTECT (malloc, c);

  size_with_crumple_zones =
    CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone,
                        __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}

#ifdef PIC
/* A special bootstrap variant. */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  return __mf_0fn_malloc (c * n);
}
#endif


#undef calloc
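/* Wrap calloc: same crumple-zone layout as malloc above, but the
   whole underlying block (zones included) is zeroed before the
   user-visible region is registered.  */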
WRAPPER(void *, calloc, size_t c, size_t n)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, calloc, size_t, size_t);
  DECLARE(void *, malloc, size_t);
  DECLARE(void *, memset, void *, int, size_t);
  char *result;
  BEGIN_PROTECT (calloc, c, n);

  size_with_crumple_zones =
    CLAMPADD((c * n), /* XXX: CLAMPMUL */
             CLAMPADD(__mf_opts.crumple_zone,
                      __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    memset (result, 0, size_with_crumple_zones);

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}


#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
#endif


#undef realloc
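/* Wrap realloc: step back over the leading crumple zone to the start
   of the underlying block, resize it via the real realloc, then
   unregister the old region and register the new one.  Heap wiping
   is suppressed across the unregister/reregister pair.  */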
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void *, realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
                         __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  */
  LOCKTH ();
  __mf_set_state (reentrant);
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  if (LIKELY(buf))
    __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  /* Restore previous setting.  */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_set_state (active);
  UNLOCKTH ();

  return result;
}


#if PIC
/* A special bootstrap variant. */
void
__mf_0fn_free (void *buf)
{
  return;
}
#endif

#undef free
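/* Wrap free: unregister the region, then either hand the underlying
   block (including its leading crumple zone) straight back to the
   real free, or push it through a fixed-size circular queue so the
   actual release is deferred by __mf_opts.free_queue_length calls.  */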
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  if (UNLIKELY(buf == NULL))
    return;

#if PIC
  /* Check whether the given buffer might have come from a
     __mf_0fn_malloc/calloc call that for whatever reason was not
     redirected back to __mf_0fn_free.  If so, we just ignore the
     call. */
  if (UNLIKELY((uintptr_t) buf >= (uintptr_t) __mf_0fn_bufs &&
               (uintptr_t) buf < ((uintptr_t) __mf_0fn_bufs + sizeof(__mf_0fn_bufs))))
  {
    VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf);
    return;
  }
#endif

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0,
              __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
        {
          freeme = free_queue [free_ptr];
          freeme -= __mf_opts.crumple_zone;
        }
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
        {
          if (__mf_opts.trace_mf_calls)
            {
              VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
                             (void *) freeme,
                             __mf_opts.crumple_zone);
            }
          BEGIN_MALLOC_PROTECT ();
          CALL_REAL (free, freeme);
          END_MALLOC_PROTECT ();
        }
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
        {
          VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
                         (void *) base,
                         (void *) buf,
                         __mf_opts.crumple_zone);
        }
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}


/* We can only wrap mmap if the target supports it.  Likewise for munmap.
   We assume we have both if we have mmap.  */
#ifdef HAVE_MMAP

#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
#if defined(__FreeBSD__)
  if (f == 0x1000 && fd == -1 && prot == 0 && off == 0)
    return 0;
#endif /* Ignore red zone allocation request for initial thread's stack. */

  return (void *) -1;
}
#endif


#undef mmap
WRAPPER(void *, mmap,
        void *start, size_t length, int prot,
        int flags, int fd, off_t offset)
{
  DECLARE(void *, mmap, void *, size_t, int,
          int, int, off_t);
  void *result;
  BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap, start, length, prot,
                      flags, fd, offset);

  /*
  VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
         as a single segment?  That's so that a later munmap() call
         can unmap individual pages.  XXX: would __MF_TYPE_GUESS make
         this more automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        {
          /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
          /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
             appropriate for unaccessed mmap pages? */
          __mf_register ((void *) CLAMPADD (base, offset), ps,
                         __MF_TYPE_HEAP_I, "mmap page");
        }
    }

  return result;
}


#if PIC
/* A special bootstrap variant. */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
#endif


#undef munmap
WRAPPER(int, munmap, void *start, size_t length)
{
  DECLARE(int, munmap, void *, size_t);
  int result;
  BEGIN_PROTECT (munmap, start, length);

  result = CALL_REAL (munmap, start, length);

  /*
  VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result == 0)
    {
      /* Unregister each page as a heap object.  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        __mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I);
    }
  return result;
}
#endif /* HAVE_MMAP */


/* This wrapper is a little different, as it's called indirectly from
   __mf_fini also to clean up pending allocations.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.
  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking* next; };
  static struct alloca_tracking *alloca_history = NULL;

  void *stack = __builtin_frame_address (0);
  void *result;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking! */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

#define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */

  while (alloca_history &&
         ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      END_MALLOC_PROTECT ();
      alloca_history = next;
    }

  /* Allocate new block.  */
  result = NULL;
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      BEGIN_MALLOC_PROTECT ();
      track = (struct alloca_tracking *) CALL_REAL (malloc,
                                                    sizeof (struct alloca_tracking));
      END_MALLOC_PROTECT ();
      if (LIKELY (track != NULL))
        {
          BEGIN_MALLOC_PROTECT ();
          result = CALL_REAL (malloc, c);
          END_MALLOC_PROTECT ();
          if (UNLIKELY (result == NULL))
            {
              BEGIN_MALLOC_PROTECT ();
              CALL_REAL (free, track);
              END_MALLOC_PROTECT ();
              /* Too bad.  XXX: What about errno?  */
            }
          else
            {
              __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
              track->ptr = result;
              track->stack = stack;
              track->next = alloca_history;
              alloca_history = track;
            }
        }
    }

  return result;
}


#undef alloca
WRAPPER(void *, alloca, size_t c)
{
  return __mf_wrap_alloca_indirect (c);
}