1/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
3   Contributed by Frank Ch. Eigler <fche@redhat.com>
4   and Graydon Hoare <graydon@redhat.com>
5   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
6   adapted from libiberty.
7
8This file is part of GCC.
9
10GCC is free software; you can redistribute it and/or modify it under
11the terms of the GNU General Public License as published by the Free
12Software Foundation; either version 2, or (at your option) any later
13version.
14
15In addition to the permissions in the GNU General Public License, the
16Free Software Foundation gives you unlimited permission to link the
17compiled version of this file into combinations with other programs,
18and to distribute those combinations without any restriction coming
19from the use of this file.  (The General Public License restrictions
20do apply in other respects; for example, they cover modification of
21the file, and distribution when not linked into a combine
22executable.)
23
24GCC is distributed in the hope that it will be useful, but WITHOUT ANY
25WARRANTY; without even the implied warranty of MERCHANTABILITY or
26FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
27for more details.
28
29You should have received a copy of the GNU General Public License
30along with GCC; see the file COPYING.  If not, write to the Free
31Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
3202110-1301, USA.  */
33
34#include "config.h"
35
36/* These attempt to coax various unix flavours to declare all our
37   needed tidbits in the system headers.  */
38#if !defined(__FreeBSD__) && !defined(__APPLE__)
39#define _POSIX_SOURCE
40#endif /* Some BSDs break <sys/socket.h> if this is defined. */
41#define _GNU_SOURCE
42#define _XOPEN_SOURCE
43#define _BSD_TYPES
44#define __EXTENSIONS__
45#define _ALL_SOURCE
46#define _LARGE_FILE_API
47#define _XOPEN_SOURCE_EXTENDED 1
48
49#include <stdio.h>
50#include <stdlib.h>
51#include <sys/types.h>
52#include <sys/time.h>
53#include <time.h>
54#include <unistd.h>
55#ifdef HAVE_EXECINFO_H
56#include <execinfo.h>
57#endif
58#ifdef HAVE_SIGNAL_H
59#include <signal.h>
60#endif
61#include <assert.h>
62
63#include <string.h>
64#include <limits.h>
65#include <sys/types.h>
66#include <signal.h>
67#include <errno.h>
68#include <ctype.h>
69
70#include "mf-runtime.h"
71#include "mf-impl.h"
72
73
74/* ------------------------------------------------------------------------ */
75/* Splay-tree implementation.  */
76
77typedef uintptr_t mfsplay_tree_key;
78typedef void *mfsplay_tree_value;
79
80/* Forward declaration for a node in the tree.  */
81typedef struct mfsplay_tree_node_s *mfsplay_tree_node;
82
83/* The type of a function used to iterate over the tree.  */
84typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);
85
86/* The nodes in the splay tree.  */
87struct mfsplay_tree_node_s
88{
89  /* Data.  */
90  mfsplay_tree_key key;
91  mfsplay_tree_value value;
92  /* Children.  */
93  mfsplay_tree_node left;
94  mfsplay_tree_node right;
95  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
96};
97
98/* The splay tree itself.  */
99struct mfsplay_tree_s
100{
101  /* The root of the tree.  */
102  mfsplay_tree_node root;
103
104  /* The last key value for which the tree has been splayed, but not
105     since modified.  */
106  mfsplay_tree_key last_splayed_key;
107  int last_splayed_key_p;
108
109  /* Statistics.  */
110  unsigned num_keys;
111
112  /* Traversal recursion control flags.  */
113  unsigned max_depth;
114  unsigned depth;
115  unsigned rebalance_p;
116};
117typedef struct mfsplay_tree_s *mfsplay_tree;
118
119static mfsplay_tree mfsplay_tree_new (void);
120static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
121static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
122static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
123static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
124static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
125static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
126static void mfsplay_tree_rebalance (mfsplay_tree sp);
127
128/* ------------------------------------------------------------------------ */
129/* Utility macros */
130
131#define CTOR  __attribute__ ((constructor))
132#define DTOR  __attribute__ ((destructor))
133
134
135/* Codes to describe the context in which a violation occurs. */
136#define __MF_VIOL_UNKNOWN 0
137#define __MF_VIOL_READ 1
138#define __MF_VIOL_WRITE 2
139#define __MF_VIOL_REGISTER 3
140#define __MF_VIOL_UNREGISTER 4
141#define __MF_VIOL_WATCH 5
142
143/* Protect against recursive calls. */
144
145static void
146begin_recursion_protect1 (const char *pf)
147{
148  if (__mf_get_state () == reentrant)
149    {
150      write (2, "mf: erroneous reentrancy detected in `", 38);
151      write (2, pf, strlen(pf));
152      write (2, "'\n", 2); \
153      abort ();
154    }
155  __mf_set_state (reentrant);
156}
157
158#define BEGIN_RECURSION_PROTECT() \
159  begin_recursion_protect1 (__PRETTY_FUNCTION__)
160
161#define END_RECURSION_PROTECT() \
162  __mf_set_state (active)
163
164/* ------------------------------------------------------------------------ */
165/* Required globals.  */
166
167#define LOOKUP_CACHE_MASK_DFL 1023
168#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
169#define LOOKUP_CACHE_SHIFT_DFL 2
170
171struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
172uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
173unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
174#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
175
176struct __mf_options __mf_opts;
177int __mf_starting_p = 1;
178
179#ifdef LIBMUDFLAPTH
180#ifdef HAVE_TLS
181__thread enum __mf_state_enum __mf_state_1 = reentrant;
182#endif
183#else
184enum __mf_state_enum __mf_state_1 = reentrant;
185#endif
186
187#ifdef LIBMUDFLAPTH
188pthread_mutex_t __mf_biglock =
189#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
190       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
191#else
192       PTHREAD_MUTEX_INITIALIZER;
193#endif
194#endif
195
196/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
197   the libmudflap.la (no threading support) can diagnose whether
198   the application is linked with -lpthread.  See __mf_usage() below.  */
199#if HAVE_PTHREAD_H
200#ifdef _POSIX_THREADS
201#pragma weak pthread_join
202#else
203#define pthread_join NULL
204#endif
205#endif
206
207
208/* ------------------------------------------------------------------------ */
209/* stats-related globals.  */
210
211static unsigned long __mf_count_check;
212static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
213static unsigned long __mf_count_register;
214static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
215static unsigned long __mf_count_unregister;
216static unsigned long __mf_total_unregister_size;
217static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
218static unsigned long __mf_sigusr1_received;
219static unsigned long __mf_sigusr1_handled;
220/* not static */ unsigned long __mf_reentrancy;
221#ifdef LIBMUDFLAPTH
222/* not static */ unsigned long __mf_lock_contention;
223#endif
224
225
226/* ------------------------------------------------------------------------ */
227/* mode-check-related globals.  */
228
229typedef struct __mf_object
230{
231  uintptr_t low, high; /* __mf_register parameters */
232  const char *name;
233  char type; /* __MF_TYPE_something */
234  char watching_p; /* Trigger a VIOL_WATCH on access? */
235  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
236  unsigned write_count; /* Likewise for __mf_check/write.  */
237  unsigned liveness; /* A measure of recent checking activity.  */
238  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */
239
240  uintptr_t alloc_pc;
241  struct timeval alloc_time;
242  char **alloc_backtrace;
243  size_t alloc_backtrace_size;
244#ifdef LIBMUDFLAPTH
245  pthread_t alloc_thread;
246#endif
247
248  int deallocated_p;
249  uintptr_t dealloc_pc;
250  struct timeval dealloc_time;
251  char **dealloc_backtrace;
252  size_t dealloc_backtrace_size;
253#ifdef LIBMUDFLAPTH
254  pthread_t dealloc_thread;
255#endif
256} __mf_object_t;
257
258/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
259/* Actually stored as static vars within lookup function below.  */
260
261/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
262static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
263static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
264
265
266/* ------------------------------------------------------------------------ */
267/* Forward function declarations */
268
269void __mf_init () CTOR;
270static void __mf_sigusr1_respond ();
271static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
272                                   __mf_object_t **objs, unsigned max_objs);
273static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
274                                    __mf_object_t **objs, unsigned max_objs, int type);
275static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
276                                        __mf_object_t **objs, unsigned max_objs);
277static void __mf_adapt_cache ();
278static void __mf_describe_object (__mf_object_t *obj);
279static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
280static mfsplay_tree __mf_object_tree (int type);
281static void __mf_link_object (__mf_object_t *node);
282static void __mf_unlink_object (__mf_object_t *node);
283
284
285/* ------------------------------------------------------------------------ */
286/* Configuration engine */
287
288static void
289__mf_set_default_options ()
290{
291  memset (& __mf_opts, 0, sizeof (__mf_opts));
292
293  __mf_opts.adapt_cache = 1000003;
294  __mf_opts.abbreviate = 1;
295  __mf_opts.verbose_violations = 1;
296  __mf_opts.free_queue_length = 4;
297  __mf_opts.persistent_count = 100;
298  __mf_opts.crumple_zone = 32;
299  __mf_opts.backtrace = 4;
300  __mf_opts.timestamps = 1;
301  __mf_opts.mudflap_mode = mode_check;
302  __mf_opts.violation_mode = viol_nop;
303  __mf_opts.heur_std_data = 1;
304#ifdef LIBMUDFLAPTH
305  __mf_opts.thread_stack = 0;
306#endif
307}
308
309static struct option
310{
311  char *name;
312  char *description;
313  enum
314    {
315      set_option,
316      read_integer_option,
317    } type;
318  unsigned value;
319  unsigned *target;
320}
321options [] =
322  {
323    {"mode-nop",
324     "mudflaps do nothing",
325     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
326    {"mode-populate",
327     "mudflaps populate object tree",
328     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
329    {"mode-check",
330     "mudflaps check for memory violations",
331     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
332    {"mode-violate",
333     "mudflaps always cause violations (diagnostic)",
334     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},
335
336    {"viol-nop",
337     "violations do not change program execution",
338     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
339    {"viol-abort",
340     "violations cause a call to abort()",
341     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
342    {"viol-segv",
343     "violations are promoted to SIGSEGV signals",
344     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
345    {"viol-gdb",
346     "violations fork a gdb process attached to current program",
347     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
348    {"trace-calls",
349     "trace calls to mudflap runtime library",
350     set_option, 1, &__mf_opts.trace_mf_calls},
351    {"verbose-trace",
352     "trace internal events within mudflap runtime library",
353     set_option, 1, &__mf_opts.verbose_trace},
354    {"collect-stats",
355     "collect statistics on mudflap's operation",
356     set_option, 1, &__mf_opts.collect_stats},
357#ifdef SIGUSR1
358    {"sigusr1-report",
359     "print report upon SIGUSR1",
360     set_option, 1, &__mf_opts.sigusr1_report},
361#endif
362    {"internal-checking",
363     "perform more expensive internal checking",
364     set_option, 1, &__mf_opts.internal_checking},
365    {"print-leaks",
366     "print any memory leaks at program shutdown",
367     set_option, 1, &__mf_opts.print_leaks},
368    {"check-initialization",
369     "detect uninitialized object reads",
370     set_option, 1, &__mf_opts.check_initialization},
371    {"verbose-violations",
372     "print verbose messages when memory violations occur",
373     set_option, 1, &__mf_opts.verbose_violations},
374    {"abbreviate",
375     "abbreviate repetitive listings",
376     set_option, 1, &__mf_opts.abbreviate},
377    {"timestamps",
378     "track object lifetime timestamps",
379     set_option, 1, &__mf_opts.timestamps},
380    {"ignore-reads",
381     "ignore read accesses - assume okay",
382     set_option, 1, &__mf_opts.ignore_reads},
383    {"wipe-stack",
384     "wipe stack objects at unwind",
385     set_option, 1, &__mf_opts.wipe_stack},
386    {"wipe-heap",
387     "wipe heap objects at free",
388     set_option, 1, &__mf_opts.wipe_heap},
389    {"heur-proc-map",
390     "support /proc/self/map heuristics",
391     set_option, 1, &__mf_opts.heur_proc_map},
392    {"heur-stack-bound",
393     "enable a simple upper stack bound heuristic",
394     set_option, 1, &__mf_opts.heur_stack_bound},
395    {"heur-start-end",
396     "support _start.._end heuristics",
397     set_option, 1, &__mf_opts.heur_start_end},
398    {"heur-stdlib",
399     "register standard library data (argv, errno, stdin, ...)",
400     set_option, 1, &__mf_opts.heur_std_data},
401    {"free-queue-length",
402     "queue N deferred free() calls before performing them",
403     read_integer_option, 0, &__mf_opts.free_queue_length},
404    {"persistent-count",
405     "keep a history of N unregistered regions",
406     read_integer_option, 0, &__mf_opts.persistent_count},
407    {"crumple-zone",
408     "surround allocations with crumple zones of N bytes",
409     read_integer_option, 0, &__mf_opts.crumple_zone},
410    /* XXX: not type-safe.
411    {"lc-mask",
412     "set lookup cache size mask to N (2**M - 1)",
413     read_integer_option, 0, (int *)(&__mf_lc_mask)},
414    {"lc-shift",
415     "set lookup cache pointer shift",
416     read_integer_option, 0, (int *)(&__mf_lc_shift)},
417    */
418    {"lc-adapt",
419     "adapt mask/shift parameters after N cache misses",
420     read_integer_option, 1, &__mf_opts.adapt_cache},
421    {"backtrace",
422     "keep an N-level stack trace of each call context",
423     read_integer_option, 0, &__mf_opts.backtrace},
424#ifdef LIBMUDFLAPTH
425    {"thread-stack",
426     "override thread stacks allocation: N kB",
427     read_integer_option, 0, &__mf_opts.thread_stack},
428#endif
429    {0, 0, set_option, 0, NULL}
430  };
431
432static void
433__mf_usage ()
434{
435  struct option *opt;
436
437  fprintf (stderr,
438           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
439           "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
440           "\n"
441           "The mudflap code can be controlled by an environment variable:\n"
442           "\n"
443           "$ export MUDFLAP_OPTIONS='<options>'\n"
444           "$ <mudflapped_program>\n"
445           "\n"
446           "where <options> is a space-separated list of \n"
447           "any of the following options.  Use `-no-OPTION' to disable options.\n"
448           "\n",
449#if HAVE_PTHREAD_H
450           (pthread_join ? "multi-threaded " : "single-threaded "),
451#else
452           "",
453#endif
454#if LIBMUDFLAPTH
455           "thread-aware "
456#else
457           "thread-unaware "
458#endif
459            );
460  /* XXX: The multi-threaded thread-unaware combination is bad.  */
461
462  for (opt = options; opt->name; opt++)
463    {
464      int default_p = (opt->value == * opt->target);
465
466      switch (opt->type)
467        {
468          char buf[128];
469        case set_option:
470          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
471          if (default_p)
472            fprintf (stderr, " [active]\n");
473          else
474            fprintf (stderr, "\n");
475          break;
476        case read_integer_option:
477          strncpy (buf, opt->name, 128);
478          strncpy (buf + strlen (opt->name), "=N", 2);
479          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
480          fprintf (stderr, " [%d]\n", * opt->target);
481          break;
482        default: abort();
483        }
484    }
485
486  fprintf (stderr, "\n");
487}
488
489
490int
491__mf_set_options (const char *optstr)
492{
493  int rc;
494  LOCKTH ();
495  BEGIN_RECURSION_PROTECT ();
496  rc = __mfu_set_options (optstr);
497  /* XXX: It's not really that easy.  A change to a bunch of parameters
498     can require updating auxiliary state or risk crashing:
499     free_queue_length, crumple_zone ... */
500  END_RECURSION_PROTECT ();
501  UNLOCKTH ();
502  return rc;
503}
504
505
506int
507__mfu_set_options (const char *optstr)
508{
509  struct option *opts = 0;
510  char *nxt = 0;
511  long tmp = 0;
512  int rc = 0;
513  const char *saved_optstr = optstr;
514
515  /* XXX: bounds-check for optstr! */
516
517  while (*optstr)
518    {
519      switch (*optstr) {
520      case ' ':
521      case '\t':
522      case '\n':
523        optstr++;
524        break;
525
526      case '-':
527        if (*optstr+1)
528          {
529            int negate = 0;
530            optstr++;
531
532            if (*optstr == '?' ||
533                strncmp (optstr, "help", 4) == 0)
534              {
535                /* Caller will print help and exit.  */
536                return -1;
537              }
538
539            if (strncmp (optstr, "no-", 3) == 0)
540              {
541                negate = 1;
542                optstr = & optstr[3];
543              }
544
545            for (opts = options; opts->name; opts++)
546              {
547                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
548                  {
549                    optstr += strlen (opts->name);
550                    assert (opts->target);
551                    switch (opts->type)
552                      {
553                      case set_option:
554                        if (negate)
555                          *(opts->target) = 0;
556                        else
557                          *(opts->target) = opts->value;
558                        break;
559                      case read_integer_option:
560                        if (! negate && (*optstr == '=' && *(optstr+1)))
561                          {
562                            optstr++;
563                            tmp = strtol (optstr, &nxt, 10);
564                            if ((optstr != nxt) && (tmp != LONG_MAX))
565                              {
566                                optstr = nxt;
567                                *(opts->target) = (int)tmp;
568                              }
569                          }
570                        else if (negate)
571                          * opts->target = 0;
572                        break;
573                      }
574                  }
575              }
576          }
577        break;
578
579      default:
580        fprintf (stderr,
581                 "warning: unrecognized string '%s' in mudflap options\n",
582                 optstr);
583        optstr += strlen (optstr);
584        rc = -1;
585        break;
586      }
587    }
588
589  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
590  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
591  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
592
593  /* Clear the lookup cache, in case the parameters got changed.  */
594  /* XXX: race */
595  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
596  /* void slot 0 */
597  __mf_lookup_cache[0].low = MAXPTR;
598
599  TRACE ("set options from `%s'\n", saved_optstr);
600
601  /* Call this unconditionally, in case -sigusr1-report was toggled. */
602  __mf_sigusr1_respond ();
603
604  return rc;
605}
606
607
608#ifdef PIC
609
610void
611__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
612{
613  char *err;
614
615  assert (e);
616  if (e->pointer) return;
617
618#if HAVE_DLVSYM
619  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
620    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
621  else
622#endif
623    e->pointer = dlsym (RTLD_NEXT, e->name);
624
625  err = dlerror ();
626
627  if (err)
628    {
629      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
630               e->name, err);
631      abort ();
632    }
633  if (! e->pointer)
634    {
635      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
636      abort ();
637    }
638}
639
640
641static void
642__mf_resolve_dynamics ()
643{
644  int i;
645  for (i = 0; i < dyn_INITRESOLVE; i++)
646    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
647}
648
649
650/* NB: order must match enums in mf-impl.h */
651struct __mf_dynamic_entry __mf_dynamic [] =
652{
653  {NULL, "calloc", NULL},
654  {NULL, "free", NULL},
655  {NULL, "malloc", NULL},
656  {NULL, "mmap", NULL},
657  {NULL, "munmap", NULL},
658  {NULL, "realloc", NULL},
659  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
660#ifdef LIBMUDFLAPTH
661  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
662  {NULL, "pthread_join", NULL},
663  {NULL, "pthread_exit", NULL}
664#endif
665};
666
667#endif /* PIC */
668
669
670
671/* ------------------------------------------------------------------------ */
672
673/* Lookup & manage automatic initialization of the five or so splay trees.  */
674static mfsplay_tree
675__mf_object_tree (int type)
676{
677  static mfsplay_tree trees [__MF_TYPE_MAX+1];
678  assert (type >= 0 && type <= __MF_TYPE_MAX);
679  if (UNLIKELY (trees[type] == NULL))
680    trees[type] = mfsplay_tree_new ();
681  return trees[type];
682}
683
684
685/* not static */void
686__mf_init ()
687{
688  char *ov = 0;
689
690  /* Return if initialization has already been done. */
691  if (LIKELY (__mf_starting_p == 0))
692    return;
693
694  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
695#ifdef PIC
696  __mf_resolve_dynamics ();
697#endif
698  __mf_starting_p = 0;
699
700  __mf_set_state (active);
701
702  __mf_set_default_options ();
703
704  ov = getenv ("MUDFLAP_OPTIONS");
705  if (ov)
706    {
707      int rc = __mfu_set_options (ov);
708      if (rc < 0)
709        {
710          __mf_usage ();
711          exit (1);
712        }
713    }
714
715  /* Initialize to a non-zero description epoch. */
716  __mf_describe_object (NULL);
717
718#define REG_RESERVED(obj) \
719  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
720
721  REG_RESERVED (__mf_lookup_cache);
722  REG_RESERVED (__mf_lc_mask);
723  REG_RESERVED (__mf_lc_shift);
724  /* XXX: others of our statics?  */
725
726  /* Prevent access to *NULL. */
727  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
728  __mf_lookup_cache[0].low = (uintptr_t) -1;
729}
730
731
732
733int
734__wrap_main (int argc, char* argv[])
735{
736  extern char **environ;
737  extern int main ();
738  extern int __real_main ();
739  static int been_here = 0;
740
741  if (__mf_opts.heur_std_data && ! been_here)
742    {
743      unsigned i;
744
745      been_here = 1;
746      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
747      for (i=0; i<argc; i++)
748        {
749          unsigned j = strlen (argv[i]);
750          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
751        }
752
753      for (i=0; ; i++)
754        {
755          char *e = environ[i];
756          unsigned j;
757          if (e == NULL) break;
758          j = strlen (environ[i]);
759          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
760        }
761      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
762
763      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
764
765      __mf_register (stdin,  sizeof (*stdin),  __MF_TYPE_STATIC, "stdin");
766      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
767      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
768
769      /* Make some effort to register ctype.h static arrays.  */
770      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
771      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
772         with in mf-hooks2.c.  */
773    }
774
775#ifdef PIC
776  return main (argc, argv, environ);
777#else
778  return __real_main (argc, argv, environ);
779#endif
780}
781
782
783
784extern void __mf_fini () DTOR;
785void __mf_fini ()
786{
787  TRACE ("__mf_fini\n");
788  __mfu_report ();
789
790#ifndef PIC
791/* Since we didn't populate the tree for allocations in constructors
792   before __mf_init, we cannot check destructors after __mf_fini.  */
793  __mf_opts.mudflap_mode = mode_nop;
794#endif
795}
796
797
798
799/* ------------------------------------------------------------------------ */
800/* __mf_check */
801
802void __mf_check (void *ptr, size_t sz, int type, const char *location)
803{
804  LOCKTH ();
805  BEGIN_RECURSION_PROTECT ();
806  __mfu_check (ptr, sz, type, location);
807  END_RECURSION_PROTECT ();
808  UNLOCKTH ();
809}
810
811
812void __mfu_check (void *ptr, size_t sz, int type, const char *location)
813{
814  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
815  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
816  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
817  uintptr_t ptr_low = (uintptr_t) ptr;
818  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
819  struct __mf_cache old_entry = *entry;
820
821  if (UNLIKELY (__mf_opts.sigusr1_report))
822    __mf_sigusr1_respond ();
823  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
824    return;
825
826  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
827         ptr, entry_idx, (unsigned long)sz,
828         (type == 0 ? "read" : "write"), location);
829
830  switch (__mf_opts.mudflap_mode)
831    {
832    case mode_nop:
833      /* It is tempting to poison the cache here similarly to
834         mode_populate.  However that eliminates a valuable
835         distinction between these two modes.  mode_nop is useful to
836         let a user count & trace every single check / registration
837         call.  mode_populate is useful to let a program run fast
838         while unchecked.
839      */
840      judgement = 1;
841      break;
842
843    case mode_populate:
844      entry->low = ptr_low;
845      entry->high = ptr_high;
846      judgement = 1;
847      break;
848
849    case mode_check:
850      {
851        unsigned heuristics = 0;
852
853        /* Advance aging/adaptation counters.  */
854        static unsigned adapt_count;
855        adapt_count ++;
856        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
857                      adapt_count > __mf_opts.adapt_cache))
858          {
859            adapt_count = 0;
860            __mf_adapt_cache ();
861          }
862
863        /* Looping only occurs if heuristics were triggered.  */
864        while (judgement == 0)
865          {
866            DECLARE (void, free, void *p);
867            __mf_object_t* ovr_obj[1];
868            unsigned obj_count;
869            __mf_object_t** all_ovr_obj = NULL;
870            __mf_object_t** dealloc_me = NULL;
871            unsigned i;
872
873            /* Find all overlapping objects.  Be optimistic that there is just one.  */
874            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
875            if (UNLIKELY (obj_count > 1))
876              {
877                /* Allocate a real buffer and do the search again.  */
878                DECLARE (void *, malloc, size_t c);
879                unsigned n;
880                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
881                                                   obj_count));
882                if (all_ovr_obj == NULL) abort ();
883                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
884                assert (n == obj_count);
885                dealloc_me = all_ovr_obj;
886              }
887            else
888              {
889                all_ovr_obj = ovr_obj;
890                dealloc_me = NULL;
891              }
892
893            /* Update object statistics.  */
894            for (i = 0; i < obj_count; i++)
895              {
896                __mf_object_t *obj = all_ovr_obj[i];
897                assert (obj != NULL);
898                if (type == __MF_CHECK_READ)
899                  obj->read_count ++;
900                else
901                  obj->write_count ++;
902                obj->liveness ++;
903              }
904
905            /* Iterate over the various objects.  There are a number of special cases.  */
906            for (i = 0; i < obj_count; i++)
907              {
908                  __mf_object_t *obj = all_ovr_obj[i];
909
910                /* Any __MF_TYPE_NOACCESS hit is bad.  */
911                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
912                  judgement = -1;
913
914                /* Any object with a watch flag is bad.  */
915                if (UNLIKELY (obj->watching_p))
916                  judgement = -2; /* trigger VIOL_WATCH */
917
918                /* A read from an uninitialized object is bad. */
919                if (UNLIKELY (__mf_opts.check_initialization
920                              /* reading */
921                              && type == __MF_CHECK_READ
922                              /* not written */
923                              && obj->write_count == 0
924                              /* uninitialized (heap) */
925                              && obj->type == __MF_TYPE_HEAP))
926                  judgement = -1;
927              }
928
929            /* We now know that the access spans no invalid objects.  */
930            if (LIKELY (judgement >= 0))
931              for (i = 0; i < obj_count; i++)
932                {
933                  __mf_object_t *obj = all_ovr_obj[i];
934
935                  /* Is this access entirely contained within this object?  */
936                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
937                    {
938                      /* Valid access.  */
939                      entry->low = obj->low;
940                      entry->high = obj->high;
941                      judgement = 1;
942                    }
943                }
944
945            /* This access runs off the end of one valid object.  That
946                could be okay, if other valid objects fill in all the
947                holes.  We allow this only for HEAP and GUESS type
948                objects.  Accesses to STATIC and STACK variables
949                should not be allowed to span.  */
950            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
951              {
952                unsigned uncovered = 0;
953                for (i = 0; i < obj_count; i++)
954                  {
955                    __mf_object_t *obj = all_ovr_obj[i];
956                    int j, uncovered_low_p, uncovered_high_p;
957                    uintptr_t ptr_lower, ptr_higher;
958
959                    uncovered_low_p = ptr_low < obj->low;
960                    ptr_lower = CLAMPSUB (obj->low, 1);
961                    uncovered_high_p = ptr_high > obj->high;
962                    ptr_higher = CLAMPADD (obj->high, 1);
963
964                    for (j = 0; j < obj_count; j++)
965                      {
966                        __mf_object_t *obj2 = all_ovr_obj[j];
967
968                        if (i == j) continue;
969
970                        /* Filter out objects that cannot be spanned across.  */
971                        if (obj2->type == __MF_TYPE_STACK
972                            || obj2->type == __MF_TYPE_STATIC)
973                          continue;
974
975                          /* Consider a side "covered" if obj2 includes
976                             the next byte on that side.  */
977                          if (uncovered_low_p
978                              && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
979                            uncovered_low_p = 0;
980                          if (uncovered_high_p
981                              && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
982                            uncovered_high_p = 0;
983                      }
984
985                    if (uncovered_low_p || uncovered_high_p)
986                      uncovered ++;
987                  }
988
989                /* Success if no overlapping objects are uncovered.  */
990                if (uncovered == 0)
991                  judgement = 1;
992                }
993
994
995            if (dealloc_me != NULL)
996              CALL_REAL (free, dealloc_me);
997
998            /* If the judgment is still unknown at this stage, loop
999               around at most one more time.  */
1000            if (judgement == 0)
1001              {
1002                if (heuristics++ < 2) /* XXX parametrize this number? */
1003                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
1004                else
1005                  judgement = -1;
1006              }
1007          }
1008
1009      }
1010      break;
1011
1012    case mode_violate:
1013      judgement = -1;
1014      break;
1015    }
1016
1017  if (__mf_opts.collect_stats)
1018    {
1019      __mf_count_check ++;
1020
1021      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
1022        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
1023        __mf_lookup_cache_reusecount [entry_idx] ++;
1024    }
1025
1026  if (UNLIKELY (judgement < 0))
1027    __mf_violation (ptr, sz,
1028                    (uintptr_t) __builtin_return_address (0), location,
1029                    ((judgement == -1) ?
1030                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
1031                     __MF_VIOL_WATCH));
1032}
1033
1034
1035static __mf_object_t *
1036__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
1037                        const char *name, uintptr_t pc)
1038{
1039  DECLARE (void *, calloc, size_t c, size_t n);
1040
1041  __mf_object_t *new_obj;
1042  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
1043  new_obj->low = low;
1044  new_obj->high = high;
1045  new_obj->type = type;
1046  new_obj->name = name;
1047  new_obj->alloc_pc = pc;
1048#if HAVE_GETTIMEOFDAY
1049  if (__mf_opts.timestamps)
1050    gettimeofday (& new_obj->alloc_time, NULL);
1051#endif
1052#if LIBMUDFLAPTH
1053  new_obj->alloc_thread = pthread_self ();
1054#endif
1055
1056  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
1057    new_obj->alloc_backtrace_size =
1058      __mf_backtrace (& new_obj->alloc_backtrace,
1059                      (void *) pc, 2);
1060
1061  __mf_link_object (new_obj);
1062  return new_obj;
1063}
1064
1065
1066static void
1067__mf_uncache_object (__mf_object_t *old_obj)
1068{
1069  /* Remove any low/high pointers for this object from the lookup cache.  */
1070
1071  /* Can it possibly exist in the cache?  */
1072  if (LIKELY (old_obj->read_count + old_obj->write_count))
1073    {
1074      /* As reported by Herman ten Brugge, we need to scan the entire
1075         cache for entries that may hit this object. */
1076      uintptr_t low = old_obj->low;
1077      uintptr_t high = old_obj->high;
1078      struct __mf_cache *entry = & __mf_lookup_cache [0];
1079      unsigned i;
1080      for (i = 0; i <= __mf_lc_mask; i++, entry++)
1081        {
1082          /* NB: the "||" in the following test permits this code to
1083             tolerate the situation introduced by __mf_check over
1084             contiguous objects, where a cache entry spans several
1085             objects.  */
1086          if (entry->low == low || entry->high == high)
1087            {
1088              entry->low = MAXPTR;
1089              entry->high = MINPTR;
1090            }
1091        }
1092    }
1093}
1094
1095
1096void
1097__mf_register (void *ptr, size_t sz, int type, const char *name)
1098{
1099  LOCKTH ();
1100  BEGIN_RECURSION_PROTECT ();
1101  __mfu_register (ptr, sz, type, name);
1102  END_RECURSION_PROTECT ();
1103  UNLOCKTH ();
1104}
1105
1106
1107void
1108__mfu_register (void *ptr, size_t sz, int type, const char *name)
1109{
1110  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1111         ptr, (unsigned long) sz, type, name ? name : "");
1112
1113  if (__mf_opts.collect_stats)
1114    {
1115      __mf_count_register ++;
1116      __mf_total_register_size [(type < 0) ? 0 :
1117                                (type > __MF_TYPE_MAX) ? 0 :
1118                                type] += sz;
1119    }
1120
1121  if (UNLIKELY (__mf_opts.sigusr1_report))
1122    __mf_sigusr1_respond ();
1123
1124  switch (__mf_opts.mudflap_mode)
1125    {
1126    case mode_nop:
1127      break;
1128
1129    case mode_violate:
1130      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1131                      __MF_VIOL_REGISTER);
1132      break;
1133
1134    case mode_populate:
1135      /* Clear the cache.  */
1136      /* XXX: why the entire cache? */
1137      /* XXX: race */
1138      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1139      /* void slot 0 */
1140      __mf_lookup_cache[0].low = MAXPTR;
1141      break;
1142
1143    case mode_check:
1144      {
1145        __mf_object_t *ovr_objs [1];
1146        unsigned num_overlapping_objs;
1147        uintptr_t low = (uintptr_t) ptr;
1148        uintptr_t high = CLAMPSZ (ptr, sz);
1149        uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1150
1151        /* Treat unknown size indication as 1.  */
1152        if (UNLIKELY (sz == 0)) sz = 1;
1153
1154        /* Look for objects only of the same type.  This will e.g. permit a registration
1155           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
1156           __mf_check time however harmful overlaps will be detected. */
1157        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1158
1159        /* Handle overlaps.  */
1160        if (UNLIKELY (num_overlapping_objs > 0))
1161          {
1162            __mf_object_t *ovr_obj = ovr_objs[0];
1163
1164            /* Accept certain specific duplication pairs.  */
1165            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1166                && ovr_obj->low == low
1167                && ovr_obj->high == high
1168                && ovr_obj->type == type)
1169              {
1170                /* Duplicate registration for static objects may come
1171                   from distinct compilation units.  */
1172                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1173                               (void *) low, (void *) high,
1174                               (ovr_obj->name ? ovr_obj->name : ""));
1175                break;
1176              }
1177
1178            /* Alas, a genuine violation.  */
1179            else
1180              {
1181                /* Two or more *real* mappings here. */
1182                __mf_violation ((void *) ptr, sz,
1183                                (uintptr_t) __builtin_return_address (0), NULL,
1184                                __MF_VIOL_REGISTER);
1185              }
1186          }
1187        else /* No overlapping objects: AOK.  */
1188          __mf_insert_new_object (low, high, type, name, pc);
1189
1190        /* We could conceivably call __mf_check() here to prime the cache,
1191           but then the read_count/write_count field is not reliable.  */
1192        break;
1193      }
1194    } /* end switch (__mf_opts.mudflap_mode) */
1195}
1196
1197
1198void
1199__mf_unregister (void *ptr, size_t sz, int type)
1200{
1201  LOCKTH ();
1202  BEGIN_RECURSION_PROTECT ();
1203  __mfu_unregister (ptr, sz, type);
1204  END_RECURSION_PROTECT ();
1205  UNLOCKTH ();
1206}
1207
1208
1209void
1210__mfu_unregister (void *ptr, size_t sz, int type)
1211{
1212  DECLARE (void, free, void *ptr);
1213
1214  if (UNLIKELY (__mf_opts.sigusr1_report))
1215    __mf_sigusr1_respond ();
1216
1217  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1218
1219  switch (__mf_opts.mudflap_mode)
1220    {
1221    case mode_nop:
1222      break;
1223
1224    case mode_violate:
1225      __mf_violation (ptr, sz,
1226                      (uintptr_t) __builtin_return_address (0), NULL,
1227                      __MF_VIOL_UNREGISTER);
1228      break;
1229
1230    case mode_populate:
1231      /* Clear the cache.  */
1232      /* XXX: race */
1233      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1234      /* void slot 0 */
1235      __mf_lookup_cache[0].low = MAXPTR;
1236      break;
1237
1238    case mode_check:
1239      {
1240        __mf_object_t *old_obj = NULL;
1241        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
1242        __mf_object_t *objs[1] = {NULL};
1243        unsigned num_overlapping_objs;
1244
1245        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1246                                                   CLAMPSZ (ptr, sz), objs, 1, type);
1247
1248        /* Special case for HEAP_I - see free & realloc hook.  They don't
1249           know whether the input region was HEAP or HEAP_I before
1250           unmapping it.  Here we give HEAP a try in case HEAP_I
1251           failed.  */
1252        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1253          {
1254            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1255                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1256          }
1257
1258        old_obj = objs[0];
1259        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1260                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1261                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1262          {
1263            __mf_violation (ptr, sz,
1264                            (uintptr_t) __builtin_return_address (0), NULL,
1265                            __MF_VIOL_UNREGISTER);
1266            break;
1267          }
1268
1269        __mf_unlink_object (old_obj);
1270        __mf_uncache_object (old_obj);
1271
1272        /* Wipe buffer contents if desired.  */
1273        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1274            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1275                                        || old_obj->type == __MF_TYPE_HEAP_I)))
1276          {
1277            memset ((void *) old_obj->low,
1278                    0,
1279                    (size_t) (old_obj->high - old_obj->low + 1));
1280          }
1281
1282        /* Manage the object cemetary.  */
1283        if (__mf_opts.persistent_count > 0
1284	    && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
1285          {
1286            old_obj->deallocated_p = 1;
1287            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1288#if HAVE_GETTIMEOFDAY
1289            if (__mf_opts.timestamps)
1290              gettimeofday (& old_obj->dealloc_time, NULL);
1291#endif
1292#ifdef LIBMUDFLAPTH
1293            old_obj->dealloc_thread = pthread_self ();
1294#endif
1295
1296            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1297              old_obj->dealloc_backtrace_size =
1298                __mf_backtrace (& old_obj->dealloc_backtrace,
1299                                NULL, 2);
1300
1301            /* Encourage this object to be displayed again in current epoch.  */
1302            old_obj->description_epoch --;
1303
1304            /* Put this object into the cemetary.  This may require this plot to
1305               be recycled, and the previous resident to be designated del_obj.  */
1306            {
1307              unsigned row = old_obj->type;
1308              unsigned plot = __mf_object_dead_head [row];
1309
1310              del_obj = __mf_object_cemetary [row][plot];
1311              __mf_object_cemetary [row][plot] = old_obj;
1312              plot ++;
1313              if (plot == __mf_opts.persistent_count) plot = 0;
1314              __mf_object_dead_head [row] = plot;
1315            }
1316          }
1317        else
1318          del_obj = old_obj;
1319
1320        if (__mf_opts.print_leaks)
1321          {
1322            if ((old_obj->read_count + old_obj->write_count) == 0 &&
1323                (old_obj->type == __MF_TYPE_HEAP
1324                 || old_obj->type == __MF_TYPE_HEAP_I))
1325              {
1326                fprintf (stderr,
1327                         "*******\n"
1328                         "mudflap warning: unaccessed registered object:\n");
1329                __mf_describe_object (old_obj);
1330              }
1331          }
1332
1333        if (del_obj != NULL) /* May or may not equal old_obj.  */
1334          {
1335            if (__mf_opts.backtrace > 0)
1336              {
1337                CALL_REAL(free, del_obj->alloc_backtrace);
1338                if (__mf_opts.persistent_count > 0)
1339                  {
1340                    CALL_REAL(free, del_obj->dealloc_backtrace);
1341                  }
1342              }
1343            CALL_REAL(free, del_obj);
1344          }
1345
1346        break;
1347      }
1348    } /* end switch (__mf_opts.mudflap_mode) */
1349
1350
1351  if (__mf_opts.collect_stats)
1352    {
1353      __mf_count_unregister ++;
1354      __mf_total_unregister_size += sz;
1355    }
1356}
1357
1358
1359
1360struct tree_stats
1361{
1362  unsigned obj_count;
1363  unsigned long total_size;
1364  unsigned live_obj_count;
1365  double total_weight;
1366  double weighted_size;
1367  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
1368};
1369
1370
1371
1372static int
1373__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
1374{
1375  __mf_object_t *obj = (__mf_object_t *) n->value;
1376  struct tree_stats *s = (struct tree_stats *) param;
1377
1378  assert (obj != NULL && s != NULL);
1379
1380  /* Exclude never-accessed objects.  */
1381  if (obj->read_count + obj->write_count)
1382    {
1383      s->obj_count ++;
1384      s->total_size += (obj->high - obj->low + 1);
1385
1386      if (obj->liveness)
1387        {
1388          unsigned i;
1389          uintptr_t addr;
1390
1391          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1392             (void *) obj->low, obj->liveness, obj->name); */
1393
1394          s->live_obj_count ++;
1395          s->total_weight += (double) obj->liveness;
1396          s->weighted_size +=
1397            (double) (obj->high - obj->low + 1) *
1398            (double) obj->liveness;
1399
1400          addr = obj->low;
1401          for (i=0; i<sizeof(uintptr_t) * 8; i++)
1402            {
1403              unsigned bit = addr & 1;
1404              s->weighted_address_bits[i][bit] += obj->liveness;
1405              addr = addr >> 1;
1406            }
1407
1408          /* Age the liveness value.  */
1409          obj->liveness >>= 1;
1410        }
1411    }
1412
1413  return 0;
1414}
1415
1416
1417static void
1418__mf_adapt_cache ()
1419{
1420  struct tree_stats s;
1421  uintptr_t new_mask = 0;
1422  unsigned char new_shift;
1423  float cache_utilization;
1424  float max_value;
1425  static float smoothed_new_shift = -1.0;
1426  unsigned i;
1427
1428  memset (&s, 0, sizeof (s));
1429
1430  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1431  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1432  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1433  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1434  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1435
1436  /* Maybe we're dealing with funny aging/adaptation parameters, or an
1437     empty tree.  Just leave the cache alone in such cases, rather
1438     than risk dying by division-by-zero.  */
1439  if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
1440    return;
1441
1442  /* Guess a good value for the shift parameter by finding an address bit that is a
1443     good discriminant of lively objects.  */
1444  max_value = 0.0;
1445  for (i=0; i<sizeof (uintptr_t)*8; i++)
1446    {
1447      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1448      if (max_value < value) max_value = value;
1449    }
1450  for (i=0; i<sizeof (uintptr_t)*8; i++)
1451    {
1452      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
1453      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1454      if (value >= max_value * shoulder_factor)
1455        break;
1456    }
1457  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1458  /* Converge toward this slowly to reduce flapping. */
1459  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1460  new_shift = (unsigned) (smoothed_new_shift + 0.5);
1461  assert (new_shift < sizeof (uintptr_t)*8);
1462
1463  /* Count number of used buckets.  */
1464  cache_utilization = 0.0;
1465  for (i = 0; i < (1 + __mf_lc_mask); i++)
1466    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1467      cache_utilization += 1.0;
1468  cache_utilization /= (1 + __mf_lc_mask);
1469
1470  new_mask |= 0xffff; /* XXX: force a large cache.  */
1471  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1472
1473  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1474                 "util=%u%% m=%p s=%u\n",
1475                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1476                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1477
1478  /* We should reinitialize cache if its parameters have changed.  */
1479  if (new_mask != __mf_lc_mask ||
1480      new_shift != __mf_lc_shift)
1481    {
1482      __mf_lc_mask = new_mask;
1483      __mf_lc_shift = new_shift;
1484      /* XXX: race */
1485      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1486      /* void slot 0 */
1487      __mf_lookup_cache[0].low = MAXPTR;
1488    }
1489}
1490
1491
1492
1493/* __mf_find_object[s] */
1494
1495/* Find overlapping live objecs between [low,high].  Return up to
1496   max_objs of their pointers in objs[].  Return total count of
1497   overlaps (may exceed max_objs). */
1498
1499unsigned
1500__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1501                    __mf_object_t **objs, unsigned max_objs, int type)
1502{
1503  unsigned count = 0;
1504  mfsplay_tree t = __mf_object_tree (type);
1505  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
1506  int direction;
1507
1508  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
1509  /* An exact match for base address implies a hit.  */
1510  if (n != NULL)
1511    {
1512      if (count < max_objs)
1513        objs[count] = (__mf_object_t *) n->value;
1514      count ++;
1515    }
1516
1517  /* Iterate left then right near this key value to find all overlapping objects. */
1518  for (direction = 0; direction < 2; direction ++)
1519    {
1520      /* Reset search origin.  */
1521      k = (mfsplay_tree_key) ptr_low;
1522
1523      while (1)
1524        {
1525          __mf_object_t *obj;
1526
1527          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
1528          if (n == NULL) break;
1529          obj = (__mf_object_t *) n->value;
1530
1531          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1532            break;
1533
1534          if (count < max_objs)
1535            objs[count] = (__mf_object_t *) n->value;
1536          count ++;
1537
1538          k = (mfsplay_tree_key) obj->low;
1539        }
1540    }
1541
1542  return count;
1543}
1544
1545
1546unsigned
1547__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1548                   __mf_object_t **objs, unsigned max_objs)
1549{
1550  int type;
1551  unsigned count = 0;
1552
1553  /* Search each splay tree for overlaps.  */
1554  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1555    {
1556      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1557      if (c > max_objs)
1558        {
1559          max_objs = 0;
1560          objs = NULL;
1561        }
1562      else /* NB: C may equal 0 */
1563        {
1564          max_objs -= c;
1565          objs += c;
1566        }
1567      count += c;
1568    }
1569
1570  return count;
1571}
1572
1573
1574
1575/* __mf_link_object */
1576
1577static void
1578__mf_link_object (__mf_object_t *node)
1579{
1580  mfsplay_tree t = __mf_object_tree (node->type);
1581  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
1582}
1583
1584/* __mf_unlink_object */
1585
1586static void
1587__mf_unlink_object (__mf_object_t *node)
1588{
1589  mfsplay_tree t = __mf_object_tree (node->type);
1590  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
1591}
1592
1593/* __mf_find_dead_objects */
1594
1595/* Find overlapping dead objecs between [low,high].  Return up to
1596   max_objs of their pointers in objs[].  Return total count of
1597   overlaps (may exceed max_objs).  */
1598
1599static unsigned
1600__mf_find_dead_objects (uintptr_t low, uintptr_t high,
1601                        __mf_object_t **objs, unsigned max_objs)
1602{
1603  if (__mf_opts.persistent_count > 0)
1604    {
1605      unsigned count = 0;
1606      unsigned recollection = 0;
1607      unsigned row = 0;
1608
1609      assert (low <= high);
1610      assert (max_objs == 0 || objs != NULL);
1611
1612      /* Widen the search from the most recent plots in each row, looking
1613         backward in time.  */
1614      recollection = 0;
1615      while (recollection < __mf_opts.persistent_count)
1616        {
1617          count = 0;
1618
1619          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1620            {
1621              unsigned plot;
1622              unsigned i;
1623
1624              plot = __mf_object_dead_head [row];
1625              for (i = 0; i <= recollection; i ++)
1626                {
1627                  __mf_object_t *obj;
1628
1629                  /* Look backward through row: it's a circular buffer.  */
1630                  if (plot > 0) plot --;
1631                  else plot = __mf_opts.persistent_count - 1;
1632
1633                  obj = __mf_object_cemetary [row][plot];
1634                  if (obj && obj->low <= high && obj->high >= low)
1635                    {
1636                      /* Found an overlapping dead object!  */
1637                      if (count < max_objs)
1638                        objs [count] = obj;
1639                      count ++;
1640                    }
1641                }
1642            }
1643
1644          if (count)
1645            break;
1646
1647          /* Look farther back in time.  */
1648          recollection = (recollection * 2) + 1;
1649        }
1650
1651      return count;
1652    } else {
1653      return 0;
1654    }
1655}
1656
1657/* __mf_describe_object */
1658
1659static void
1660__mf_describe_object (__mf_object_t *obj)
1661{
1662  static unsigned epoch = 0;
1663  if (obj == NULL)
1664    {
1665      epoch ++;
1666      return;
1667    }
1668
1669  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1670    {
1671      fprintf (stderr,
1672               "mudflap %sobject %p: name=`%s'\n",
1673               (obj->deallocated_p ? "dead " : ""),
1674               (void *) obj, (obj->name ? obj->name : ""));
1675      return;
1676    }
1677  else
1678    obj->description_epoch = epoch;
1679
1680  fprintf (stderr,
1681           "mudflap %sobject %p: name=`%s'\n"
1682           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1683           "alloc time=%lu.%06lu pc=%p"
1684#ifdef LIBMUDFLAPTH
1685           " thread=%u"
1686#endif
1687           "\n",
1688           (obj->deallocated_p ? "dead " : ""),
1689           (void *) obj, (obj->name ? obj->name : ""),
1690           (void *) obj->low, (void *) obj->high,
1691           (unsigned long) (obj->high - obj->low + 1),
1692           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1693            obj->type == __MF_TYPE_HEAP ? "heap" :
1694            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1695            obj->type == __MF_TYPE_STACK ? "stack" :
1696            obj->type == __MF_TYPE_STATIC ? "static" :
1697            obj->type == __MF_TYPE_GUESS ? "guess" :
1698            "unknown"),
1699           obj->read_count, obj->write_count, obj->liveness,
1700           obj->watching_p ? " watching" : "",
1701           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1702           (void *) obj->alloc_pc
1703#ifdef LIBMUDFLAPTH
1704           , (unsigned) obj->alloc_thread
1705#endif
1706           );
1707
1708  if (__mf_opts.backtrace > 0)
1709  {
1710    unsigned i;
1711    for (i=0; i<obj->alloc_backtrace_size; i++)
1712      fprintf (stderr, "      %s\n", obj->alloc_backtrace[i]);
1713  }
1714
1715  if (__mf_opts.persistent_count > 0)
1716    {
1717      if (obj->deallocated_p)
1718        {
1719          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1720#ifdef LIBMUDFLAPTH
1721                   " thread=%u"
1722#endif
1723                   "\n",
1724                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1725                   (void *) obj->dealloc_pc
1726#ifdef LIBMUDFLAPTH
1727                   , (unsigned) obj->dealloc_thread
1728#endif
1729                   );
1730
1731
1732          if (__mf_opts.backtrace > 0)
1733          {
1734            unsigned i;
1735            for (i=0; i<obj->dealloc_backtrace_size; i++)
1736              fprintf (stderr, "      %s\n", obj->dealloc_backtrace[i]);
1737          }
1738        }
1739    }
1740}
1741
1742
1743static int
1744__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
1745{
1746  __mf_object_t *node = (__mf_object_t *) n->value;
1747  unsigned *count = (unsigned *) param;
1748
1749  if (count != NULL)
1750    (*count) ++;
1751
1752  fprintf (stderr, "Leaked object %u:\n", (*count));
1753  __mf_describe_object (node);
1754
1755  return 0;
1756}
1757
1758
1759static unsigned
1760__mf_report_leaks ()
1761{
1762  unsigned count = 0;
1763
1764  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1765                             __mf_report_leaks_fn, & count);
1766  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1767                             __mf_report_leaks_fn, & count);
1768
1769  return count;
1770}
1771
1772/* ------------------------------------------------------------------------ */
1773/* __mf_report */
1774
1775void
1776__mf_report ()
1777{
1778  LOCKTH ();
1779  BEGIN_RECURSION_PROTECT ();
1780  __mfu_report ();
1781  END_RECURSION_PROTECT ();
1782  UNLOCKTH ();
1783}
1784
1785void
1786__mfu_report ()
1787{
1788  if (__mf_opts.collect_stats)
1789    {
1790      fprintf (stderr,
1791               "*******\n"
1792               "mudflap stats:\n"
1793               "calls to __mf_check: %lu\n"
1794               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1795               "         __mf_unregister: %lu [%luB]\n"
1796               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1797               __mf_count_check,
1798               __mf_count_register,
1799               __mf_total_register_size[0], __mf_total_register_size[1],
1800               __mf_total_register_size[2], __mf_total_register_size[3],
1801               __mf_total_register_size[4], /* XXX */
1802               __mf_count_unregister, __mf_total_unregister_size,
1803               __mf_count_violation[0], __mf_count_violation[1],
1804               __mf_count_violation[2], __mf_count_violation[3],
1805               __mf_count_violation[4]);
1806
1807      fprintf (stderr,
1808               "calls with reentrancy: %lu\n", __mf_reentrancy);
1809#ifdef LIBMUDFLAPTH
1810      fprintf (stderr,
1811               "           lock contention: %lu\n", __mf_lock_contention);
1812#endif
1813
1814      /* Lookup cache stats.  */
1815      {
1816        unsigned i;
1817        unsigned max_reuse = 0;
1818        unsigned num_used = 0;
1819        unsigned num_unused = 0;
1820
1821        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1822          {
1823            if (__mf_lookup_cache_reusecount[i])
1824              num_used ++;
1825            else
1826              num_unused ++;
1827            if (max_reuse < __mf_lookup_cache_reusecount[i])
1828              max_reuse = __mf_lookup_cache_reusecount[i];
1829          }
1830        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
1831                 num_used, num_unused, max_reuse);
1832      }
1833
1834      {
1835        unsigned live_count;
1836        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1837        fprintf (stderr, "number of live objects: %u\n", live_count);
1838      }
1839
1840      if (__mf_opts.persistent_count > 0)
1841        {
1842          unsigned dead_count = 0;
1843          unsigned row, plot;
1844          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1845            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1846              if (__mf_object_cemetary [row][plot] != 0)
1847                dead_count ++;
1848          fprintf (stderr, "          zombie objects: %u\n", dead_count);
1849        }
1850    }
1851  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
1852    {
1853      unsigned l;
1854      extern void * __mf_wrap_alloca_indirect (size_t c);
1855
1856      /* Free up any remaining alloca()'d blocks.  */
1857      __mf_wrap_alloca_indirect (0);
1858      __mf_describe_object (NULL); /* Reset description epoch.  */
1859      l = __mf_report_leaks ();
1860      fprintf (stderr, "number of leaked objects: %u\n", l);
1861    }
1862}
1863
1864/* __mf_backtrace */
1865
1866size_t
1867__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
1868{
1869  void ** pc_array;
1870  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1871  unsigned remaining_size;
1872  unsigned omitted_size = 0;
1873  unsigned i;
1874  DECLARE (void, free, void *ptr);
1875  DECLARE (void *, calloc, size_t c, size_t n);
1876  DECLARE (void *, malloc, size_t n);
1877
1878  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1879#ifdef HAVE_BACKTRACE
1880  pc_array_size = backtrace (pc_array, pc_array_size);
1881#else
1882#define FETCH(n) do { if (pc_array_size >= n) { \
1883                 pc_array[n] = __builtin_return_address(n); \
1884                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1885
1886  /* Unroll some calls __builtin_return_address because this function
1887     only takes a literal integer parameter.  */
1888  FETCH (0);
1889#if 0
1890  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1891     rather than simply returning 0.  :-(  */
1892  FETCH (1);
1893  FETCH (2);
1894  FETCH (3);
1895  FETCH (4);
1896  FETCH (5);
1897  FETCH (6);
1898  FETCH (7);
1899  FETCH (8);
1900  if (pc_array_size > 8) pc_array_size = 9;
1901#else
1902  if (pc_array_size > 0) pc_array_size = 1;
1903#endif
1904
1905#undef FETCH
1906#endif
1907
1908  /* We want to trim the first few levels of the stack traceback,
1909     since they contain libmudflap wrappers and junk.  If pc_array[]
1910     ends up containing a non-NULL guess_pc, then trim everything
1911     before that.  Otherwise, omit the first guess_omit_levels
1912     entries. */
1913
1914  if (guess_pc != NULL)
1915    for (i=0; i<pc_array_size; i++)
1916      if (pc_array [i] == guess_pc)
1917        omitted_size = i;
1918
1919  if (omitted_size == 0) /* No match? */
1920    if (pc_array_size > guess_omit_levels)
1921      omitted_size = guess_omit_levels;
1922
1923  remaining_size = pc_array_size - omitted_size;
1924
1925#ifdef HAVE_BACKTRACE_SYMBOLS
1926  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
1927#else
1928  {
1929    /* Let's construct a buffer by hand.  It will have <remaining_size>
1930       char*'s at the front, pointing at individual strings immediately
1931       afterwards.  */
1932    void *buffer;
1933    char *chars;
1934    char **pointers;
1935    enum { perline = 30 };
1936    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
1937    pointers = (char **) buffer;
1938    chars = (char *)buffer + (remaining_size * sizeof (char *));
1939    for (i = 0; i < remaining_size; i++)
1940      {
1941        pointers[i] = chars;
1942        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
1943        chars = chars + perline;
1944      }
1945    *symbols = pointers;
1946  }
1947#endif
1948  CALL_REAL (free, pc_array);
1949
1950  return remaining_size;
1951}
1952
1953/* ------------------------------------------------------------------------ */
1954/* __mf_violation */
1955
1956void
1957__mf_violation (void *ptr, size_t sz, uintptr_t pc,
1958                const char *location, int type)
1959{
1960  char buf [128];
1961  static unsigned violation_number;
1962  DECLARE(void, free, void *ptr);
1963
1964  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
1965         (void *) pc,
1966         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
1967
1968  if (__mf_opts.collect_stats)
1969    __mf_count_violation [(type < 0) ? 0 :
1970                          (type > __MF_VIOL_WATCH) ? 0 :
1971                          type] ++;
1972
1973  /* Print out a basic warning message.  */
1974  if (__mf_opts.verbose_violations)
1975  {
1976    unsigned dead_p;
1977    unsigned num_helpful = 0;
1978    struct timeval now = { 0, 0 };
1979#if HAVE_GETTIMEOFDAY
1980    gettimeofday (& now, NULL);
1981#endif
1982
1983    violation_number ++;
1984    fprintf (stderr,
1985             "*******\n"
1986             "mudflap violation %u (%s): time=%lu.%06lu "
1987             "ptr=%p size=%lu\npc=%p%s%s%s\n",
1988             violation_number,
1989             ((type == __MF_VIOL_READ) ? "check/read" :
1990              (type == __MF_VIOL_WRITE) ? "check/write" :
1991              (type == __MF_VIOL_REGISTER) ? "register" :
1992              (type == __MF_VIOL_UNREGISTER) ? "unregister" :
1993              (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
1994             now.tv_sec, now.tv_usec,
1995             (void *) ptr, (unsigned long)sz, (void *) pc,
1996             (location != NULL ? " location=`" : ""),
1997             (location != NULL ? location : ""),
1998             (location != NULL ? "'" : ""));
1999
2000    if (__mf_opts.backtrace > 0)
2001      {
2002        char ** symbols;
2003        unsigned i, num;
2004
2005        num = __mf_backtrace (& symbols, (void *) pc, 2);
2006        /* Note: backtrace_symbols calls malloc().  But since we're in
2007           __mf_violation and presumably __mf_check, it'll detect
2008           recursion, and not put the new string into the database.  */
2009
2010        for (i=0; i<num; i++)
2011          fprintf (stderr, "      %s\n", symbols[i]);
2012
2013        /* Calling free() here would trigger a violation.  */
2014        CALL_REAL(free, symbols);
2015      }
2016
2017
2018    /* Look for nearby objects.  For this, we start with s_low/s_high
2019       pointing to the given area, looking for overlapping objects.
2020       If none show up, widen the search area and keep looking. */
2021
2022    if (sz == 0) sz = 1;
2023
2024    for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2025      {
2026        enum {max_objs = 3}; /* magic */
2027        __mf_object_t *objs[max_objs];
2028        unsigned num_objs = 0;
2029        uintptr_t s_low, s_high;
2030        unsigned tries = 0;
2031        unsigned i;
2032
2033        s_low = (uintptr_t) ptr;
2034        s_high = CLAMPSZ (ptr, sz);
2035
2036        while (tries < 16) /* magic */
2037          {
2038            if (dead_p)
2039              num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2040            else
2041              num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2042
2043            if (num_objs) /* good enough */
2044              break;
2045
2046            tries ++;
2047
2048            /* XXX: tune this search strategy.  It's too dependent on
2049             sz, which can vary from 1 to very big (when array index
2050             checking) numbers. */
2051            s_low = CLAMPSUB (s_low, (sz * tries * tries));
2052            s_high = CLAMPADD (s_high, (sz * tries * tries));
2053          }
2054
2055        for (i = 0; i < min (num_objs, max_objs); i++)
2056          {
2057            __mf_object_t *obj = objs[i];
2058            uintptr_t low = (uintptr_t) ptr;
2059            uintptr_t high = CLAMPSZ (ptr, sz);
2060            unsigned before1 = (low < obj->low) ? obj->low - low : 0;
2061            unsigned after1 = (low > obj->high) ? low - obj->high : 0;
2062            unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2063            unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2064            unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2065            unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2066
2067            fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2068                     num_helpful + i + 1,
2069                     (before1 ? before1 : after1 ? after1 : into1),
2070                     (before1 ? "before" : after1 ? "after" : "into"),
2071                     (before2 ? before2 : after2 ? after2 : into2),
2072                     (before2 ? "before" : after2 ? "after" : "into"));
2073            __mf_describe_object (obj);
2074          }
2075        num_helpful += num_objs;
2076      }
2077
2078    fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
2079  }
2080
2081  /* How to finally handle this violation?  */
2082  switch (__mf_opts.violation_mode)
2083    {
2084    case viol_nop:
2085      break;
2086    case viol_segv:
2087      kill (getpid(), SIGSEGV);
2088      break;
2089    case viol_abort:
2090      abort ();
2091      break;
2092    case viol_gdb:
2093
2094      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
2095      system (buf);
2096      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2097      instead, and let the forked child execlp() gdb.  That way, this
2098      subject process can be resumed under the supervision of gdb.
2099      This can't happen now, since system() only returns when gdb
2100      dies.  In that case, we need to beware of starting a second
2101      concurrent gdb child upon the next violation.  (But if the first
2102      gdb dies, then starting a new one is appropriate.)  */
2103      break;
2104    }
2105}
2106
2107/* ------------------------------------------------------------------------ */
2108
2109
2110unsigned __mf_watch (void *ptr, size_t sz)
2111{
2112  unsigned rc;
2113  LOCKTH ();
2114  BEGIN_RECURSION_PROTECT ();
2115  rc = __mf_watch_or_not (ptr, sz, 1);
2116  END_RECURSION_PROTECT ();
2117  UNLOCKTH ();
2118  return rc;
2119}
2120
2121unsigned __mf_unwatch (void *ptr, size_t sz)
2122{
2123  unsigned rc;
2124  LOCKTH ();
2125  rc = __mf_watch_or_not (ptr, sz, 0);
2126  UNLOCKTH ();
2127  return rc;
2128}
2129
2130
2131static unsigned
2132__mf_watch_or_not (void *ptr, size_t sz, char flag)
2133{
2134  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2135  uintptr_t ptr_low = (uintptr_t) ptr;
2136  unsigned count = 0;
2137
2138  TRACE ("%s ptr=%p size=%lu\n",
2139         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2140
2141  switch (__mf_opts.mudflap_mode)
2142    {
2143    case mode_nop:
2144    case mode_populate:
2145    case mode_violate:
2146      count = 0;
2147      break;
2148
2149    case mode_check:
2150      {
2151        __mf_object_t **all_ovr_objs;
2152        unsigned obj_count;
2153        unsigned n;
2154        DECLARE (void *, malloc, size_t c);
2155        DECLARE (void, free, void *p);
2156
2157        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2158        VERBOSE_TRACE (" %u:", obj_count);
2159
2160        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2161        if (all_ovr_objs == NULL) abort ();
2162        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2163        assert (n == obj_count);
2164
2165        for (n = 0; n < obj_count; n ++)
2166          {
2167            __mf_object_t *obj = all_ovr_objs[n];
2168
2169            VERBOSE_TRACE (" [%p]", (void *) obj);
2170            if (obj->watching_p != flag)
2171              {
2172                obj->watching_p = flag;
2173                count ++;
2174
2175                /* Remove object from cache, to ensure next access
2176                   goes through __mf_check().  */
2177                if (flag)
2178                  __mf_uncache_object (obj);
2179              }
2180          }
2181        CALL_REAL (free, all_ovr_objs);
2182      }
2183      break;
2184    }
2185
2186  return count;
2187}
2188
2189
2190void
2191__mf_sigusr1_handler (int num)
2192{
2193  __mf_sigusr1_received ++;
2194}
2195
2196/* Install or remove SIGUSR1 handler as necessary.
2197   Also, respond to a received pending SIGUSR1.  */
2198void
2199__mf_sigusr1_respond ()
2200{
2201  static int handler_installed;
2202
2203#ifdef SIGUSR1
2204  /* Manage handler */
2205  if (__mf_opts.sigusr1_report && ! handler_installed)
2206    {
2207      signal (SIGUSR1, __mf_sigusr1_handler);
2208      handler_installed = 1;
2209    }
2210  else if(! __mf_opts.sigusr1_report && handler_installed)
2211    {
2212      signal (SIGUSR1, SIG_DFL);
2213      handler_installed = 0;
2214    }
2215#endif
2216
2217  /* Manage enqueued signals */
2218  if (__mf_sigusr1_received > __mf_sigusr1_handled)
2219    {
2220      __mf_sigusr1_handled ++;
2221      assert (__mf_get_state () == reentrant);
2222      __mfu_report ();
2223      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
2224    }
2225}
2226
2227
2228/* XXX: provide an alternative __assert_fail function that cannot
2229   fail due to libmudflap infinite recursion.  */
2230#ifndef NDEBUG
2231
2232static void
2233write_itoa (int fd, unsigned n)
2234{
2235  enum x { bufsize = sizeof(n)*4 };
2236  char buf [bufsize];
2237  unsigned i;
2238
2239  for (i=0; i<bufsize-1; i++)
2240    {
2241      unsigned digit = n % 10;
2242      buf[bufsize-2-i] = digit + '0';
2243      n /= 10;
2244      if (n == 0)
2245        {
2246          char *m = & buf [bufsize-2-i];
2247          buf[bufsize-1] = '\0';
2248          write (fd, m, strlen(m));
2249          break;
2250        }
2251    }
2252}
2253
2254
2255void
2256__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2257{
2258#define write2(string) write (2, (string), strlen ((string)));
2259  write2("mf");
2260#ifdef LIBMUDFLAPTH
2261  write2("(");
2262  write_itoa (2, (unsigned) pthread_self ());
2263  write2(")");
2264#endif
2265  write2(": assertion failure: `");
2266  write (2, msg, strlen (msg));
2267  write2("' in ");
2268  write (2, func, strlen (func));
2269  write2(" at ");
2270  write (2, file, strlen (file));
2271  write2(":");
2272  write_itoa (2, line);
2273  write2("\n");
2274#undef write2
2275  abort ();
2276}
2277
2278
2279#endif
2280
2281
2282
2283/* Adapted splay tree code, originally from libiberty.  It has been
2284   specialized for libmudflap as requested by RMS.  */
2285
2286static void
2287mfsplay_tree_free (void *p)
2288{
2289  DECLARE (void, free, void *p);
2290  CALL_REAL (free, p);
2291}
2292
2293static void *
2294mfsplay_tree_xmalloc (size_t s)
2295{
2296  DECLARE (void *, malloc, size_t s);
2297  return CALL_REAL (malloc, s);
2298}
2299
2300
2301static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
2302static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
2303                                                mfsplay_tree_key,
2304                                                mfsplay_tree_node *,
2305                                                mfsplay_tree_node *,
2306                                                mfsplay_tree_node *);
2307
2308
2309/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
2310   and grandparent, respectively, of NODE.  */
2311
2312static mfsplay_tree_node
2313mfsplay_tree_splay_helper (mfsplay_tree sp,
2314                         mfsplay_tree_key key,
2315                         mfsplay_tree_node * node,
2316                         mfsplay_tree_node * parent,
2317                         mfsplay_tree_node * grandparent)
2318{
2319  mfsplay_tree_node *next;
2320  mfsplay_tree_node n;
2321  int comparison;
2322
2323  n = *node;
2324
2325  if (!n)
2326    return *parent;
2327
2328  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));
2329
2330  if (comparison == 0)
2331    /* We've found the target.  */
2332    next = 0;
2333  else if (comparison < 0)
2334    /* The target is to the left.  */
2335    next = &n->left;
2336  else
2337    /* The target is to the right.  */
2338    next = &n->right;
2339
2340  if (next)
2341    {
2342      /* Check whether our recursion depth is too high.  Abort this search,
2343         and signal that a rebalance is required to continue.  */
2344      if (sp->depth > sp->max_depth)
2345        {
2346          sp->rebalance_p = 1;
2347          return n;
2348         }
2349
2350      /* Continue down the tree.  */
2351      sp->depth ++;
2352      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
2353      sp->depth --;
2354
2355      /* The recursive call will change the place to which NODE
2356         points.  */
2357      if (*node != n || sp->rebalance_p)
2358        return n;
2359    }
2360
2361  if (!parent)
2362    /* NODE is the root.  We are done.  */
2363    return n;
2364
2365  /* First, handle the case where there is no grandparent (i.e.,
2366   *PARENT is the root of the tree.)  */
2367  if (!grandparent)
2368    {
2369      if (n == (*parent)->left)
2370        {
2371          *node = n->right;
2372          n->right = *parent;
2373        }
2374      else
2375        {
2376          *node = n->left;
2377          n->left = *parent;
2378        }
2379      *parent = n;
2380      return n;
2381    }
2382
2383  /* Next handle the cases where both N and *PARENT are left children,
2384     or where both are right children.  */
2385  if (n == (*parent)->left && *parent == (*grandparent)->left)
2386    {
2387      mfsplay_tree_node p = *parent;
2388
2389      (*grandparent)->left = p->right;
2390      p->right = *grandparent;
2391      p->left = n->right;
2392      n->right = p;
2393      *grandparent = n;
2394      return n;
2395    }
2396  else if (n == (*parent)->right && *parent == (*grandparent)->right)
2397    {
2398      mfsplay_tree_node p = *parent;
2399
2400      (*grandparent)->right = p->left;
2401      p->left = *grandparent;
2402      p->right = n->left;
2403      n->left = p;
2404      *grandparent = n;
2405      return n;
2406    }
2407
2408  /* Finally, deal with the case where N is a left child, but *PARENT
2409     is a right child, or vice versa.  */
2410  if (n == (*parent)->left)
2411    {
2412      (*parent)->left = n->right;
2413      n->right = *parent;
2414      (*grandparent)->right = n->left;
2415      n->left = *grandparent;
2416      *grandparent = n;
2417      return n;
2418    }
2419  else
2420    {
2421      (*parent)->right = n->left;
2422      n->left = *parent;
2423      (*grandparent)->left = n->right;
2424      n->right = *grandparent;
2425      *grandparent = n;
2426      return n;
2427    }
2428}
2429
2430
2431
2432static int
2433mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
2434{
2435  mfsplay_tree_node **p = array_ptr;
2436  *(*p) = n;
2437  (*p)++;
2438  return 0;
2439}
2440
2441
2442static mfsplay_tree_node
2443mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
2444                              unsigned high)
2445{
2446  unsigned middle = low + (high - low) / 2;
2447  mfsplay_tree_node n = array[middle];
2448
2449  /* Note that since we're producing a balanced binary tree, it is not a problem
2450     that this function is recursive.  */
2451  if (low + 1 <= middle)
2452    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
2453  else
2454    n->left = NULL;
2455
2456  if (middle + 1 <= high)
2457    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
2458  else
2459    n->right = NULL;
2460
2461  return n;
2462}
2463
2464
2465/* Rebalance the entire tree.  Do this by copying all the node
2466   pointers into an array, then cleverly re-linking them.  */
2467static void
2468mfsplay_tree_rebalance (mfsplay_tree sp)
2469{
2470  mfsplay_tree_node *all_nodes, *all_nodes_1;
2471
2472  if (sp->num_keys <= 2)
2473    return;
2474
2475  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);
2476
2477  /* Traverse all nodes to copy their addresses into this array.  */
2478  all_nodes_1 = all_nodes;
2479  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
2480                      (void *) &all_nodes_1);
2481
2482  /* Relink all the nodes.  */
2483  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);
2484
2485  mfsplay_tree_free (all_nodes);
2486}
2487
2488
2489/* Splay SP around KEY.  */
2490static void
2491mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
2492{
2493  if (sp->root == 0)
2494    return;
2495
2496  /* If we just splayed the tree with the same key, do nothing.  */
2497  if (sp->last_splayed_key_p &&
2498      (sp->last_splayed_key == key))
2499    return;
2500
2501  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
2502     The idea is to limit excessive stack usage if we're facing
2503     degenerate access patterns.  Unfortunately such patterns can occur
2504     e.g. during static initialization, where many static objects might
2505     be registered in increasing address sequence, or during a case where
2506     large tree-like heap data structures are allocated quickly.
2507
2508     On x86, this corresponds to roughly 200K of stack usage.
2509     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
2510  sp->max_depth = 2500;
2511  sp->rebalance_p = sp->depth = 0;
2512
2513  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2514  if (sp->rebalance_p)
2515    {
2516      mfsplay_tree_rebalance (sp);
2517
2518      sp->rebalance_p = sp->depth = 0;
2519      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2520
2521      if (sp->rebalance_p)
2522        abort ();
2523    }
2524
2525
2526  /* Cache this splay key. */
2527  sp->last_splayed_key = key;
2528  sp->last_splayed_key_p = 1;
2529}
2530
2531
2532
2533/* Allocate a new splay tree.  */
2534static mfsplay_tree
2535mfsplay_tree_new ()
2536{
2537  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
2538  sp->root = NULL;
2539  sp->last_splayed_key_p = 0;
2540  sp->num_keys = 0;
2541
2542  return sp;
2543}
2544
2545
2546
2547/* Insert a new node (associating KEY with DATA) into SP.  If a
2548   previous node with the indicated KEY exists, its data is replaced
2549   with the new value.  Returns the new node.  */
2550static mfsplay_tree_node
2551mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
2552{
2553  int comparison = 0;
2554
2555  mfsplay_tree_splay (sp, key);
2556
2557  if (sp->root)
2558    comparison = ((sp->root->key > key) ? 1 :
2559                  ((sp->root->key < key) ? -1 : 0));
2560
2561  if (sp->root && comparison == 0)
2562    {
2563      /* If the root of the tree already has the indicated KEY, just
2564         replace the value with VALUE.  */
2565      sp->root->value = value;
2566    }
2567  else
2568    {
2569      /* Create a new node, and insert it at the root.  */
2570      mfsplay_tree_node node;
2571
2572      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
2573      node->key = key;
2574      node->value = value;
2575      sp->num_keys++;
2576      if (!sp->root)
2577        node->left = node->right = 0;
2578      else if (comparison < 0)
2579        {
2580          node->left = sp->root;
2581          node->right = node->left->right;
2582          node->left->right = 0;
2583        }
2584      else
2585        {
2586          node->right = sp->root;
2587          node->left = node->right->left;
2588          node->right->left = 0;
2589        }
2590
2591      sp->root = node;
2592      sp->last_splayed_key_p = 0;
2593    }
2594
2595  return sp->root;
2596}
2597
2598/* Remove KEY from SP.  It is not an error if it did not exist.  */
2599
2600static void
2601mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
2602{
2603  mfsplay_tree_splay (sp, key);
2604  sp->last_splayed_key_p = 0;
2605  if (sp->root && (sp->root->key == key))
2606    {
2607      mfsplay_tree_node left, right;
2608      left = sp->root->left;
2609      right = sp->root->right;
2610      /* Delete the root node itself.  */
2611      mfsplay_tree_free (sp->root);
2612      sp->num_keys--;
2613      /* One of the children is now the root.  Doesn't matter much
2614         which, so long as we preserve the properties of the tree.  */
2615      if (left)
2616        {
2617          sp->root = left;
2618          /* If there was a right child as well, hang it off the
2619             right-most leaf of the left child.  */
2620          if (right)
2621            {
2622              while (left->right)
2623                left = left->right;
2624              left->right = right;
2625            }
2626        }
2627      else
2628        sp->root = right;
2629    }
2630}
2631
2632/* Lookup KEY in SP, returning VALUE if present, and NULL
2633   otherwise.  */
2634
2635static mfsplay_tree_node
2636mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
2637{
2638  mfsplay_tree_splay (sp, key);
2639  if (sp->root && (sp->root->key == key))
2640    return sp->root;
2641  else
2642    return 0;
2643}
2644
2645
2646/* Return the immediate predecessor KEY, or NULL if there is no
2647   predecessor.  KEY need not be present in the tree.  */
2648
2649static mfsplay_tree_node
2650mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
2651{
2652  int comparison;
2653  mfsplay_tree_node node;
2654  /* If the tree is empty, there is certainly no predecessor.  */
2655  if (!sp->root)
2656    return NULL;
2657  /* Splay the tree around KEY.  That will leave either the KEY
2658     itself, its predecessor, or its successor at the root.  */
2659  mfsplay_tree_splay (sp, key);
2660  comparison = ((sp->root->key > key) ? 1 :
2661                ((sp->root->key < key) ? -1 : 0));
2662
2663  /* If the predecessor is at the root, just return it.  */
2664  if (comparison < 0)
2665    return sp->root;
2666  /* Otherwise, find the rightmost element of the left subtree.  */
2667  node = sp->root->left;
2668  if (node)
2669    while (node->right)
2670      node = node->right;
2671  return node;
2672}
2673
2674/* Return the immediate successor KEY, or NULL if there is no
2675   successor.  KEY need not be present in the tree.  */
2676
2677static mfsplay_tree_node
2678mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
2679{
2680  int comparison;
2681  mfsplay_tree_node node;
2682  /* If the tree is empty, there is certainly no successor.  */
2683  if (!sp->root)
2684    return NULL;
2685  /* Splay the tree around KEY.  That will leave either the KEY
2686     itself, its predecessor, or its successor at the root.  */
2687  mfsplay_tree_splay (sp, key);
2688  comparison = ((sp->root->key > key) ? 1 :
2689                ((sp->root->key < key) ? -1 : 0));
2690  /* If the successor is at the root, just return it.  */
2691  if (comparison > 0)
2692    return sp->root;
2693  /* Otherwise, find the leftmost element of the right subtree.  */
2694  node = sp->root->right;
2695  if (node)
2696    while (node->left)
2697      node = node->left;
2698  return node;
2699}
2700
2701/* Call FN, passing it the DATA, for every node in SP, following an
2702   in-order traversal.  If FN every returns a non-zero value, the
2703   iteration ceases immediately, and the value is returned.
2704   Otherwise, this function returns 0.
2705
2706   This function simulates recursion using dynamically allocated
2707   arrays, since it may be called from mfsplay_tree_rebalance(), which
2708   in turn means that the tree is already uncomfortably deep for stack
2709   space limits.  */
2710static int
2711mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
2712{
2713  mfsplay_tree_node *stack1;
2714  char *stack2;
2715  unsigned sp;
2716  int val = 0;
2717  enum s { s_left, s_here, s_right, s_up };
2718
2719  if (st->root == NULL) /* => num_keys == 0 */
2720    return 0;
2721
2722  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
2723  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);
2724
2725  sp = 0;
2726  stack1 [sp] = st->root;
2727  stack2 [sp] = s_left;
2728
2729  while (1)
2730    {
2731      mfsplay_tree_node n;
2732      enum s s;
2733
2734      n = stack1 [sp];
2735      s = stack2 [sp];
2736
2737      /* Handle each of the four possible states separately.  */
2738
2739      /* 1: We're here to traverse the left subtree (if any).  */
2740      if (s == s_left)
2741        {
2742          stack2 [sp] = s_here;
2743          if (n->left != NULL)
2744            {
2745              sp ++;
2746              stack1 [sp] = n->left;
2747              stack2 [sp] = s_left;
2748            }
2749        }
2750
2751      /* 2: We're here to traverse this node.  */
2752      else if (s == s_here)
2753        {
2754          stack2 [sp] = s_right;
2755          val = (*fn) (n, data);
2756          if (val) break;
2757        }
2758
2759      /* 3: We're here to traverse the right subtree (if any).  */
2760      else if (s == s_right)
2761        {
2762          stack2 [sp] = s_up;
2763          if (n->right != NULL)
2764            {
2765              sp ++;
2766              stack1 [sp] = n->right;
2767              stack2 [sp] = s_left;
2768            }
2769        }
2770
2771      /* 4: We're here after both subtrees (if any) have been traversed.  */
2772      else if (s == s_up)
2773        {
2774          /* Pop the stack.  */
2775          if (sp == 0) break; /* Popping off the root note: we're finished!  */
2776          sp --;
2777        }
2778
2779      else
2780        abort ();
2781    }
2782
2783  mfsplay_tree_free (stack1);
2784  mfsplay_tree_free (stack2);
2785  return val;
2786}
2787