/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2016 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */

#include "config.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"

/* Memory allocation on systems that provide anonymous mmap.  This
   permits the backtrace functions to be invoked from a signal
   handler, assuming that mmap is async-signal safe.  */

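/* An illustrative sketch (not part of this file's interfaces) of how
   a caller elsewhere in libbacktrace allocates and frees a block:

     void *p = backtrace_alloc (state, 128, error_callback, data);
     if (p != NULL)
       {
         ... use the 128 bytes at p ...
         backtrace_free (state, p, 128, error_callback, data);
       }

   Unlike the C library's free, backtrace_free takes the size of the
   block: no per-block header is stored, so callers must remember how
   much they asked for.  */
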
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif

/* A list of free memory blocks.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
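/* When a block is returned to the free list, its first bytes are
   reused to hold this header: NEXT links the block onto
   STATE->freelist and SIZE records how many bytes, header included,
   are available for a later allocation.  Blocks smaller than the
   header cannot be tracked and are simply leaked.  */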

/* Free memory allocated by backtrace_alloc.  This must be called
   with the allocation lock held, or when STATE is not threaded.  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  */
  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      struct backtrace_freelist_struct *p;

      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}

/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  */

void *
backtrace_alloc (struct backtrace_state *state,
                 size_t size, backtrace_error_callback error_callback,
                 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if ((*pp)->size >= size)
            {
              struct backtrace_freelist_struct *p;

              p = *pp;
              *pp = p->next;

              /* Round for alignment; we assume that no type we care about
                 is more than 8 bytes.  */
              size = (size + 7) & ~ (size_t) 7;
              if (size < p->size)
                backtrace_free_locked (state, (char *) p + size,
                                       p->size - size);

              ret = (void *) p;

              break;
            }
        }

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate one or more new pages from the system.  */

      pagesize = getpagesize ();
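      /* Round the request up to a whole number of pages.  The
         bit-masking below assumes that the page size is a power of
         two, which is the case in practice; for example, a 5000-byte
         request with 4096-byte pages becomes an 8192-byte mapping.  */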
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
        {
          if (error_callback)
            error_callback (data, "mmap", errno);
        }
      else
        {
          size = (size + 7) & ~ (size_t) 7;
          if (size < asksize)
            backtrace_free (state, (char *) page + size, asksize - size,
                            error_callback, data);

          ret = page;
        }
    }

  return ret;
}

/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
                backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we are freeing a large aligned block, just release it back to
     the system.  This case arises when growing a vector for a large
     binary with lots of debug info.  Calling munmap here may cause us
     to call mmap again if there is also a large shared library; we
     just live with that.  */
  if (size >= 16 * 4096)
    {
      size_t pagesize;

      pagesize = getpagesize ();
      if (((uintptr_t) addr & (pagesize - 1)) == 0
          && (size & (pagesize - 1)) == 0)
        {
          /* If munmap fails for some reason, just add the block to
             the freelist.  */
          if (munmap (addr, size) == 0)
            return;
        }
    }

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }
}

/* Grow VEC by SIZE bytes.  Return a pointer to the newly reserved
   space, or NULL if the allocation fails.  */

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
                       backtrace_error_callback error_callback,
                       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

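      /* Growth policy: the very first allocation reserves room for
         16 objects of this size; while the requested total is still
         under a page, it is doubled and capped at one page; after
         that it is doubled and rounded up to a whole number of
         pages.  */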
      pagesize = getpagesize ();
      alc = vec->size + size;
      if (vec->size == 0)
        alc = 16 * size;
      else if (alc < pagesize)
        {
          alc *= 2;
          if (alc > pagesize)
            alc = pagesize;
        }
      else
        {
          alc *= 2;
          alc = (alc + pagesize - 1) & ~ (pagesize - 1);
        }
      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
        return NULL;
      if (vec->base != NULL)
        {
          memcpy (base, vec->base, vec->size);
          backtrace_free (state, vec->base, vec->size + vec->alc,
                          error_callback, data);
        }
      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;
  return ret;
}

/* Finish the current allocation on VEC: return its address, and
   arrange for the next allocation to start after it.  */

void *
backtrace_vector_finish (struct backtrace_state *state ATTRIBUTE_UNUSED,
                         struct backtrace_vector *vec,
                         backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                         void *data ATTRIBUTE_UNUSED)
{
  void *ret;

  ret = vec->base;
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
  return ret;
}
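
/* An illustrative sketch (not part of this file's interfaces) of the
   intended use of the vector functions: grow the vector to reserve
   space, fill that space in, then finish to fix the bytes in place
   and start the next allocation.  Here COUNT and BYTES stand for
   some caller-supplied data:

     struct backtrace_vector vec;
     char *p;

     vec.base = NULL;
     vec.size = 0;
     vec.alc = 0;
     p = (char *) backtrace_vector_grow (state, count, error_callback,
                                         data, &vec);
     if (p != NULL)
       {
         memcpy (p, bytes, count);
         p = (char *) backtrace_vector_finish (state, &vec,
                                               error_callback, data);
       }

   After backtrace_vector_finish, P points at the finished copy and
   the vector is ready to accept the next object.  */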

/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
                          struct backtrace_vector *vec,
                          backtrace_error_callback error_callback,
                          void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
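  /* For example, with SIZE 20 and ALC 44: ALIGNED becomes 24, the 4
     padding bytes up to the 8-byte boundary stay with the retained
     data, and the 40 bytes starting at offset 24 from the base are
     handed back to the allocator.  */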
  size = vec->size;
  alc = vec->alc;
  aligned = (size + 7) & ~ (size_t) 7;
  alc -= aligned - size;

  backtrace_free (state, (char *) vec->base + aligned, alc,
                  error_callback, data);
  vec->alc = 0;
  return 1;
}