/* Copyright (C) 2020-2022 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains wrappers for the system allocation routines.  Most
   places in the OpenMP API do not make any provision for failure, so in
   general we cannot allow memory allocation to fail.  */

#define _GNU_SOURCE
#include "libgomp.h"
#include <stdlib.h>
#include <string.h>

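/* Predefined allocator handles (omp_default_mem_alloc up to
   omp_thread_mem_alloc) are small enumeration constants; any handle
   greater than omp_max_predefined_alloc is in fact a pointer to a
   heap-allocated struct omp_allocator_data created by omp_init_allocator.
   The comparisons against omp_max_predefined_alloc below rely on this.  */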
#define omp_max_predefined_alloc omp_thread_mem_alloc

struct omp_allocator_data
{
  omp_memspace_handle_t memspace;
  omp_uintptr_t alignment;
  omp_uintptr_t pool_size;
  omp_uintptr_t used_pool_size;
  omp_allocator_handle_t fb_data;
  unsigned int sync_hint : 8;
  unsigned int access : 8;
  unsigned int fallback : 8;
  unsigned int pinned : 1;
  unsigned int partition : 7;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_t lock;
#endif
};

struct omp_mem_header
{
  void *ptr;
  size_t size;
  omp_allocator_handle_t allocator;
  void *pad;
};
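
/* Each allocation handed back to the user is preceded by a struct
   omp_mem_header stored immediately below the returned pointer:

     ptr (from malloc/calloc)        ret (returned to the user)
      |                               |
      v                               v
      +--- alignment padding ---+--------+-------------------+
      |     (possibly none)     | header |     user data     |
      +-------------------------+--------+-------------------+

   PTR records the address actually returned by the underlying
   malloc/calloc/realloc call, SIZE the total size of that block and
   ALLOCATOR the handle it was allocated with, so omp_free and omp_realloc
   can recover everything they need from the user pointer alone.  PAD seems
   to be there only to keep sizeof (struct omp_mem_header) a multiple of
   2 * sizeof (void *), so that the non-overaligned case preserves malloc's
   natural alignment for the returned pointer.  */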

omp_allocator_handle_t
omp_init_allocator (omp_memspace_handle_t memspace, int ntraits,
		    const omp_alloctrait_t traits[])
{
  struct omp_allocator_data data
    = { memspace, 1, ~(uintptr_t) 0, 0, 0, omp_atv_contended, omp_atv_all,
	omp_atv_default_mem_fb, omp_atv_false, omp_atv_environment };
  struct omp_allocator_data *ret;
  int i;

  if (memspace > omp_low_lat_mem_space)
    return omp_null_allocator;
  for (i = 0; i < ntraits; i++)
    switch (traits[i].key)
      {
      case omp_atk_sync_hint:
	switch (traits[i].value)
	  {
	  case omp_atv_default:
	    data.sync_hint = omp_atv_contended;
	    break;
	  case omp_atv_contended:
	  case omp_atv_uncontended:
	  case omp_atv_serialized:
	  case omp_atv_private:
	    data.sync_hint = traits[i].value;
	    break;
	  default:
	    return omp_null_allocator;
	  }
	break;
      case omp_atk_alignment:
	if (traits[i].value == omp_atv_default)
	  {
	    data.alignment = 1;
	    break;
	  }
	if ((traits[i].value & (traits[i].value - 1)) != 0
	    || !traits[i].value)
	  return omp_null_allocator;
	data.alignment = traits[i].value;
	break;
      case omp_atk_access:
	switch (traits[i].value)
	  {
	  case omp_atv_default:
	    data.access = omp_atv_all;
	    break;
	  case omp_atv_all:
	  case omp_atv_cgroup:
	  case omp_atv_pteam:
	  case omp_atv_thread:
	    data.access = traits[i].value;
	    break;
	  default:
	    return omp_null_allocator;
	  }
	break;
      case omp_atk_pool_size:
	if (traits[i].value == omp_atv_default)
	  data.pool_size = ~(uintptr_t) 0;
	else
	  data.pool_size = traits[i].value;
	break;
      case omp_atk_fallback:
	switch (traits[i].value)
	  {
	  case omp_atv_default:
	    data.fallback = omp_atv_default_mem_fb;
	    break;
	  case omp_atv_default_mem_fb:
	  case omp_atv_null_fb:
	  case omp_atv_abort_fb:
	  case omp_atv_allocator_fb:
	    data.fallback = traits[i].value;
	    break;
	  default:
	    return omp_null_allocator;
	  }
	break;
      case omp_atk_fb_data:
	data.fb_data = traits[i].value;
	break;
      case omp_atk_pinned:
	switch (traits[i].value)
	  {
	  case omp_atv_default:
	  case omp_atv_false:
	    data.pinned = omp_atv_false;
	    break;
	  case omp_atv_true:
	    data.pinned = omp_atv_true;
	    break;
	  default:
	    return omp_null_allocator;
	  }
	break;
      case omp_atk_partition:
	switch (traits[i].value)
	  {
	  case omp_atv_default:
	    data.partition = omp_atv_environment;
	    break;
	  case omp_atv_environment:
	  case omp_atv_nearest:
	  case omp_atv_blocked:
	  case omp_atv_interleaved:
	    data.partition = traits[i].value;
	    break;
	  default:
	    return omp_null_allocator;
	  }
	break;
      default:
	return omp_null_allocator;
      }

  if (data.alignment < sizeof (void *))
    data.alignment = sizeof (void *);

  /* No support for these so far (for hbw, memkind will eventually be
     used).  */
  if (data.pinned || data.memspace == omp_high_bw_mem_space)
    return omp_null_allocator;

  ret = gomp_malloc (sizeof (struct omp_allocator_data));
  *ret = data;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&ret->lock);
#endif
  return (omp_allocator_handle_t) ret;
}
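
/* A minimal caller-side sketch (not part of libgomp itself) of how the
   routines above and below fit together; the trait values are arbitrary
   example choices:

     omp_alloctrait_t traits[3]
       = { { omp_atk_alignment, 64 },
	   { omp_atk_pool_size, 1 << 20 },
	   { omp_atk_fallback, omp_atv_null_fb } };
     omp_allocator_handle_t a
       = omp_init_allocator (omp_default_mem_space, 3, traits);
     void *p = omp_alloc (1024, a);	returns 64-byte aligned memory, or
					NULL once the 1 MiB pool would be
					exceeded
     omp_free (p, a);
     omp_destroy_allocator (a);  */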

void
omp_destroy_allocator (omp_allocator_handle_t allocator)
{
  if (allocator != omp_null_allocator)
    {
#ifndef HAVE_SYNC_BUILTINS
      gomp_mutex_destroy (&((struct omp_allocator_data *) allocator)->lock);
#endif
      free ((void *) allocator);
    }
}

ialias (omp_init_allocator)
ialias (omp_destroy_allocator)

void *
omp_aligned_alloc (size_t alignment, size_t size,
		   omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t new_size, new_alignment;
  void *ptr, *ret;

  if (__builtin_expect (size == 0, 0))
    return NULL;

retry:
  new_alignment = alignment;
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
	thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
	new_alignment = allocator_data->alignment;
    }
  else
    {
      allocator_data = NULL;
      if (new_alignment < sizeof (void *))
	new_alignment = sizeof (void *);
    }

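  /* Over-allocate: room for the bookkeeping header plus, when an alignment
     larger than malloc's natural one is requested, the worst-case padding
     needed to reach an aligned address just past the header.  */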
  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;

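  /* For allocators with a finite pool_size, reserve the bytes in
     used_pool_size up front (with a compare-and-swap loop, or under the
     allocator's lock when atomics are unavailable) and give them back if
     the underlying malloc fails.  */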
  if (__builtin_expect (allocator_data
			&& allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
	goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
					MEMMODEL_RELAXED);
      do
	{
	  uintptr_t new_pool_size;
	  if (__builtin_add_overflow (used_pool_size, new_size,
				      &new_pool_size)
	      || new_pool_size > allocator_data->pool_size)
	    goto fail;
	  if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
					   &used_pool_size, new_pool_size,
					   true, MEMMODEL_RELAXED,
					   MEMMODEL_RELAXED))
	    break;
	}
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
				  &used_pool_size)
	  || used_pool_size > allocator_data->pool_size)
	{
	  gomp_mutex_unlock (&allocator_data->lock);
	  goto fail;
	}
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      ptr = malloc (new_size);
      if (ptr == NULL)
	{
#ifdef HAVE_SYNC_BUILTINS
	  __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
			      MEMMODEL_RELAXED);
#else
	  gomp_mutex_lock (&allocator_data->lock);
	  allocator_data->used_pool_size -= new_size;
	  gomp_mutex_unlock (&allocator_data->lock);
#endif
	  goto fail;
	}
    }
  else
    {
      ptr = malloc (new_size);
      if (ptr == NULL)
	goto fail;
    }

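  /* Round the address just past the header up to the requested alignment
     and stash the bookkeeping data immediately below the pointer returned
     to the user.  */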
  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
		     + sizeof (struct omp_mem_header)
		     + new_alignment - sizeof (void *))
		    & ~(new_alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;

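  /* The allocation (or the pool reservation) failed; apply the allocator's
     fallback trait.  */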
fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
	{
	case omp_atv_default_mem_fb:
	  if ((new_alignment > sizeof (void *) && new_alignment > alignment)
	      || (allocator_data
		  && allocator_data->pool_size < ~(uintptr_t) 0))
	    {
	      allocator = omp_default_mem_alloc;
	      goto retry;
	    }
	  /* Otherwise, we've already performed default mem allocation
	     and if that failed, it won't succeed again (unless it was
	     intermittent).  Return NULL then, as that is the fallback.  */
	  break;
	case omp_atv_null_fb:
	  break;
	default:
	case omp_atv_abort_fb:
	  gomp_fatal ("Out of memory allocating %lu bytes",
		      (unsigned long) size);
	case omp_atv_allocator_fb:
	  allocator = allocator_data->fb_data;
	  goto retry;
	}
    }
  return NULL;
}

ialias (omp_aligned_alloc)

void *
omp_alloc (size_t size, omp_allocator_handle_t allocator)
{
  return ialias_call (omp_aligned_alloc) (1, size, allocator);
}

/* Like omp_aligned_alloc, but apply on top of that:
   "For allocations that arise from this ... the null_fb value of the
   fallback allocator trait behaves as if the abort_fb had been specified."  */

void *
GOMP_alloc (size_t alignment, size_t size, uintptr_t allocator)
{
  void *ret
    = ialias_call (omp_aligned_alloc) (alignment, size,
				       (omp_allocator_handle_t) allocator);
  if (__builtin_expect (ret == NULL, 0) && size)
    gomp_fatal ("Out of memory allocating %lu bytes",
		(unsigned long) size);
  return ret;
}

void
omp_free (void *ptr, omp_allocator_handle_t allocator)
{
  struct omp_mem_header *data;

  if (ptr == NULL)
    return;
  (void) allocator;
  data = &((struct omp_mem_header *) ptr)[-1];
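  /* For user-defined allocators with a finite pool, return the bytes that
     were reserved in used_pool_size when this block was allocated.  */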
  if (data->allocator > omp_max_predefined_alloc)
    {
      struct omp_allocator_data *allocator_data
	= (struct omp_allocator_data *) (data->allocator);
      if (allocator_data->pool_size < ~(uintptr_t) 0)
	{
#ifdef HAVE_SYNC_BUILTINS
	  __atomic_add_fetch (&allocator_data->used_pool_size, -data->size,
			      MEMMODEL_RELAXED);
#else
	  gomp_mutex_lock (&allocator_data->lock);
	  allocator_data->used_pool_size -= data->size;
	  gomp_mutex_unlock (&allocator_data->lock);
#endif
	}
    }
  free (data->ptr);
}

ialias (omp_free)

void
GOMP_free (void *ptr, uintptr_t allocator)
{
  return ialias_call (omp_free) (ptr, (omp_allocator_handle_t) allocator);
}

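/* omp_aligned_calloc mirrors omp_aligned_alloc, except that the payload is
   nmemb * size bytes (checked for overflow) and the whole block, header
   included, is zero-initialized by calloc.  */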
void *
omp_aligned_calloc (size_t alignment, size_t nmemb, size_t size,
		    omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t new_size, size_temp, new_alignment;
  void *ptr, *ret;

  if (__builtin_expect (size == 0 || nmemb == 0, 0))
    return NULL;

retry:
  new_alignment = alignment;
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
	thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
	new_alignment = allocator_data->alignment;
    }
  else
    {
      allocator_data = NULL;
      if (new_alignment < sizeof (void *))
	new_alignment = sizeof (void *);
    }

  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_mul_overflow (size, nmemb, &size_temp))
    goto fail;
  if (__builtin_add_overflow (size_temp, new_size, &new_size))
    goto fail;

  if (__builtin_expect (allocator_data
			&& allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
	goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
					MEMMODEL_RELAXED);
      do
	{
	  uintptr_t new_pool_size;
	  if (__builtin_add_overflow (used_pool_size, new_size,
				      &new_pool_size)
	      || new_pool_size > allocator_data->pool_size)
	    goto fail;
	  if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
					   &used_pool_size, new_pool_size,
					   true, MEMMODEL_RELAXED,
					   MEMMODEL_RELAXED))
	    break;
	}
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
				  &used_pool_size)
	  || used_pool_size > allocator_data->pool_size)
	{
	  gomp_mutex_unlock (&allocator_data->lock);
	  goto fail;
	}
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      ptr = calloc (1, new_size);
      if (ptr == NULL)
	{
#ifdef HAVE_SYNC_BUILTINS
	  __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
			      MEMMODEL_RELAXED);
#else
	  gomp_mutex_lock (&allocator_data->lock);
	  allocator_data->used_pool_size -= new_size;
	  gomp_mutex_unlock (&allocator_data->lock);
#endif
	  goto fail;
	}
    }
  else
    {
      ptr = calloc (1, new_size);
      if (ptr == NULL)
	goto fail;
    }

  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
		     + sizeof (struct omp_mem_header)
		     + new_alignment - sizeof (void *))
		    & ~(new_alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;

fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
	{
	case omp_atv_default_mem_fb:
	  if ((new_alignment > sizeof (void *) && new_alignment > alignment)
	      || (allocator_data
		  && allocator_data->pool_size < ~(uintptr_t) 0))
	    {
	      allocator = omp_default_mem_alloc;
	      goto retry;
	    }
	  /* Otherwise, we've already performed default mem allocation
	     and if that failed, it won't succeed again (unless it was
	     intermittent).  Return NULL then, as that is the fallback.  */
	  break;
	case omp_atv_null_fb:
	  break;
	default:
	case omp_atv_abort_fb:
	  gomp_fatal ("Out of memory allocating %lu bytes",
		      (unsigned long) (size * nmemb));
	case omp_atv_allocator_fb:
	  allocator = allocator_data->fb_data;
	  goto retry;
	}
    }
  return NULL;
}

ialias (omp_aligned_calloc)

void *
omp_calloc (size_t nmemb, size_t size, omp_allocator_handle_t allocator)
{
  return ialias_call (omp_aligned_calloc) (1, nmemb, size, allocator);
}

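/* omp_realloc either resizes the block in place with realloc (only when
   neither the old nor the new allocation needs extra alignment and the
   pool bookkeeping allows it) or falls back to malloc + memcpy + free,
   adjusting each allocator's used_pool_size by the appropriate delta.  */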
void *
omp_realloc (void *ptr, size_t size, omp_allocator_handle_t allocator,
	     omp_allocator_handle_t free_allocator)
{
  struct omp_allocator_data *allocator_data, *free_allocator_data;
  size_t new_size, old_size, new_alignment, old_alignment;
  void *new_ptr, *ret;
  struct omp_mem_header *data;

  if (__builtin_expect (ptr == NULL, 0))
    return ialias_call (omp_aligned_alloc) (1, size, allocator);

  if (__builtin_expect (size == 0, 0))
    {
      ialias_call (omp_free) (ptr, free_allocator);
      return NULL;
    }

  data = &((struct omp_mem_header *) ptr)[-1];
  free_allocator = data->allocator;

retry:
  new_alignment = sizeof (void *);
  if (allocator == omp_null_allocator)
    allocator = free_allocator;

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
	new_alignment = allocator_data->alignment;
    }
  else
    allocator_data = NULL;
  if (free_allocator > omp_max_predefined_alloc)
    free_allocator_data = (struct omp_allocator_data *) free_allocator;
  else
    free_allocator_data = NULL;
  old_alignment = (uintptr_t) ptr - (uintptr_t) (data->ptr);

  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;
  old_size = data->size;

  if (__builtin_expect (allocator_data
			&& allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      size_t prev_size = 0;
      /* Check if we can use realloc.  Don't use it if extra alignment
	 was used previously or newly, because realloc might return a pointer
	 with different alignment and then we'd need to memmove the data
	 again.  */
      if (free_allocator_data
	  && free_allocator_data == allocator_data
	  && new_alignment == sizeof (void *)
	  && old_alignment == sizeof (struct omp_mem_header))
	prev_size = old_size;
      if (new_size > prev_size
	  && new_size - prev_size > allocator_data->pool_size)
	goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
					MEMMODEL_RELAXED);
      do
	{
	  uintptr_t new_pool_size;
	  if (new_size > prev_size)
	    {
	      if (__builtin_add_overflow (used_pool_size, new_size - prev_size,
					  &new_pool_size)
		  || new_pool_size > allocator_data->pool_size)
		goto fail;
	    }
	  else
	    new_pool_size = used_pool_size + new_size - prev_size;
	  if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
					   &used_pool_size, new_pool_size,
					   true, MEMMODEL_RELAXED,
					   MEMMODEL_RELAXED))
	    break;
	}
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (new_size > prev_size)
	{
	  if (__builtin_add_overflow (allocator_data->used_pool_size,
				      new_size - prev_size,
				      &used_pool_size)
	      || used_pool_size > allocator_data->pool_size)
	    {
	      gomp_mutex_unlock (&allocator_data->lock);
	      goto fail;
	    }
	}
      else
	used_pool_size = (allocator_data->used_pool_size
			  + new_size - prev_size);
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      if (prev_size)
	new_ptr = realloc (data->ptr, new_size);
      else
	new_ptr = malloc (new_size);
      if (new_ptr == NULL)
	{
#ifdef HAVE_SYNC_BUILTINS
	  __atomic_add_fetch (&allocator_data->used_pool_size,
			      prev_size - new_size,
			      MEMMODEL_RELAXED);
#else
	  gomp_mutex_lock (&allocator_data->lock);
	  allocator_data->used_pool_size -= new_size - prev_size;
	  gomp_mutex_unlock (&allocator_data->lock);
#endif
	  goto fail;
	}
      else if (prev_size)
	{
	  ret = (char *) new_ptr + sizeof (struct omp_mem_header);
	  ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
	  ((struct omp_mem_header *) ret)[-1].size = new_size;
	  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
	  return ret;
	}
    }
  else if (new_alignment == sizeof (void *)
	   && old_alignment == sizeof (struct omp_mem_header)
	   && (free_allocator_data == NULL
	       || free_allocator_data->pool_size == ~(uintptr_t) 0))
    {
      new_ptr = realloc (data->ptr, new_size);
      if (new_ptr == NULL)
	goto fail;
      ret = (char *) new_ptr + sizeof (struct omp_mem_header);
      ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
      ((struct omp_mem_header *) ret)[-1].size = new_size;
      ((struct omp_mem_header *) ret)[-1].allocator = allocator;
      return ret;
    }
  else
    {
      new_ptr = malloc (new_size);
      if (new_ptr == NULL)
	goto fail;
    }

  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) new_ptr
		     + sizeof (struct omp_mem_header)
		     + new_alignment - sizeof (void *))
		    & ~(new_alignment - 1));
  else
    ret = (char *) new_ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
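  /* Copy only as much as the old allocation can provide: OLD_SIZE is the
     full size of the old underlying block and OLD_ALIGNMENT the offset of
     the user data within it, so their difference is how many bytes of old
     user data can safely be copied.  */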
  if (old_size - old_alignment < size)
    size = old_size - old_alignment;
  memcpy (ret, ptr, size);
  if (__builtin_expect (free_allocator_data
			&& free_allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
#ifdef HAVE_SYNC_BUILTINS
      __atomic_add_fetch (&free_allocator_data->used_pool_size, -data->size,
			  MEMMODEL_RELAXED);
#else
      gomp_mutex_lock (&free_allocator_data->lock);
      free_allocator_data->used_pool_size -= data->size;
      gomp_mutex_unlock (&free_allocator_data->lock);
#endif
    }
  free (data->ptr);
  return ret;

fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
	{
	case omp_atv_default_mem_fb:
	  if (new_alignment > sizeof (void *)
	      || (allocator_data
		  && allocator_data->pool_size < ~(uintptr_t) 0))
	    {
	      allocator = omp_default_mem_alloc;
	      goto retry;
	    }
	  /* Otherwise, we've already performed default mem allocation
	     and if that failed, it won't succeed again (unless it was
	     intermittent).  Return NULL then, as that is the fallback.  */
	  break;
	case omp_atv_null_fb:
	  break;
	default:
	case omp_atv_abort_fb:
	  gomp_fatal ("Out of memory allocating %lu bytes",
		      (unsigned long) size);
	case omp_atv_allocator_fb:
	  allocator = allocator_data->fb_data;
	  goto retry;
	}
    }
  return NULL;
}