// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003-2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <bits/move.h>
#if __cplusplus >= 201103L
#include <type_traits>
#endif

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  using std::size_t;
  using std::ptrdiff_t;

  typedef void (*__destroy_handler)(void*);

  /// Base class for pool object.
  struct __pool_base
  {
    // Using short int as the type for the binmap implies we never
    // cache blocks larger than 32768 bytes with this allocator.
    typedef unsigned short int _Binmap_type;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), which
      // is 4 on 32-bit machines and 8 on 64-bit machines.
      size_t    _M_align;

      // Allocation requests (after round-up to a power of 2) below
      // this value will be handled by the allocator. A raw call to
      // operator new will be used for requests larger than this value.
      // NB: Must be much smaller than _M_chunk_size and in any
      // case <= 32768.
      size_t    _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align (and of course
      // much smaller than _M_max_bytes).
      size_t    _M_min_bin;

      // In order to avoid fragmentation and to minimize the number
      // of new() calls we always request new memory using this
      // value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      // NB: At least one order of magnitude > _M_max_bytes.
      size_t    _M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534.)
      size_t    _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that the freelist never holds more than
      // _M_freelist_headroom percent of the memory currently in
      // use. If the number of additional records exceeds that
      // headroom, we move the excess records back to the global
      // pool.
      size_t    _M_freelist_headroom;

      // If set to true, forces all allocations to use new().
      bool      _M_force_new;

      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
            size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };
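
    // An illustrative sketch of constructing custom tuning values via
    // the second _Tune constructor above (the numbers below are only
    // examples, not recommendations):
    //
    //   __gnu_cxx::__pool_base::_Tune __t(16,     // _M_align
    //                                     256,    // _M_max_bytes
    //                                     16,     // _M_min_bin
    //                                     4080,   // _M_chunk_size
    //                                     4096,   // _M_max_threads
    //                                     10,     // _M_freelist_headroom
    //                                     false); // _M_force_new
    //
    // Such a _Tune is honored only if installed (via _M_set_options
    // below) before the pool has been initialized.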

    struct _Block_address
    {
      void*             _M_initial;
      _Block_address*   _M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }

    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
        _M_options = __t;
    }

    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }
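
    // For instance, with the default _Tune values the bins hold blocks
    // of 8, 16, 32, 64 and 128 bytes, so a 100-byte request maps to
    // the index of the 128-byte bin.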

    size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(0), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(0), _M_init(false) { }

  private:
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune               _M_options;

    _Binmap_type*       _M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool                _M_init;
  };

  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   */
  template<bool _Thread>
    class __pool;

  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record*			_M_next;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block.
	_Block_record**			_M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*			_M_address;
      };

      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes) throw ();

      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(0), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(0), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory for this "array" is allocated in
      // _M_initialize().
      _Bin_record*		_M_bin;

      // Actual value calculated in _M_initialize().
      size_t			_M_bin_size;

      void
      _M_initialize();
    };

#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
      struct _Thread_record
      {
	// Points to next free thread id record. NULL if last record in list.
	_Thread_record*			_M_next;

	// Thread id ranging from 1 to _S_max_threads.
	size_t				_M_id;
      };

      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record*			_M_next;

	// The thread id of the thread which has requested this block.
	size_t				_M_thread_id;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block for each
	// thread id. Memory for this "array" is allocated in
	// _S_initialize() for _S_max_threads + global pool 0.
	_Block_record**			_M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*			_M_address;

	// An "array" of counters used to keep track of the number of
	// blocks that are on the freelist/used for each thread id.
	// - Note that the second part of the allocated _M_used "array"
	//   actually hosts (atomic) counters of reclaimed blocks:  in
	//   _M_reserve_block and in _M_reclaim_block those numbers are
	//   subtracted from the first ones to obtain the actual size
	//   of the "working set" of the given thread.
	// - Memory for these "arrays" is allocated in _S_initialize()
	//   for _S_max_threads + global pool 0.
	size_t*				_M_free;
	size_t*				_M_used;

	// Each bin has its own mutex which is used to ensure data
	// integrity while changing "ownership" on a block.  The mutex
	// is initialized in _S_initialize().
	__gthread_mutex_t*		_M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_initialize(__destroy_handler);

      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes) throw ();

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
			 size_t __thread_id)
      {
	if (__gthread_active_p())
	  {
	    __block->_M_thread_id = __thread_id;
	    --__bin._M_free[__thread_id];
	    ++__bin._M_used[__thread_id];
	  }
      }

      // XXX GLIBCXX_ABI Deprecated
      _GLIBCXX_CONST void
      _M_destroy_thread_key(void*) throw ();

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(0), _M_bin_size(1), _M_thread_freelist(0)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(0), _M_bin_size(1),
	_M_thread_freelist(0)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory for this "array" is allocated in
      // _M_initialize().
      _Bin_record*		_M_bin;

      // Actual value calculated in _M_initialize().
      size_t			_M_bin_size;

      _Thread_record*		_M_thread_freelist;
      void*			_M_thread_freelist_initial;

      void
      _M_initialize();
    };
#endif

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
    {
      typedef _PoolTp<_Thread>		pool_type;

      static pool_type&
      _S_get_pool()
      {
	static pool_type _S_pool;
	return _S_pool;
      }
    };
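
  // Note that __common_pool is not parameterized on the value type:
  // all allocators rebound through this policy, e.g. __mt_alloc<int>
  // and __mt_alloc<long>, draw from one shared pool.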

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;

  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, false>
    : public __common_pool<_PoolTp, false>
    {
      using __common_pool<_PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };

#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using __common_pool<_PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif

  /// Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
	struct _M_rebind
	{ typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
    };

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp			value_type;
      typedef _PoolTp<_Thread>		pool_type;

      static pool_type&
      _S_get_pool()
      {
	// Sane defaults for the _PoolTp.
	typedef typename pool_type::_Block_record _Block_record;
	const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
				   ? __alignof__(_Tp) : sizeof(_Block_record));

	typedef typename __pool_base::_Tune _Tune;
	static _Tune _S_tune(__a, sizeof(_Tp) * 64,
			     sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
			     sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
			     _Tune::_S_max_threads,
			     _Tune::_S_freelist_headroom,
			     std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
	static pool_type _S_pool(_S_tune);
	return _S_pool;
      }
    };

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;

  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, false>
    : public __per_type_pool<_Tp, _PoolTp, false>
    {
      using __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };

#ifdef __GTHREADS
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif

  /// Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
	struct _M_rebind
	{ typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
    };
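
  // An illustrative sketch of selecting the per-type policy instead of
  // the default shared one (true selects the threaded __pool, which is
  // only available when __GTHREADS is defined):
  //
  //   typedef __gnu_cxx::__per_type_pool_policy<int, __gnu_cxx::__pool,
  //                                             true> __int_policy;
  //   __gnu_cxx::__mt_alloc<int, __int_policy> __alloc;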

  /// Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

#if __cplusplus >= 201103L
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2103. propagate_on_container_move_assignment
      typedef std::true_type propagate_on_container_move_assignment;
#endif

      pointer
      address(reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      const_pointer
      address(const_reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      size_type
      max_size() const _GLIBCXX_USE_NOEXCEPT
      { return size_t(-1) / sizeof(_Tp); }

#if __cplusplus >= 201103L
      template<typename _Up, typename... _Args>
	void
	construct(_Up* __p, _Args&&... __args)
	{ ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }

      template<typename _Up>
	void
	destroy(_Up* __p) { __p->~_Up(); }
#else
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
#endif
    };

#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif

  /**
   *  @brief  This is a fixed size (power of 2) allocator which, when
   *  compiled with thread support, maintains one freelist per size
   *  per thread plus a @a global one. Steps are taken to limit the
   *  per thread freelist sizes (by returning excess blocks to the
   *  @a global list).
   *  @ingroup allocators
   *
   *  Further details:
   *  https://gcc.gnu.org/onlinedocs/libstdc++/manual/mt_allocator.html
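   *
   *  A minimal usage sketch:
   *  @code
   *    __gnu_cxx::__mt_alloc<int> __alloc;
   *    int* __p = __alloc.allocate(32);
   *    __alloc.deallocate(__p, 32);
   *  @endcode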
   */
  template<typename _Tp,
	   typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t                    	size_type;
      typedef ptrdiff_t                 	difference_type;
      typedef _Tp*                      	pointer;
      typedef const _Tp*                	const_pointer;
      typedef _Tp&                      	reference;
      typedef const _Tp&                	const_reference;
      typedef _Tp                       	value_type;
      typedef _Poolp				__policy_type;
      typedef typename _Poolp::pool_type	__pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
	struct rebind
	{
	  typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
	  typedef __mt_alloc<_Tp1, pol_type> other;
	};

      __mt_alloc() _GLIBCXX_USE_NOEXCEPT { }

      __mt_alloc(const __mt_alloc&) _GLIBCXX_USE_NOEXCEPT { }

      template<typename _Tp1, typename _Poolp1>
	__mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) _GLIBCXX_USE_NOEXCEPT { }

      ~__mt_alloc() _GLIBCXX_USE_NOEXCEPT { }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
	// Return a copy, not a reference, for external consumption.
	return __policy_type::_S_get_pool()._M_get_options();
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };
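
  // __mt_alloc is typically used as a container allocator, e.g.:
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //
  // Any tuning via _M_set_options must happen before the first
  // allocation from the underlying pool.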

  template<typename _Tp, typename _Poolp>
    typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__n > this->max_size())
	std::__throw_bad_alloc();

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
	{
	  void* __ret = ::operator new(__bytes);
	  return static_cast<_Tp*>(__ret);
	}

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = __pool._M_get_binmap(__bytes);
      const size_t __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
	{
	  // Already reserved.
	  typedef typename __pool_type::_Block_record _Block_record;
	  _Block_record* __block = __bin._M_first[__thread_id];
	  __bin._M_first[__thread_id] = __block->_M_next;

	  __pool._M_adjust_freelist(__bin, __block, __thread_id);
	  __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
	}
      else
	{
	  // Null, reserve.
	  __c = __pool._M_reserve_block(__bytes, __thread_id);
	}
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }

  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
	{
	  // Requests larger than _M_max_bytes are handled by
	  // operator new/delete directly.
	  __pool_type& __pool = __policy_type::_S_get_pool();
	  const size_t __bytes = __n * sizeof(_Tp);
	  if (__pool._M_check_threshold(__bytes))
	    ::operator delete(__p);
	  else
	    __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
	}
    }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }

#undef __thread_default

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif