// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 *  You should only include this header if you are using GCC 3 or later.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>

namespace __gnu_cxx
{
  /**
   *  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  "global").
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
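  // A minimal usage sketch (not part of the original header): __mt_alloc
  // satisfies the standard Allocator requirements, so it can be supplied
  // as a container's allocator argument.  The container and element type
  // below are arbitrary illustrative choices.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //   __v.push_back(42);  // requests up to _M_max_bytes come from the pools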
  template<typename _Tp>
    class __mt_alloc
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __mt_alloc<_Tp1> other; };

      __mt_alloc() throw()
      {
        // XXX
      }

      __mt_alloc(const __mt_alloc&) throw()
      {
        // XXX
      }

      template<typename _Tp1>
        __mt_alloc(const __mt_alloc<_Tp1>& obj) throw()
        {
          // XXX
        }

      ~__mt_alloc() throw() { }

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      // Variables used to configure the behavior of the allocator,
      // assigned and explained in detail below.
      struct _Tune
      {
        // Alignment needed.
        // NB: In any case must be >= sizeof(_Block_record), that
        // is 4 on 32 bit machines and 8 on 64 bit machines.
        size_t  _M_align;

        // Allocation requests (after round-up to power of 2) below
        // this value will be handled by the allocator. A raw new/delete
        // call will be used for requests larger than this value.
        size_t  _M_max_bytes;

        // Size in bytes of the smallest bin.
        // NB: Must be a power of 2 and >= _M_align.
        size_t  _M_min_bin;

        // In order to avoid fragmentation and to minimize the number
        // of new() calls, we always request new memory using this
        // value. Based on previous discussions on the libstdc++
        // mailing list we have chosen the value below.
        // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
        size_t  _M_chunk_size;

        // The maximum number of supported threads. Our Linux 2.4.18
        // reports 4070 in /proc/sys/kernel/threads-max.
        size_t  _M_max_threads;

        // Each time a deallocation occurs in a threaded application,
        // we make sure that there are no more than
        // _M_freelist_headroom % of used memory on the freelist. If
        // the number of additional records is more than
        // _M_freelist_headroom % of the freelist, we move these
        // records back to the global pool.
        size_t  _M_freelist_headroom;

        // When set to true, forces all allocations to use new().
        bool    _M_force_new;

        explicit
        _Tune()
        : _M_align(8), _M_max_bytes(128), _M_min_bin(8),
          _M_chunk_size(4096 - 4 * sizeof(void*)),
          _M_max_threads(4096), _M_freelist_headroom(10),
          _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
        { }

        explicit
        _Tune(size_t __align, size_t __maxb, size_t __minbin,
              size_t __chunk, size_t __maxthreads, size_t __headroom,
              bool __force)
        : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
          _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
          _M_freelist_headroom(__headroom), _M_force_new(__force)
        { }
      };
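      // Illustration (not part of the original source): with the default
      // _Tune values above, the allocator manages five power-of-2 bins of
      // 8, 16, 32, 64 and 128 bytes, refilled in chunks of
      // 4096 - 4 * sizeof(void*) bytes.  A custom configuration could be
      // expressed as, for example,
      //
      //   _Tune __t(16, 256, 16, 4096 - 4 * sizeof(void*), 4096, 10, false);
      //
      // although in this revision only the private _S_set_options() member
      // consumes such an object, and only before the first allocation.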

    private:
      // We need to create the initial lists and set up some variables
      // before we can answer the first request for memory.
#ifdef __GTHREADS
      static __gthread_once_t           _S_once;
#endif
      static bool                       _S_init;

      static void
      _S_initialize();

      // Configuration options.
      static _Tune                      _S_options;

      static const _Tune
      _S_get_options()
      { return _S_options; }

      static void
      _S_set_options(_Tune __t)
      {
        if (!_S_init)
          _S_options = __t;
      }

      // Using short int as the type for the binmap implies we are never
      // caching blocks larger than 65535 bytes with this allocator.
      typedef unsigned short int        _Binmap_type;
      static _Binmap_type*              _S_binmap;

      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store its address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread id
      // to the front of this list.
#ifdef __GTHREADS
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record* volatile        _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t                          _M_id;
      };

      static _Thread_record* volatile   _S_thread_freelist_first;
      static __gthread_mutex_t          _S_thread_freelist_mutex;
      static __gthread_key_t            _S_thread_key;

      static void
      _S_destroy_thread_key(void* __freelist_pos);
#endif

      static size_t
      _S_get_thread_id();

      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record* volatile         _M_next;

#ifdef __GTHREADS
        // The thread id of the thread which has requested this block.
        size_t                          _M_thread_id;
#endif
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory for this "array" is allocated in _S_initialize()
        // for _S_max_threads + global pool 0.
        _Block_record** volatile        _M_first;

#ifdef __GTHREADS
        // An "array" of counters used to keep track of the number of
        // blocks that are on the freelist/in use for each thread id.
        // Memory for these "arrays" is allocated in _S_initialize() for
        // _S_max_threads + global pool 0.
        size_t* volatile                _M_free;
        size_t* volatile                _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" of a block.  The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*              _M_mutex;
#endif
      };

      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory for this "array" is allocated in
      // _S_initialize().
      static _Bin_record* volatile      _S_bin;

      // Actual value calculated in _S_initialize().
      static size_t                     _S_bin_size;
    };

  template<typename _Tp>
    typename __mt_alloc<_Tp>::pointer
    __mt_alloc<_Tp>::
    allocate(size_type __n, const void*)
    {
      // Although the test in __gthread_once() would suffice, we wrap
      // the test of the once condition in our own unlocked check. This
      // saves one function call to pthread_once() (which itself only
      // checks the once value without locking and returns immediately
      // if it is already set).
      if (!_S_init)
        {
#ifdef __GTHREADS
          if (__gthread_active_p())
            __gthread_once(&_S_once, _S_initialize);
#endif
          if (!_S_init)
            _S_initialize();
        }

      // Requests larger than _M_max_bytes are handled by new/delete
      // directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const size_t __thread_id = _S_get_thread_id();
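      // Worked example (illustrative, assuming the default tuning): a
      // request for __n * sizeof(_Tp) == 24 bytes looks up _S_binmap[24],
      // which is 2, i.e. the bin serving 32-byte blocks.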

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      const _Bin_record& __bin = _S_bin[__which];
      _Block_record* __block = NULL;
      if (__bin._M_first[__thread_id] == NULL)
        {
          // NB: For alignment reasons, we can't use the first _M_align
          // bytes, even when sizeof(_Block_record) < _M_align.
          const size_t __bin_size = ((_S_options._M_min_bin << __which)
                                     + _S_options._M_align);
          size_t __block_count = _S_options._M_chunk_size / __bin_size;
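          // For example (illustrative, default tuning, 64-bit target): for
          // bin 0, __bin_size = (8 << 0) + 8 = 16 and __block_count =
          // (4096 - 32) / 16 = 254 blocks per newly allocated chunk.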

          // Are we using threads?
          // - Yes, check if there are free blocks on the global
          //   list. If so, grab up to __block_count blocks in one
          //   lock and change ownership. If the global list is
          //   empty, we allocate a new chunk and add those blocks
          //   directly to our own freelist (with us as owner).
          // - No, all operations are made directly on global pool 0;
          //   there is no need to lock or change ownership, but we
          //   still check for free blocks on the global list (adding
          //   new ones if there are none) and take the first one.
#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              __gthread_mutex_lock(__bin._M_mutex);
              if (__bin._M_first[0] == NULL)
                {
                  // No need to hold the lock when we are adding a
                  // whole chunk to our own list.
                  __gthread_mutex_unlock(__bin._M_mutex);

                  void* __v = ::operator new(_S_options._M_chunk_size);
                  __bin._M_first[__thread_id] = static_cast<_Block_record*>(__v);
                  __bin._M_free[__thread_id] = __block_count;

                  --__block_count;
                  __block = __bin._M_first[__thread_id];
                  while (__block_count-- > 0)
                    {
                      char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                      __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                      __block = __block->_M_next;
                    }
                  __block->_M_next = NULL;
                }
              else
                {
                  // Is the number of required blocks greater than or
                  // equal to the number that can be provided by the
                  // global free list?
                  __bin._M_first[__thread_id] = __bin._M_first[0];
                  if (__block_count >= __bin._M_free[0])
                    {
                      __bin._M_free[__thread_id] = __bin._M_free[0];
                      __bin._M_free[0] = 0;
                      __bin._M_first[0] = NULL;
                    }
                  else
                    {
                      __bin._M_free[__thread_id] = __block_count;
                      __bin._M_free[0] -= __block_count;
                      --__block_count;
                      __block = __bin._M_first[0];
                      while (__block_count-- > 0)
                        __block = __block->_M_next;
                      __bin._M_first[0] = __block->_M_next;
                      __block->_M_next = NULL;
                    }
                  __gthread_mutex_unlock(__bin._M_mutex);
                }
            }
          else
#endif
            {
              void* __v = ::operator new(_S_options._M_chunk_size);
              __bin._M_first[0] = static_cast<_Block_record*>(__v);

              --__block_count;
              __block = __bin._M_first[0];
              while (__block_count-- > 0)
                {
                  char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                  __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                  __block = __block->_M_next;
                }
              __block->_M_next = NULL;
            }
        }

      __block = __bin._M_first[__thread_id];
      __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          __block->_M_thread_id = __thread_id;
          --__bin._M_free[__thread_id];
          ++__bin._M_used[__thread_id];
        }
#endif

      char* __c = reinterpret_cast<char*>(__block) + _S_options._M_align;
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }

  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    deallocate(pointer __p, size_type __n)
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          ::operator delete(__p);
          return;
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const _Bin_record& __bin = _S_bin[__which];

      char* __c = reinterpret_cast<char*>(__p) - _S_options._M_align;
      _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          // Calculate the number of records to remove from our freelist:
          // in order to avoid too much contention we wait until the
          // number of records is "high enough".
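          // Worked example (illustrative numbers, default tuning with
          // _M_freelist_headroom == 10 and _S_bin_size == 5): in bin 4
          // with 150 blocks free and 10 in use, __remove = 150 * 10 - 10
          // = 1490, which exceeds both 100 * (5 - 4) * 10 = 1000 and the
          // free count of 150, so 1490 / 10 = 149 blocks are pushed back
          // to the global pool.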
          const size_t __thread_id = _S_get_thread_id();

          long __remove = ((__bin._M_free[__thread_id]
                            * _S_options._M_freelist_headroom)
                           - __bin._M_used[__thread_id]);
          if (__remove > static_cast<long>(100 * (_S_bin_size - __which)
                                           * _S_options._M_freelist_headroom)
              && __remove > static_cast<long>(__bin._M_free[__thread_id]))
            {
              _Block_record* __tmp = __bin._M_first[__thread_id];
              _Block_record* __first = __tmp;
              __remove /= _S_options._M_freelist_headroom;
              const long __removed = __remove;
              --__remove;
              while (__remove-- > 0)
                __tmp = __tmp->_M_next;
              __bin._M_first[__thread_id] = __tmp->_M_next;
              __bin._M_free[__thread_id] -= __removed;

              __gthread_mutex_lock(__bin._M_mutex);
              __tmp->_M_next = __bin._M_first[0];
              __bin._M_first[0] = __first;
              __bin._M_free[0] += __removed;
              __gthread_mutex_unlock(__bin._M_mutex);
            }

          // Return this block to our list and update counters and
          // owner id as needed.
          --__bin._M_used[__block->_M_thread_id];

          __block->_M_next = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block;

          ++__bin._M_free[__thread_id];
        }
      else
#endif
        {
          // Single threaded application - return to global pool.
          __block->_M_next = __bin._M_first[0];
          __bin._M_first[0] = __block;
        }
    }

  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_initialize()
    {
      // This method is called on the first allocation (when _S_init is still
      // false) to create the bins.

      // Ensure that the static initialization of _S_options has
      // happened.  This depends on (a) _M_align == 0 being an invalid
      // value that is only present at startup, and (b) the real
      // static initialization that happens later not actually
      // changing anything.
      if (_S_options._M_align == 0)
        new (&_S_options) _Tune;

      // _M_force_new must not change after the first allocate(),
      // which in turn calls this method, so if it's false, it's false
      // forever and we don't need to return here ever again.
      if (_S_options._M_force_new)
        {
          _S_init = true;
          return;
        }

      // Calculate the number of bins required based on _M_max_bytes.
      // _S_bin_size is statically initialized to one.
      size_t __bin_size = _S_options._M_min_bin;
      while (_S_options._M_max_bytes > __bin_size)
        {
          __bin_size <<= 1;
          ++_S_bin_size;
        }
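      // With the default tuning (_M_min_bin == 8, _M_max_bytes == 128) this
      // loop runs four times, leaving _S_bin_size == 5: bins of 8, 16, 32,
      // 64 and 128 bytes.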

      // Set up the bin map for quick lookup of the relevant bin.
      const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(_Binmap_type);
      _S_binmap = static_cast<_Binmap_type*>(::operator new(__j));

      _Binmap_type* __bp = _S_binmap;
      _Binmap_type __bin_max = _S_options._M_min_bin;
      _Binmap_type __bint = 0;
      for (_Binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; ++__ct)
        {
          if (__ct > __bin_max)
            {
              __bin_max <<= 1;
              ++__bint;
            }
          *__bp++ = __bint;
        }
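      // For the defaults the resulting map reads: bytes 0-8 -> bin 0,
      // 9-16 -> bin 1, 17-32 -> bin 2, 33-64 -> bin 3, 65-128 -> bin 4.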

      // Initialize _S_bin and its members.
      void* __v = ::operator new(sizeof(_Bin_record) * _S_bin_size);
      _S_bin = static_cast<_Bin_record*>(__v);

      // If __gthread_active_p(), create and initialize the list of
      // free thread ids. Single threaded applications use thread id 0
      // directly and have no need for this.
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          const size_t __k = sizeof(_Thread_record) * _S_options._M_max_threads;
          __v = ::operator new(__k);
          _S_thread_freelist_first = static_cast<_Thread_record*>(__v);

          // NOTE! The first assignable thread id is 1 since the
          // global pool uses id 0.
          size_t __i;
          for (__i = 1; __i < _S_options._M_max_threads; ++__i)
            {
              _Thread_record& __tr = _S_thread_freelist_first[__i - 1];
              __tr._M_next = &_S_thread_freelist_first[__i];
              __tr._M_id = __i;
            }

          // Set last record.
          _S_thread_freelist_first[__i - 1]._M_next = NULL;
          _S_thread_freelist_first[__i - 1]._M_id = __i;

          // Make sure this is initialized.
#ifndef __GTHREAD_MUTEX_INIT
          __GTHREAD_MUTEX_INIT_FUNCTION(&_S_thread_freelist_mutex);
#endif
          // Initialize per thread key to hold pointer to
          // _S_thread_freelist.
          __gthread_key_create(&_S_thread_key, _S_destroy_thread_key);

          const size_t __max_threads = _S_options._M_max_threads + 1;
          for (size_t __n = 0; __n < _S_bin_size; ++__n)
            {
              _Bin_record& __bin = _S_bin[__n];
              __v = ::operator new(sizeof(_Block_record*) * __max_threads);
              __bin._M_first = static_cast<_Block_record**>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_free = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_used = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(__gthread_mutex_t));
              __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
              {
                // Do not copy a POSIX/gthr mutex once in use.
                __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
                *__bin._M_mutex = __tmp;
              }
#else
              { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif

              for (size_t __threadn = 0; __threadn < __max_threads;
                   ++__threadn)
                {
                  __bin._M_first[__threadn] = NULL;
                  __bin._M_free[__threadn] = 0;
                  __bin._M_used[__threadn] = 0;
                }
            }
        }
      else
#endif
        for (size_t __n = 0; __n < _S_bin_size; ++__n)
          {
            _Bin_record& __bin = _S_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
          }

      _S_init = true;
    }

  template<typename _Tp>
    size_t
    __mt_alloc<_Tp>::
    _S_get_thread_id()
    {
#ifdef __GTHREADS
      // If we have thread support and it's active, we check the thread
      // key value and return its id. If the key is not yet set, we take
      // the first record from _S_thread_freelist, set the key and
      // return its id.
      if (__gthread_active_p())
        {
          _Thread_record* __freelist_pos =
            static_cast<_Thread_record*>(__gthread_getspecific(_S_thread_key));
          if (__freelist_pos == NULL)
            {
              // Since _S_options._M_max_threads must be larger than
              // the theoretical maximum number of threads of the OS,
              // the list can never be empty.
              __gthread_mutex_lock(&_S_thread_freelist_mutex);
              __freelist_pos = _S_thread_freelist_first;
              _S_thread_freelist_first = _S_thread_freelist_first->_M_next;
              __gthread_mutex_unlock(&_S_thread_freelist_mutex);

              __gthread_setspecific(_S_thread_key,
                                    static_cast<void*>(__freelist_pos));
            }
          return __freelist_pos->_M_id;
        }
#endif
      // Otherwise (no thread support or inactive) all requests are
      // served from the global pool 0.
      return 0;
    }

#ifdef __GTHREADS
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_destroy_thread_key(void* __freelist_pos)
    {
      // Return this thread id record to front of thread_freelist.
      __gthread_mutex_lock(&_S_thread_freelist_mutex);
      _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
      __tr->_M_next = _S_thread_freelist_first;
      _S_thread_freelist_first = __tr;
      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
    }
#endif

  template<typename _Tp>
    inline bool
    operator==(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    bool __mt_alloc<_Tp>::_S_init = false;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Tune __mt_alloc<_Tp>::_S_options;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Binmap_type* __mt_alloc<_Tp>::_S_binmap;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Bin_record* volatile __mt_alloc<_Tp>::_S_bin;

  template<typename _Tp>
    size_t __mt_alloc<_Tp>::_S_bin_size = 1;

  // Actual initialization in _S_initialize().
#ifdef __GTHREADS
  template<typename _Tp>
    __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Thread_record*
    volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;

  template<typename _Tp>
    __gthread_key_t __mt_alloc<_Tp>::_S_thread_key;

  template<typename _Tp>
    __gthread_mutex_t
#ifdef __GTHREAD_MUTEX_INIT
    __mt_alloc<_Tp>::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
#else
    __mt_alloc<_Tp>::_S_thread_freelist_mutex;
#endif
#endif
} // namespace __gnu_cxx

#endif