// mt_allocator.h revision 132720
// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 *  You should only include this header if you are using GCC 3 or later.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>

namespace __gnu_cxx
{
  /**
   *  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  "global").
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
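  // A minimal usage sketch (our illustration, not part of the original
  // header).  __mt_alloc meets the standard allocator requirements and
  // can therefore be used as the allocator argument of any container:
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //   __v.push_back(42);  // The first allocation runs _S_initialize().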
  template<typename _Tp>
    class __mt_alloc
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __mt_alloc<_Tp1> other; };

      __mt_alloc() throw()
      {
	// XXX
      }

      __mt_alloc(const __mt_alloc&) throw()
      {
	// XXX
      }

      template<typename _Tp1>
        __mt_alloc(const __mt_alloc<_Tp1>& obj) throw()
        {
	  // XXX
	}

      ~__mt_alloc() throw() { }

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      // Variables used to configure the behavior of the allocator,
      // assigned and explained in detail below.
      struct _Tune
      {
	// Alignment needed.
	// NB: In any case must be >= sizeof(_Block_record), that
	// is 4 on 32 bit machines and 8 on 64 bit machines.
	size_t  _M_align;

	// Allocation requests (after round-up to power of 2) below
	// this value will be handled by the allocator. A raw call
	// to new will be used for requests larger than this value.
	size_t	_M_max_bytes;

	// Size in bytes of the smallest bin.
	// NB: Must be a power of 2 and >= _M_align.
	size_t  _M_min_bin;

	// In order to avoid fragmenting and minimize the number of
	// new() calls we always request new memory using this
	// value. Based on previous discussions on the libstdc++
	// mailing list we have chosen the value below.
	// See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
	size_t 	_M_chunk_size;

	// The maximum number of supported threads. Our Linux 2.4.18
	// reports 4070 in /proc/sys/kernel/threads-max
	size_t 	_M_max_threads;

	// Each time a deallocation occurs in a threaded application
	// we make sure that there is no more than
	// _M_freelist_headroom % of used memory on the freelist. If
	// the number of additional records is more than
	// _M_freelist_headroom % of the freelist, we move these
	// records back to the global pool.
	size_t 	_M_freelist_headroom;

	// Set to true to force all allocations to use new().
	bool 	_M_force_new;

	explicit
	_Tune()
	: _M_align(8), _M_max_bytes(128), _M_min_bin(8),
	  _M_chunk_size(4096 - 4 * sizeof(void*)),
	  _M_max_threads(4096), _M_freelist_headroom(10),
	  _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
	{ }

	explicit
	_Tune(size_t __align, size_t __maxb, size_t __minbin,
	      size_t __chunk, size_t __maxthreads, size_t __headroom,
	      bool __force)
	: _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
	  _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
	  _M_freelist_headroom(__headroom), _M_force_new(__force)
	{ }
      };
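
      // A worked example of the defaults (our own illustration): with
      // _M_align == 8, _M_min_bin == 8 and _M_max_bytes == 128 the
      // allocator maintains bins of 8, 16, 32, 64 and 128 bytes.  On a
      // 32-bit machine _M_chunk_size is 4096 - 4 * 4 == 4080 bytes, so
      // the smallest bin (block size 8 + 8 == 16 bytes including the
      // _M_align-byte header) is refilled 4080 / 16 == 255 blocks at a
      // time, while the largest bin (128 + 8 == 136 bytes) gets
      // 4080 / 136 == 30 blocks per chunk.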

    private:
      // We need to create the initial lists and set up some variables
      // before we can answer the first request for memory.
#ifdef __GTHREADS
      static __gthread_once_t 		_S_once;
#endif
      static bool 			_S_init;

      static void
      _S_initialize();

      // Configuration options.
      static _Tune 	       		_S_options;

      static const _Tune
      _S_get_options()
      { return _S_options; }

      static void
      _S_set_options(_Tune __t)
      {
	if (!_S_init)
	  _S_options = __t;
      }

      // Using unsigned short int as the type for the binmap implies
      // we never cache blocks larger than 65535 bytes with this
      // allocator.
      typedef unsigned short int        _Binmap_type;
      static _Binmap_type* 		_S_binmap;
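
      // Illustration (ours, derived from _S_initialize below): with
      // the default _M_min_bin == 8 and _M_max_bytes == 128, the
      // binmap maps a request size in bytes to a bin index as
      //
      //   bytes  0-8   -> bin 0 (8-byte blocks)
      //   bytes  9-16  -> bin 1 (16-byte blocks)
      //   bytes 17-32  -> bin 2 (32-byte blocks)
      //   bytes 33-64  -> bin 3 (64-byte blocks)
      //   bytes 65-128 -> bin 4 (128-byte blocks)
      //
      // so e.g. _S_binmap[9] == 1 and _S_binmap[128] == 4.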

      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
#ifdef __GTHREADS
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record* volatile        _M_next;

	// Thread id ranging from 1 to _S_max_threads.
        size_t                          _M_id;
      };

      static _Thread_record* volatile 	_S_thread_freelist_first;
      static __gthread_mutex_t 		_S_thread_freelist_mutex;
      static __gthread_key_t 		_S_thread_key;

      static void
      _S_destroy_thread_key(void* __freelist_pos);
#endif

      static size_t
      _S_get_thread_id();

      union _Block_record
      {
	// Points to the block_record of the next free block.
        _Block_record* volatile         _M_next;

#ifdef __GTHREADS
	// The thread id of the thread which has requested this block.
        size_t                          _M_thread_id;
#endif
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block for each
	// thread id. Memory for this "array" is allocated in
	// _S_initialize() for _S_max_threads + global pool 0.
        _Block_record** volatile        _M_first;

#ifdef __GTHREADS
	// An "array" of counters used to keep track of the number of
	// blocks that are on the freelist/used for each thread id.
	// Memory for these "arrays" is allocated in _S_initialize()
	// for _S_max_threads + global pool 0.
        size_t* volatile                _M_free;
        size_t* volatile                _M_used;

	// Each bin has its own mutex which is used to ensure data
	// integrity while changing "ownership" on a block.  The mutex
	// is initialized in _S_initialize().
        __gthread_mutex_t*              _M_mutex;
#endif
      };

      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory for this "array" is allocated in
      // _S_initialize().
      static _Bin_record* volatile     	_S_bin;

      // Actual value calculated in _S_initialize().
      static size_t 	       	     	_S_bin_size;
    };

  template<typename _Tp>
    typename __mt_alloc<_Tp>::pointer
    __mt_alloc<_Tp>::
    allocate(size_type __n, const void*)
    {
      // Although the test in __gthread_once() would suffice, we wrap
      // the test of the once condition in our own unlocked check.
      // This saves one function call to pthread_once() (which itself
      // only tests the once value unlocked anyway and immediately
      // returns if it is set).
      if (!_S_init)
	{
#ifdef __GTHREADS
	  if (__gthread_active_p())
	    __gthread_once(&_S_once, _S_initialize);
#endif
	  if (!_S_init)
	    _S_initialize();
	}

297172262Skevlo      // Requests larger than _M_max_bytes are handled by new/delete
29861452Sdfr      // directly.
299172262Skevlo      const size_t __bytes = __n * sizeof(_Tp);
300172262Skevlo      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
301172262Skevlo	{
302172262Skevlo	  void* __ret = ::operator new(__bytes);
303172262Skevlo	  return static_cast<_Tp*>(__ret);
30461452Sdfr	}
305172262Skevlo
306172262Skevlo      // Round up to power of 2 and figure out which bin to use.
307172262Skevlo      const size_t __which = _S_binmap[__bytes];
308172262Skevlo      const size_t __thread_id = _S_get_thread_id();
309172262Skevlo
310172262Skevlo      // Find out if we have blocks on our freelist.  If so, go ahead
311172262Skevlo      // and use them directly without having to lock anything.
312172262Skevlo      const _Bin_record& __bin = _S_bin[__which];
313172262Skevlo      _Block_record* __block = NULL;
314172262Skevlo      if (__bin._M_first[__thread_id] == NULL)
315172262Skevlo	{
	  // NB: For alignment reasons, we can't use the first _M_align
	  // bytes, even when sizeof(_Block_record) < _M_align.
	  const size_t __bin_size = ((_S_options._M_min_bin << __which)
				     + _S_options._M_align);
	  size_t __block_count = _S_options._M_chunk_size / __bin_size;

	  // Are we using threads?
	  // - Yes, check if there are free blocks on the global
	  //   list. If so, grab up to __block_count blocks in one
	  //   lock and change ownership. If the global list is
	  //   empty, we allocate a new chunk and add those blocks
	  //   directly to our own freelist (with us as owner).
	  // - No, all operations are made directly on global pool 0:
	  //   there is no need to lock or change ownership, but we
	  //   still check for free blocks on the global list (adding
	  //   new ones if there are none) and take the first one.
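	  //
	  // Block layout (our own sketch of the code below): a chunk
	  // of _M_chunk_size bytes is carved into __block_count blocks
	  // of __bin_size bytes each.  Within a block the first
	  // _M_align bytes hold the _Block_record freelist header; the
	  // user data returned by allocate() starts right after it:
	  //
	  //   |<------------------ __bin_size ------------------>|
	  //   | _M_align bytes | (_M_min_bin << __which) bytes    |
	  //   | _Block_record  | data returned by allocate()      |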
#ifdef __GTHREADS
	  if (__gthread_active_p())
	    {
	      __gthread_mutex_lock(__bin._M_mutex);
	      if (__bin._M_first[0] == NULL)
		{
		  // No need to hold the lock when we are adding a
		  // whole chunk to our own list.
		  __gthread_mutex_unlock(__bin._M_mutex);

		  void* __v = ::operator new(_S_options._M_chunk_size);
		  __bin._M_first[__thread_id] = static_cast<_Block_record*>(__v);
		  __bin._M_free[__thread_id] = __block_count;

		  --__block_count;
		  __block = __bin._M_first[__thread_id];
		  while (__block_count-- > 0)
		    {
		      char* __c = reinterpret_cast<char*>(__block) + __bin_size;
		      __block->_M_next = reinterpret_cast<_Block_record*>(__c);
		      __block = __block->_M_next;
		    }
		  __block->_M_next = NULL;
		}
	      else
		{
		  // Is the number of required blocks greater than or
		  // equal to the number that can be provided by the
		  // global free list?
		  __bin._M_first[__thread_id] = __bin._M_first[0];
		  if (__block_count >= __bin._M_free[0])
		    {
		      __bin._M_free[__thread_id] = __bin._M_free[0];
		      __bin._M_free[0] = 0;
		      __bin._M_first[0] = NULL;
		    }
		  else
		    {
		      __bin._M_free[__thread_id] = __block_count;
		      __bin._M_free[0] -= __block_count;
		      --__block_count;
		      __block = __bin._M_first[0];
		      while (__block_count-- > 0)
			__block = __block->_M_next;
		      __bin._M_first[0] = __block->_M_next;
		      __block->_M_next = NULL;
		    }
		  __gthread_mutex_unlock(__bin._M_mutex);
		}
	    }
	  else
#endif
	    {
	      void* __v = ::operator new(_S_options._M_chunk_size);
	      __bin._M_first[0] = static_cast<_Block_record*>(__v);

	      --__block_count;
	      __block = __bin._M_first[0];
	      while (__block_count-- > 0)
		{
		  char* __c = reinterpret_cast<char*>(__block) + __bin_size;
		  __block->_M_next = reinterpret_cast<_Block_record*>(__c);
		  __block = __block->_M_next;
		}
	      __block->_M_next = NULL;
	    }
	}

      __block = __bin._M_first[__thread_id];
      __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;
#ifdef __GTHREADS
      if (__gthread_active_p())
	{
	  __block->_M_thread_id = __thread_id;
	  --__bin._M_free[__thread_id];
	  ++__bin._M_used[__thread_id];
	}
#endif

      char* __c = reinterpret_cast<char*>(__block) + _S_options._M_align;
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }

  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    deallocate(pointer __p, size_type __n)
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
	{
	  ::operator delete(__p);
	  return;
	}

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const _Bin_record& __bin = _S_bin[__which];

      char* __c = reinterpret_cast<char*>(__p) - _S_options._M_align;
      _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

#ifdef __GTHREADS
      if (__gthread_active_p())
	{
	  // Calculate the number of records to remove from our freelist:
	  // in order to avoid too much contention we wait until the
	  // number of records is "high enough".
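	  //
	  // A worked example (our illustration): with the default
	  // headroom of 10, a thread holding 200 free and 50 used
	  // blocks in bin 4 (of _S_bin_size == 5) gives
	  // __remove = 200 * 10 - 50 = 1950.  Since 1950 >
	  // 100 * (5 - 4) * 10 = 1000 and 1950 > 200, we return
	  // 1950 / 10 = 195 blocks to the global pool.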
	  const size_t __thread_id = _S_get_thread_id();

	  long __remove = ((__bin._M_free[__thread_id]
			    * _S_options._M_freelist_headroom)
			   - __bin._M_used[__thread_id]);
	  if (__remove > static_cast<long>(100 * (_S_bin_size - __which)
					   * _S_options._M_freelist_headroom)
	      && __remove > static_cast<long>(__bin._M_free[__thread_id]))
	    {
	      _Block_record* __tmp = __bin._M_first[__thread_id];
	      _Block_record* __first = __tmp;
	      __remove /= _S_options._M_freelist_headroom;
	      const long __removed = __remove;
	      --__remove;
	      while (__remove-- > 0)
		__tmp = __tmp->_M_next;
	      __bin._M_first[__thread_id] = __tmp->_M_next;
	      __bin._M_free[__thread_id] -= __removed;

	      __gthread_mutex_lock(__bin._M_mutex);
	      __tmp->_M_next = __bin._M_first[0];
	      __bin._M_first[0] = __first;
	      __bin._M_free[0] += __removed;
	      __gthread_mutex_unlock(__bin._M_mutex);
	    }

	  // Return this block to our list and update counters and
	  // owner id as needed.
	  --__bin._M_used[__block->_M_thread_id];

	  __block->_M_next = __bin._M_first[__thread_id];
	  __bin._M_first[__thread_id] = __block;

	  ++__bin._M_free[__thread_id];
	}
      else
#endif
	{
	  // Single threaded application - return to global pool.
	  __block->_M_next = __bin._M_first[0];
	  __bin._M_first[0] = __block;
	}
    }

  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_initialize()
    {
      // This method is called on the first allocation (when _S_init is still
      // false) to create the bins.

      // Ensure that the static initialization of _S_options has
      // happened.  This depends on (a) _M_align == 0 being an invalid
      // value that is only present at startup, and (b) the real
      // static initialization that happens later not actually
      // changing anything.
      if (_S_options._M_align == 0)
        new (&_S_options) _Tune;

      // _M_force_new must not change after the first allocate(),
      // which in turn calls this method, so if it's false, it's false
      // forever and we don't need to return here ever again.
      if (_S_options._M_force_new)
	{
	  _S_init = true;
	  return;
	}

      // Calculate the number of bins required based on _M_max_bytes.
      // _S_bin_size is statically-initialized to one.
      size_t __bin_size = _S_options._M_min_bin;
      while (_S_options._M_max_bytes > __bin_size)
	{
	  __bin_size <<= 1;
	  ++_S_bin_size;
	}
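      // With the default _Tune (_M_min_bin == 8, _M_max_bytes == 128)
      // the loop above runs four times (16, 32, 64, 128), leaving
      // _S_bin_size == 5: one bin per power of 2 from 8 up to 128
      // bytes (this note is our own illustration).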

      // Setup the bin map for quick lookup of the relevant bin.
      const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(_Binmap_type);
      _S_binmap = static_cast<_Binmap_type*>(::operator new(__j));

      _Binmap_type* __bp = _S_binmap;
      _Binmap_type __bin_max = _S_options._M_min_bin;
      _Binmap_type __bint = 0;
      for (_Binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; ++__ct)
        {
          if (__ct > __bin_max)
            {
              __bin_max <<= 1;
              ++__bint;
            }
          *__bp++ = __bint;
        }

      // Initialize _S_bin and its members.
      void* __v = ::operator new(sizeof(_Bin_record) * _S_bin_size);
      _S_bin = static_cast<_Bin_record*>(__v);

      // If __gthread_active_p(), create and initialize the list of
      // free thread ids. Single threaded applications use thread id 0
      // directly and have no need for this.
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
	  const size_t __k = sizeof(_Thread_record) * _S_options._M_max_threads;
	  __v = ::operator new(__k);
          _S_thread_freelist_first = static_cast<_Thread_record*>(__v);

	  // NOTE! The first assignable thread id is 1 since the
	  // global pool uses id 0
          size_t __i;
          for (__i = 1; __i < _S_options._M_max_threads; ++__i)
            {
	      _Thread_record& __tr = _S_thread_freelist_first[__i - 1];
              __tr._M_next = &_S_thread_freelist_first[__i];
              __tr._M_id = __i;
            }

          // Set last record.
          _S_thread_freelist_first[__i - 1]._M_next = NULL;
          _S_thread_freelist_first[__i - 1]._M_id = __i;

	  // Make sure this is initialized.
#ifndef __GTHREAD_MUTEX_INIT
	  __GTHREAD_MUTEX_INIT_FUNCTION(&_S_thread_freelist_mutex);
#endif
          // Initialize per thread key to hold pointer to
          // _S_thread_freelist.
          __gthread_key_create(&_S_thread_key, _S_destroy_thread_key);

	  const size_t __max_threads = _S_options._M_max_threads + 1;
	  for (size_t __n = 0; __n < _S_bin_size; ++__n)
	    {
	      _Bin_record& __bin = _S_bin[__n];
	      __v = ::operator new(sizeof(_Block_record*) * __max_threads);
	      __bin._M_first = static_cast<_Block_record**>(__v);

	      __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_free = static_cast<size_t*>(__v);

	      __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_used = static_cast<size_t*>(__v);

	      __v = ::operator new(sizeof(__gthread_mutex_t));
              __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
              {
                // Do not copy a POSIX/gthr mutex once in use.
                __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
                *__bin._M_mutex = __tmp;
              }
#else
              { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif

	      for (size_t __threadn = 0; __threadn < __max_threads;
		   ++__threadn)
		{
		  __bin._M_first[__threadn] = NULL;
		  __bin._M_free[__threadn] = 0;
		  __bin._M_used[__threadn] = 0;
		}
	    }
	}
      else
#endif
	for (size_t __n = 0; __n < _S_bin_size; ++__n)
	  {
	    _Bin_record& __bin = _S_bin[__n];
	    __v = ::operator new(sizeof(_Block_record*));
	    __bin._M_first = static_cast<_Block_record**>(__v);
	    __bin._M_first[0] = NULL;
	  }

      _S_init = true;
    }

  template<typename _Tp>
    size_t
    __mt_alloc<_Tp>::
    _S_get_thread_id()
    {
#ifdef __GTHREADS
      // If we have thread support and it's active, we check the
      // thread key value and return its id; if it's not set, we take
      // the first record from _S_thread_freelist, set the key, and
      // return its id.
      if (__gthread_active_p())
        {
          _Thread_record* __freelist_pos =
	    static_cast<_Thread_record*>(__gthread_getspecific(_S_thread_key));
	  if (__freelist_pos == NULL)
            {
	      // Since _S_options._M_max_threads must be larger than
	      // the theoretical maximum number of threads of the OS,
	      // the list can never be empty.
              __gthread_mutex_lock(&_S_thread_freelist_mutex);
              __freelist_pos = _S_thread_freelist_first;
              _S_thread_freelist_first = _S_thread_freelist_first->_M_next;
              __gthread_mutex_unlock(&_S_thread_freelist_mutex);

              __gthread_setspecific(_S_thread_key,
				    static_cast<void*>(__freelist_pos));
            }
          return __freelist_pos->_M_id;
        }
#endif
      // Otherwise (no thread support or inactive) all requests are
      // served from the global pool 0.
      return 0;
    }

#ifdef __GTHREADS
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_destroy_thread_key(void* __freelist_pos)
    {
      // Return this thread id record to front of thread_freelist.
      __gthread_mutex_lock(&_S_thread_freelist_mutex);
      _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
      __tr->_M_next = _S_thread_freelist_first;
      _S_thread_freelist_first = __tr;
      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
    }
#endif

  template<typename _Tp>
    inline bool
    operator==(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    bool __mt_alloc<_Tp>::_S_init = false;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Tune __mt_alloc<_Tp>::_S_options;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Binmap_type* __mt_alloc<_Tp>::_S_binmap;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Bin_record* volatile __mt_alloc<_Tp>::_S_bin;

  template<typename _Tp>
    size_t __mt_alloc<_Tp>::_S_bin_size = 1;

  // Actual initialization in _S_initialize().
#ifdef __GTHREADS
  template<typename _Tp>
    __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Thread_record*
    volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;

  template<typename _Tp>
    __gthread_key_t __mt_alloc<_Tp>::_S_thread_key;

  template<typename _Tp>
    __gthread_mutex_t
#ifdef __GTHREAD_MUTEX_INIT
    __mt_alloc<_Tp>::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
#else
    __mt_alloc<_Tp>::_S_thread_freelist_mutex;
#endif
#endif
} // namespace __gnu_cxx

#endif