// Allocator details.

// Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

//
// ISO C++ 14882:
//
#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

namespace
{
#ifdef __GTHREADS
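  // Bookkeeping for the per-thread ids handed out by __pool<true>:
  // the array of _Thread_record, the head of the list of currently
  // unused records, the array capacity, and the TLS key whose
  // destructor returns a thread's id to the list on thread exit.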
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*     _M_thread_freelist;
    _Thread_record*     _M_thread_freelist_array;
    size_t              _M_max_threads;
    __gthread_key_t     _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  // Ensure freelist is constructed first.
  static __freelist freelist;
  __gnu_cxx::__mutex freelist_mutex;

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __gnu_cxx::__scoped_lock sentry(freelist_mutex);
    size_t _M_id = reinterpret_cast<size_t>(__id);

    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
    __tr->_M_next = freelist._M_thread_freelist;
    freelist._M_thread_freelist = __tr;
  }
#endif
} // anonymous namespace

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

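    // _M_reserve_block handed out __p at _M_align bytes past the
    // start of the block, so step back to recover the _Block_record
    // header.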
    char* __c = __p - _M_get_align();
    _Block_record* __block_record = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block_record->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block_record;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                               + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
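
    // A chunk is a single ::operator new allocation of _M_chunk_size
    // bytes: one _Block_address header for bookkeeping, followed by
    // __block_count blocks of __bin_size bytes each.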

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block_record = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block_record;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block_record->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block_record = __block_record->_M_next;
      }
    __block_record->_M_next = NULL;

    __block_record = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block_record->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block_record) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
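
    // For instance, with the default _Tune values (assuming
    // _M_min_bin == 8 and _M_max_bytes == 128), the map reads
    // 0..8 -> bin 0, 9..16 -> bin 1, 17..32 -> bin 2, and so on,
    // so a request of __bytes lands in the smallest bin that fits.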

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block_record = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance's sake we don't resync every time, to
        // spare atomic ops.  Note that if __reclaimed has increased
        // by, say, 1024 since the last sync, the other threads must
        // have executed the atomic add in the else branch below at
        // least that many times (at least, because _M_reserve_block
        // may have decreased the counter), so one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
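
        // __remove now holds the scaled surplus of free blocks over
        // blocks in use.  Blocks migrate back to the global freelist
        // only when that surplus clears both the per-bin __limit and
        // the raw free count, i.e. when this thread's freelist has
        // grown well out of proportion to what it is actually using;
        // __remove is then rescaled by _M_freelist_headroom to give
        // the number of blocks to hand back.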
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block_record->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block_record->_M_thread_id], 1);

        __block_record->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block_record;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block_record->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block_record;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
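
    // Chunk layout as in __pool<false>: one _Block_address header
    // followed by __block_count blocks of __bin_size bytes each.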

    // Are we using threads?
    // - Yes, check if there are free blocks on the global list.  If
    //   so, grab up to __block_count blocks in one lock and change
    //   ownership.  If the global list is empty, we allocate a new
    //   chunk and add those blocks directly to our own freelist
    //   (with us as owner).
    // - No, all operations are made directly on global pool 0; no
    //   need to lock or change ownership, but check for free blocks
    //   on the global list (adding new ones if needed) and get the
    //   first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block_record = NULL;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block_record = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block_record;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block_record->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block_record = __block_record->_M_next;
              }
            __block_record->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block_record = __bin._M_first[0];
                while (--__block_count > 0)
                  __block_record = __block_record->_M_next;
                __bin._M_first[0] = __block_record->_M_next;
                __block_record->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block_record = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block_record;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block_record->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block_record = __block_record->_M_next;
          }
        __block_record->_M_next = NULL;
      }

    __block_record = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block_record->_M_next;

    if (__gthread_active_p())
      {
        __block_record->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block_record) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::__scoped_lock sentry(freelist_mutex);

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1, since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
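                  // The shared array is being grown: relink the
                  // surviving free records at the same indices in
                  // the new array, splice the freshly added records
                  // (indices freelist._M_max_threads onward) onto
                  // the tail, then release the old array.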
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);
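
            // NB: the same allocation holds, after the __max_threads
            // size_t counters, an array of __max_threads _Atomic_word
            // "reclaimed" counters; _M_reserve_block and
            // _M_reclaim_block reach it via _M_used + __max_threads.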

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have active thread support, check the thread key value
    // and return its id; if the key is not yet set, take the first
    // record from _M_thread_freelist, set the key, and return that
    // record's id.
    if (__gthread_active_p())
      {
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(freelist_mutex);
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::__scoped_lock sentry(freelist_mutex);

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1, since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
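                  // As in _M_initialize(): relink the surviving free
                  // records at the same indices in the new array,
                  // splice the freshly added records onto the tail,
                  // then release the old array.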
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads +
                                 sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);
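
            // NB: as in _M_initialize(), the _Atomic_word "reclaimed"
            // counters live in this same allocation, starting at
            // _M_used + __max_threads.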

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
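
  // For reference, a minimal usage sketch (illustrative only, not
  // part of the library source): __mt_alloc is a drop-in standard
  // allocator.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   typedef __gnu_cxx::__mt_alloc<int> alloc_type;
  //   std::vector<int, alloc_type> v;
  //   v.push_back(42);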

_GLIBCXX_END_NAMESPACE