• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/toolchains/hndtools-armeabi-2013.11/arm-none-eabi/include/c++/4.8.1/ext/
1// Allocators -*- C++ -*-
2
3// Copyright (C) 2001-2013 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library.  This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
23// <http://www.gnu.org/licenses/>.
24
25/*
26 * Copyright (c) 1996-1997
27 * Silicon Graphics Computer Systems, Inc.
28 *
29 * Permission to use, copy, modify, distribute and sell this software
30 * and its documentation for any purpose is hereby granted without fee,
31 * provided that the above copyright notice appear in all copies and
32 * that both that copyright notice and this permission notice appear
33 * in supporting documentation.  Silicon Graphics makes no
34 * representations about the suitability of this software for any
35 * purpose.  It is provided "as is" without express or implied warranty.
36 */
37
38/** @file ext/pool_allocator.h
39 *  This file is a GNU extension to the Standard C++ Library.
40 */
41
42#ifndef _POOL_ALLOCATOR_H
43#define _POOL_ALLOCATOR_H 1
44
45#include <bits/c++config.h>
46#include <cstdlib>
47#include <new>
48#include <bits/functexcept.h>
49#include <ext/atomicity.h>
50#include <ext/concurrence.h>
51#include <bits/move.h>
52#if __cplusplus >= 201103L
53#include <type_traits>
54#endif
55
56namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
57{
58_GLIBCXX_BEGIN_NAMESPACE_VERSION
59
60  using std::size_t;
61  using std::ptrdiff_t;
62
63  /**
64   *  @brief  Base class for __pool_alloc.
65   *
66   *  Uses various allocators to fulfill underlying requests (and makes as
67   *  few requests as possible when in default high-speed pool mode).
68   *
69   *  Important implementation properties:
70   *  0. If globally mandated, then allocate objects from new
71   *  1. If the clients request an object of size > _S_max_bytes, the resulting
72   *     object will be obtained directly from new
73   *  2. In all other cases, we allocate an object of size exactly
74   *     _S_round_up(requested_size).  Thus the client has enough size
75   *     information that we can return the object to the proper free list
76   *     without permanently losing part of the object.
77   */
    class __pool_alloc_base
    {
    protected:

      // Every pooled chunk size is a multiple of _S_align bytes.
      enum { _S_align = 8 };
      // Requests larger than this bypass the pool entirely (see property 1
      // above) and are served directly by operator new.
      enum { _S_max_bytes = 128 };
      // One free list per size class: 8, 16, ..., 128 bytes.
      enum { _S_free_list_size = (size_t)_S_max_bytes / (size_t)_S_align };

      // A free chunk doubles as its own list node: while on a free list
      // the first word holds the link; once handed out, the whole chunk
      // is client data.  No per-chunk bookkeeping overhead.
      union _Obj
      {
	union _Obj* _M_free_list_link;
	char        _M_client_data[1];    // The client sees this.
      };

      // Heads of the per-size-class free lists (defined out of line).
      static _Obj* volatile         _S_free_list[_S_free_list_size];

      // Chunk allocation state.
      static char*                  _S_start_free;  // start of current slab
      static char*                  _S_end_free;    // end of current slab
      static size_t                 _S_heap_size;   // total bytes obtained

      // Round __bytes up to the next multiple of _S_align
      // (_S_align must be a power of two for this mask trick).
      size_t
      _M_round_up(size_t __bytes)
      { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }

      // Map a (rounded-up) byte count to the head of its free list.
      _GLIBCXX_CONST _Obj* volatile*
      _M_get_free_list(size_t __bytes) throw ();

      // Single mutex guarding all free lists and the chunk state.
      __mutex&
      _M_get_mutex() throw ();

      // Returns an object of size __n, and optionally adds to size __n
      // free list.
      void*
      _M_refill(size_t __n);

      // Allocates a chunk for nobjs of size size.  nobjs may be reduced
      // if it is inconvenient to allocate the requested number.
      char*
      _M_allocate_chunk(size_t __n, int& __nobjs);
    };
119
120
121  /**
122   * @brief  Allocator using a memory pool with a single lock.
123   * @ingroup allocators
124   */
  template<typename _Tp>
    class __pool_alloc : private __pool_alloc_base
    {
    private:
      // Tri-state flag, one per instantiation: 0 until the first
      // allocate() call, then +1 if GLIBCXX_FORCE_NEW was found in the
      // environment (pooling disabled), -1 otherwise.  See allocate().
      static _Atomic_word	    _S_force_new;

    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __pool_alloc<_Tp1> other; };

#if __cplusplus >= 201103L
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2103. propagate_on_container_move_assignment
      typedef std::true_type propagate_on_container_move_assignment;
#endif

      // The allocator is stateless, so all special members are trivial.
      __pool_alloc() _GLIBCXX_USE_NOEXCEPT { }

      __pool_alloc(const __pool_alloc&) _GLIBCXX_USE_NOEXCEPT { }

      template<typename _Tp1>
        __pool_alloc(const __pool_alloc<_Tp1>&) _GLIBCXX_USE_NOEXCEPT { }

      ~__pool_alloc() _GLIBCXX_USE_NOEXCEPT { }

      pointer
      address(reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      const_pointer
      address(const_reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      // Largest __n for which allocate(__n) can possibly succeed.
      size_type
      max_size() const _GLIBCXX_USE_NOEXCEPT
      { return size_t(-1) / sizeof(_Tp); }

#if __cplusplus >= 201103L
      template<typename _Up, typename... _Args>
        void
        construct(_Up* __p, _Args&&... __args)
	{ ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }

      template<typename _Up>
        void
        destroy(_Up* __p) { __p->~_Up(); }
#else
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
#endif

      // Obtain raw storage for __n objects; defined out of line below.
      // Throws std::bad_alloc on failure or if __n > max_size().
      pointer
      allocate(size_type __n, const void* = 0);

      // Return storage previously obtained from allocate(__n).
      void
      deallocate(pointer __p, size_type __n);
    };
197
198  template<typename _Tp>
199    inline bool
200    operator==(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
201    { return true; }
202
203  template<typename _Tp>
204    inline bool
205    operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
206    { return false; }
207
  // Out-of-line definition of the per-instantiation force-new flag;
  // zero-initialized, set to +1/-1 on first allocate() (see below).
  template<typename _Tp>
    _Atomic_word
    __pool_alloc<_Tp>::_S_force_new;
211
  template<typename _Tp>
    _Tp*
    __pool_alloc<_Tp>::allocate(size_type __n, const void*)
    {
      pointer __ret = 0;
      if (__builtin_expect(__n != 0, true))
	{
	  // Guard the __n * sizeof(_Tp) multiplication below against
	  // overflow.
	  if (__n > this->max_size())
	    std::__throw_bad_alloc();

	  // If there is a race through here, assume answer from getenv
	  // will resolve in same direction.  Inspired by techniques
	  // to efficiently support threading found in basic_string.h.
	  if (_S_force_new == 0)
	    {
	      if (std::getenv("GLIBCXX_FORCE_NEW"))
		__atomic_add_dispatch(&_S_force_new, 1);
	      else
		__atomic_add_dispatch(&_S_force_new, -1);
	    }

	  const size_t __bytes = __n * sizeof(_Tp);
	  // Large requests, or GLIBCXX_FORCE_NEW mode, go straight to
	  // operator new (which throws bad_alloc on failure itself).
	  if (__bytes > size_t(_S_max_bytes) || _S_force_new > 0)
	    __ret = static_cast<_Tp*>(::operator new(__bytes));
	  else
	    {
	      _Obj* volatile* __free_list = _M_get_free_list(__bytes);

	      // All free-list manipulation happens under the single
	      // pool mutex; held until the end of this scope.
	      __scoped_lock sentry(_M_get_mutex());
	      _Obj* __restrict__ __result = *__free_list;
	      // Empty list (the unlikely case): carve fresh chunks.
	      if (__builtin_expect(__result == 0, 0))
		__ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
	      else
		{
		  // Pop the head node and hand it to the client.
		  *__free_list = __result->_M_free_list_link;
		  __ret = reinterpret_cast<_Tp*>(__result);
		}
	      if (__ret == 0)
		std::__throw_bad_alloc();
	    }
	}
      return __ret;
    }
255
  template<typename _Tp>
    void
    __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__n != 0 && __p != 0, true))
	{
	  const size_t __bytes = __n * sizeof(_Tp);
	  // Must mirror the routing decision made in allocate(): storage
	  // that came from operator new goes back to operator delete.
	  if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new > 0)
	    ::operator delete(__p);
	  else
	    {
	      // Pooled storage: push the block back onto the free list
	      // for its size class, under the pool mutex.
	      _Obj* volatile* __free_list = _M_get_free_list(__bytes);
	      _Obj* __q = reinterpret_cast<_Obj*>(__p);

	      __scoped_lock sentry(_M_get_mutex());
	      __q ->_M_free_list_link = *__free_list;
	      *__free_list = __q;
	    }
	}
    }
276
277_GLIBCXX_END_NAMESPACE_VERSION
278} // namespace
279
280#endif
281