allocation.cpp revision 10606:24c6f885d316
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

void* StackObj::operator new(size_t size)     throw() { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p)              { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size)  throw() { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete [](void* p)           { ShouldNotCallThis(); }

void* _ValueObj::operator new(size_t size)    throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p)             { ShouldNotCallThis(); }
void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete [](void* p)          { ShouldNotCallThis(); }

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only, type, THREAD);
}

bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

bool MetaspaceObj::is_metaspace_object() const {
  return Metaspace::contains((void*)this);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" INTPTR_FORMAT "}", p2i(this));
}

void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  address res = NULL;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
  return (address) operator new(size, type, flags);
}

void* ResourceObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
    allocation_type type, MEMFLAGS flags) throw() {
  // should only call this with std::nothrow, use other operator new() otherwise
  address res = NULL;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
    allocation_type type, MEMFLAGS flags) throw() {
  return (address)operator new(size, nothrow_constant, type, flags);
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}

#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res));
    assert(type <= allocation_mask, "incorrect allocation type");
    ResourceObj* resobj = (ResourceObj *)res;
    resobj->_allocation_t[0] = ~(allocation + type);
    if (type != STACK_OR_EMBEDDED) {
      // Called from operator new() and CollectionSetChooser(),
      // set verification value.
      resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
    }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type()  == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
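
// Worked example of the encoding above (illustrative numbers): for an object
// at aligned address 0x1000 allocated with type t (t <= allocation_mask),
// set_allocation_type() stores
//   _allocation_t[0] = ~(0x1000 + t)
// so (~_allocation_t[0]) & allocation_mask recovers t, and
// ~(_allocation_t[0] | allocation_mask) recovers 0x1000, which is compared
// against 'this' to detect lost or overwritten objects. The verification word
// _allocation_t[1] = &_allocation_t[1] + t lets is_type_set() distinguish a
// real operator new() allocation from stack garbage that merely looks valid.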

ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
      // Operator new() is not called for allocations
      // on stack and for embedded objects.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
      // For some reason we got a value which resembles
      // an embedded or stack object (operator new() does not
      // set such a type). Keep it since it is a valid value
      // (even if it was garbage).
      // Ignore garbage in other fields.
    } else if (is_type_set()) {
      // Operator new() was called and type was set.
      assert(!allocated_on_stack(),
             "not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
             p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
    } else {
      // Operator new() was not called.
      // Assume that it is an embedded or stack object.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    }
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    // Note: garbage may resemble a valid value.
    assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
           "embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(),
           "copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
    // Keep current _allocation_t value;
    return *this;
}

ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
      _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
    }
}
#endif // ASSERT


void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
}


void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free   " INTPTR_FORMAT, p2i(p));
}

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our four static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;
  static ChunkPool* _tiny_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
   ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  NOINLINE void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside a ThreadCritical lock, so os::malloc
    // must be done outside the ThreadCritical lock due to NMT.
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n cached chunks, free the excess
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          // Free all remaining chunks while in ThreadCritical lock
          // so NMT adjustment is stable.
          while(cur != NULL) {
            next = cur->next();
            os::free(cur);
            _num_chunks--;
            cur = next;
          }
        }
      }
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
     _tiny_pool->free_all_but(BlocksToKeep);
     _small_pool->free_all_but(BlocksToKeep);
     _medium_pool->free_all_but(BlocksToKeep);
     _large_pool->free_all_but(BlocksToKeep);
  }
};
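
// Example of the pruning policy above (illustrative numbers): with 8 cached
// chunks and free_all_but(5), the walk stops at the 5th cached chunk, cuts the
// list there, and frees the remaining 3 chunks while still holding the
// ThreadCritical lock, leaving _num_chunks == 5.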

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };      // cleaning interval in ms

 public:
   ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
   void task() {
     ChunkPool::clean();
   }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // We expect requested_size to equal sizeof(Chunk); if it is not already the
  // properly aligned size we must align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}
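
// Note on the routing above: only the four standard chunk lengths are recycled
// through the static ChunkPools; any other length bypasses the pools and goes
// straight to os::malloc. Chunk::operator delete below mirrors this, returning
// pooled sizes to their pool and os::free'ing everything else.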

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default:
     ThreadCritical tc;  // Free chunks under TC lock so that NMT adjustment is stable.
     os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;                   // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0)  {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(init_size);
}

Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(Chunk::init_size);
}
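
// Usage sketch (illustrative, assuming a caller that owns the arena): callers
// bump-allocate out of the current chunk and let the arena grow on demand.
//
//   Arena arena(mtInternal);
//   void* buf = arena.Amalloc(64);       // carved out of [_hwm, _max) or a fresh chunk
//   buf = arena.Arealloc(buf, 64, 128);  // may extend in place if buf was the last allocation
//   // all chunks are released together when 'arena' goes out of scope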

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // Work around a rare race condition which could otherwise cause native
  // memory tracking to double count the arena size.
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  MemTracker::record_arena_free(_flags);
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

  // dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = os::malloc(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare race condition
  // that can make total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    long delta = (long)(size - size_in_bytes());
    _size_in_bytes = size;
    MemTracker::record_arena_size_change(delta, _flags);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // Whilst have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, "%s", whence);
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    _chunk = k;                 // restore the previous value of _chunk
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max =  _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}



// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object  (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&       // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {      // Still fits where it sits
    _hwm = c_old+corrected_new_size;      // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}
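
// Behavior summary for Arealloc() above: shrinking returns the same pointer
// and only gives bytes back when the block is the most recent allocation;
// growing extends in place when the block ends at _hwm and the aligned new
// size still fits below _max; otherwise the contents are copied into a fresh
// Amalloc'd block and the old block is Afree'd (mostly to keep stats accurate).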


// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
//
// In C++98/03 the throwing new operators are defined with the following signature:
//
// void* operator new(std::size_t size) throw(std::bad_alloc);
// void* operator new[](std::size_t size) throw(std::bad_alloc);
//
// while all the other (non-throwing) new and delete operators are defined with an empty
// throw clause (i.e. "operator delete(void* p) throw()") which means that they do not
// throw any exceptions (see section 18.4 of the C++ standard).
//
// In the new C++11/14 standard, the signature of the throwing new operators was changed
// by completely omitting the throw clause (which effectively means they could throw any
// exception) while all the other new/delete operators were changed to have a 'nothrow'
// clause instead of an empty throw clause.
//
// Unfortunately, the support for exception specifications among C++ compilers is still
// very fragile. While some more strict compilers like AIX xlC or HP aCC refuse to
// override the default throwing new operator with a user operator with an empty throw()
// clause, the MS Visual C++ compiler warns for every non-empty throw clause like
// throw(std::bad_alloc) that it will ignore the exception specification. The following
// operator definitions have been checked to correctly work with all currently supported
// compilers and they should be upwards compatible with C++11/14. Therefore
// PLEASE BE CAREFUL if you change the signature of the following operators!

static void * zero = (void *) 0;

void* operator new(size_t size) /* throw(std::bad_alloc) */ {
  fatal("Should not call global operator new");
  return zero;
}

void* operator new [](size_t size) /* throw(std::bad_alloc) */ {
  fatal("Should not call global operator new[]");
  return zero;
}

void* operator new(size_t size, const std::nothrow_t&  nothrow_constant) throw() {
  fatal("Should not call global operator new");
  return 0;
}

void* operator new [](size_t size, std::nothrow_t&  nothrow_constant) throw() {
  fatal("Should not call global operator new[]");
  return 0;
}

void operator delete(void* p) throw() {
  fatal("Should not call global delete");
}

void operator delete [](void* p) throw() {
  fatal("Should not call global delete []");
}

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong  AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
julong  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
julong  AllocStats::num_frees()   { return os::num_frees - start_frees; }
julong  AllocStats::free_bytes()  { return os::free_bytes - start_mfree_bytes; }
julong  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void    AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}


// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = Thread::current();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product