allocation.cpp revision 3465:d2a62e0f25eb
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

void* StackObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p)   { ShouldNotCallThis(); }
void* _ValueObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p)   { ShouldNotCallThis(); }
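
// StackObj and _ValueObj instances live only on the stack or embedded in
// other objects, so heap allocation through these operators is always a bug
// and traps via ShouldNotCallThis(). Illustrative sketch (hypothetical type
// name, not from this file):
//
//   class Mark : public StackObj { /* ... */ };
//   Mark m;                // OK: automatic storage
//   Mark* p = new Mark();  // asserts: operator new is forbidden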

void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

#ifdef ASSERT
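// Overview of the debug-only tagging scheme used below: _allocation_t[0]
// stores the bitwise complement of (this + type). Objects are at least
// 4-byte aligned, so the low two bits of the address are free to hold the
// allocation_type, and complementing makes it unlikely that leftover stack
// garbage decodes as a valid tag. _allocation_t[1] holds a second check
// value, &_allocation_t[1] + type, written only by operator new(), which
// lets the constructor distinguish heap/arena allocations from stack or
// embedded objects. Worked example with illustrative numbers: for an object
// at address 0x1000 tagged with type t, _allocation_t[0] = ~(0x1000 + t),
// and get_allocation_type() recovers (~_allocation_t[0]) & allocation_mask = t.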
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
    assert(type <= allocation_mask, "incorrect allocation type");
    ResourceObj* resobj = (ResourceObj *)res;
    resobj->_allocation_t[0] = ~(allocation + type);
    if (type != STACK_OR_EMBEDDED) {
      // Called from operator new() and CollectionSetChooser(),
      // set verification value.
      resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
    }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type() == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
      // Operator new() is not called for allocations
      // on stack and for embedded objects.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
      // For some reason we got a value which resembles
      // an embedded or stack object (operator new() does not
      // set such a type). Keep it since it is a valid value
      // (even if it was garbage).
      // Ignore garbage in other fields.
    } else if (is_type_set()) {
      // Operator new() was called and type was set.
      assert(!allocated_on_stack(),
             err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                     this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    } else {
      // Operator new() was not called.
      // Assume that it is an embedded or stack object.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    }
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    // Note: garbage may resemble a valid value.
    assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
           err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(),
           err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    // Keep the current _allocation_t value.
    return *this;
}

ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that the encoded (in _allocation_t) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::operator delete() will zap _allocation_t for C_heap objects.
      _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
    }
}
#endif // ASSERT


void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
}


void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free   " INTPTR_FORMAT, p);
}

bool warn_new_operator = false; // see vm_main

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  _NOINLINE_ void* allocate(size_t bytes) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside a ThreadCritical region, so because of
    // NMT the os::malloc call must be made outside the ThreadCritical lock.
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL)
      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");

    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // If we have more than n chunks, unlink all but the first n
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          _num_chunks = n;
        }
      }
    }

    // Free all remaining chunks, outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur, mtChunk);
      cur = next;
    }
  }

  // Accessors to the preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };      // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size is equal to sizeof(Chunk), but for arena allocations to
  // come out aligned as expected, the overhead must be rounded up to the
  // expected arena alignment. If sizeof(Chunk) is not already a multiple of
  // that alignment, ARENA_ALIGN pads it out.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}
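
// The placement argument is the payload length: the three pooled size classes
// are recycled through the ChunkPools, anything else goes straight to
// os::malloc. A sketch of the calling convention (mirroring Arena::grow()
// further down):
//
//   size_t len = Chunk::medium_size;
//   Chunk* c = new (len) Chunk(len);   // served by ChunkPool::medium_pool()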

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   default:                 os::free(c, mtChunk);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;                   // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start the chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
  set_size_in_bytes(a->size_in_bytes());
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}


Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;
  copy->set_size_in_bytes(size_in_bytes());
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  NOT_PRODUCT(Atomic::dec(&_instance_count);)
}

void* Arena::operator new(size_t size) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = os::malloc(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags|otArena, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  _first->chop();
  reset();
}
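// Note: chop() returns each chunk through Chunk::operator delete above, so
// the pooled size classes go back into their ChunkPool instead of being freed.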

// This is a high-traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    _size_in_bytes = size;
    MemTracker::record_arena_size((address)this, size);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // While we have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, whence);
}

// Grow a new Chunk
void* Arena::grow( size_t x ) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL) {
    signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max =  _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}
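
// grow() is the slow path of the arena's bump-pointer allocator; the fast
// path is inlined in the header. A minimal sketch of its shape (an
// approximation of the header's Amalloc, not a verbatim copy):
//
//   void* Arena::Amalloc(size_t x) {
//     x = ARENA_ALIGN(x);                    // keep _hwm aligned
//     if (_hwm + x > _max) return grow(x);   // current chunk exhausted
//     char* old = _hwm;                      // bump the high-water mark
//     _hwm += x;
//     return old;
//   }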


// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object  (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size);
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&       // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {      // Still fits where it sits
    _hwm = c_old+corrected_new_size;      // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size);
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}
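
// Arealloc only resizes in place when old_ptr is the most recent allocation
// in the current chunk (c_old + old_size == _hwm); otherwise it degenerates
// to allocate-and-copy. Hedged usage sketch (hypothetical arena and sizes):
//
//   char* buf = (char*)arena->Amalloc(16);
//   buf = (char*)arena->Arealloc(buf, 16, 32);  // in place while still at _hwm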


// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif
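
// Under UseMallocOnly the arena proper holds only an array of char* slots,
// one per object, while each object lives in its own os::malloc block. That
// is why contains() above compares the stored pointer values rather than
// address ranges, and why free_malloced_objects() below can walk a chunk as
// a char** range and os::free() every non-NULL entry.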


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size){
  static bool warned = false;
  if (!warned && warn_new_operator) {
    warning("should not call global (default) operator new");
    warned = true;
  }
  return (void *) AllocateHeap(size, "global operator new");
}
#endif

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong  AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
julong  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
julong  AllocStats::num_frees()   { return os::num_frees - start_frees; }
julong  AllocStats::free_bytes()  { return os::free_bytes - start_mfree_bytes; }
julong  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void    AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}


// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}
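
// ReallocMark guards arrays that may reallocate out of a resource area:
// capture the nesting level at construction and call check() before growing;
// growth across an intervening nested ResourceMark trips the fatal() above.
// Hedged usage sketch (hypothetical growable container, for illustration):
//
//   ReallocMark _nesting_check;   // member alongside the backing store
//   void grow_backing_store() {
//     _nesting_check.check();     // fails if a nested ResourceMark intervened
//     // ... Arealloc/copy the storage ...
//   }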

#endif // Non-product
706