allocation.cpp revision 4527:6f817ce50129
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

void* StackObj::operator new(size_t size)      { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p)       { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size)   { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete [](void* p)    { ShouldNotCallThis(); }
void* _ValueObj::operator new(size_t size)     { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p)      { ShouldNotCallThis(); }
void* _ValueObj::operator new [](size_t size)  { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete [](void* p)   { ShouldNotCallThis(); }

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only, TRAPS) {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only,
                             Metaspace::NonClassType, CHECK_NULL);
}

bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

bool MetaspaceObj::is_metadata() const {
  // GC Verify checks use this in guarantees.
  // TODO: either replace them with is_metaspace_object() or remove them.
  // is_metaspace_object() is slower than this test. This test doesn't
  // seem very useful for metaspace objects anymore though.
  return !Universe::heap()->is_in_reserved(this);
}

bool MetaspaceObj::is_metaspace_object() const {
  return Metaspace::contains((void*)this);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {"INTPTR_FORMAT"}", this);
}

void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
  address res;
  switch (type) {
    case C_HEAP:
      res = (address)AllocateHeap(size, flags, CALLER_PC);
      DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
      break;
    case RESOURCE_AREA:
      // new(size) sets allocation type RESOURCE_AREA.
      res = (address)operator new(size);
      break;
    default:
      ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) {
  return (address) operator new(size, type, flags);
}

void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
                                allocation_type type, MEMFLAGS flags) {
  // should only call this with std::nothrow, use other operator new() otherwise
  address res;
  switch (type) {
    case C_HEAP:
      res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
      break;
    case RESOURCE_AREA:
      // new(size) sets allocation type RESOURCE_AREA.
      res = (address)operator new(size, std::nothrow);
      break;
    default:
      ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
                                   allocation_type type, MEMFLAGS flags) {
  return (address)operator new(size, nothrow_constant, type, flags);
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}

#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
  assert(type <= allocation_mask, "incorrect allocation type");
  ResourceObj* resobj = (ResourceObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new() and CollectionSetChooser(),
    // set verification value.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

ResourceObj::ResourceObj() { // default constructor
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such a type). Keep it since it is a valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack(),
           err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  } else {
    // Operator new() was not called.
    // Assume that it is an embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
  // Note: garbage may resemble a valid value.
  assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
         err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  set_allocation_type((address)this, STACK_OR_EMBEDDED);
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
  assert(allocated_on_stack(),
         err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  // Keep current _allocation_t value;
  return *this;
}

ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT


void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size,
                name == NULL ? "" : name);
}
"" : name); 225} 226 227 228void trace_heap_free(void* p) { 229 // A lock is not needed here - tty uses a lock internally 230 tty->print_cr("Heap free " INTPTR_FORMAT, p); 231} 232 233bool warn_new_operator = false; // see vm_main 234 235//-------------------------------------------------------------------------------------- 236// ChunkPool implementation 237 238// MT-safe pool of chunks to reduce malloc/free thrashing 239// NB: not using Mutex because pools are used before Threads are initialized 240class ChunkPool: public CHeapObj<mtInternal> { 241 Chunk* _first; // first cached Chunk; its first word points to next chunk 242 size_t _num_chunks; // number of unused chunks in pool 243 size_t _num_used; // number of chunks currently checked out 244 const size_t _size; // size of each chunk (must be uniform) 245 246 // Our three static pools 247 static ChunkPool* _large_pool; 248 static ChunkPool* _medium_pool; 249 static ChunkPool* _small_pool; 250 251 // return first element or null 252 void* get_first() { 253 Chunk* c = _first; 254 if (_first) { 255 _first = _first->next(); 256 _num_chunks--; 257 } 258 return c; 259 } 260 261 public: 262 // All chunks in a ChunkPool has the same size 263 ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } 264 265 // Allocate a new chunk from the pool (might expand the pool) 266 _NOINLINE_ void* allocate(size_t bytes) { 267 assert(bytes == _size, "bad size"); 268 void* p = NULL; 269 // No VM lock can be taken inside ThreadCritical lock, so os::malloc 270 // should be done outside ThreadCritical lock due to NMT 271 { ThreadCritical tc; 272 _num_used++; 273 p = get_first(); 274 } 275 if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC); 276 if (p == NULL) 277 vm_exit_out_of_memory(bytes, "ChunkPool::allocate"); 278 279 return p; 280 } 281 282 // Return a chunk to the pool 283 void free(Chunk* chunk) { 284 assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size"); 285 ThreadCritical tc; 286 _num_used--; 287 288 // Add chunk to list 289 chunk->set_next(_first); 290 _first = chunk; 291 _num_chunks++; 292 } 293 294 // Prune the pool 295 void free_all_but(size_t n) { 296 Chunk* cur = NULL; 297 Chunk* next; 298 { 299 // if we have more than n chunks, free all of them 300 ThreadCritical tc; 301 if (_num_chunks > n) { 302 // free chunks at end of queue, for better locality 303 cur = _first; 304 for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next(); 305 306 if (cur != NULL) { 307 next = cur->next(); 308 cur->set_next(NULL); 309 cur = next; 310 311 _num_chunks = n; 312 } 313 } 314 } 315 316 // Free all remaining chunks, outside of ThreadCritical 317 // to avoid deadlock with NMT 318 while(cur != NULL) { 319 next = cur->next(); 320 os::free(cur, mtChunk); 321 cur = next; 322 } 323 } 324 325 // Accessors to preallocated pool's 326 static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; } 327 static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; } 328 static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; } 329 330 static void initialize() { 331 _large_pool = new ChunkPool(Chunk::size + Chunk::aligned_overhead_size()); 332 _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size()); 333 _small_pool = new ChunkPool(Chunk::init_size + Chunk::aligned_overhead_size()); 334 } 335 336 static void clean() { 337 enum { BlocksToKeep = 
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };   // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // We expect requested_size == sizeof(Chunk); if sizeof(Chunk) is not the
  // properly aligned size, we must align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void *p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c);  break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c);  break;
   default:                 os::free(c, mtChunk);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;   // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;     // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // work around a rare race condition, which could double count
  // the arena size by native memory tracking
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;    // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  NOT_PRODUCT(Atomic::dec(&_instance_count);)
}

void* Arena::operator new(size_t size) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = os::malloc(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags|otArena, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare race condition
  // that can have total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    _size_in_bytes = size;
    MemTracker::record_arena_size((address)this, size);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // While we have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, whence);
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size. Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL) {
    if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
    }
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}



// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&             // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {  // Still fits where it sits
    _hwm = c_old+corrected_new_size;          // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}


// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak. Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
void* operator new(size_t size){
  ShouldNotReachHere(); return 0;
}

void* operator new [](size_t size){
  ShouldNotReachHere(); return 0;
}

void* operator new(size_t size, const std::nothrow_t& nothrow_constant){
  ShouldNotReachHere(); return 0;
}

void* operator new [](size_t size, const std::nothrow_t& nothrow_constant){
  ShouldNotReachHere(); return 0;
}

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
julong AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
julong AllocStats::num_frees()      { return os::num_frees - start_frees; }
julong AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}


// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
"bad hwm"); 758 assert(chunk->contains(max), "bad max"); 759 free_all((char**)hwm, (char**)max); 760 } else { 761 // this chunk was partially used 762 assert(chunk->contains(hwm), "bad hwm"); 763 assert(chunk->contains(hwm2), "bad hwm2"); 764 free_all((char**)hwm, (char**)hwm2); 765 } 766} 767 768 769ReallocMark::ReallocMark() { 770#ifdef ASSERT 771 Thread *thread = ThreadLocalStorage::get_thread_slow(); 772 _nesting = thread->resource_area()->nesting(); 773#endif 774} 775 776void ReallocMark::check() { 777#ifdef ASSERT 778 if (_nesting != Thread::current()->resource_area()->nesting()) { 779 fatal("allocation bug: array could grow within nested ResourceMark"); 780 } 781#endif 782} 783 784#endif // Non-product 785