allocation.cpp revision 6010:abec000618bf
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p) { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete [](void* p) { ShouldNotCallThis(); }

void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }
void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
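
// Illustrative sketch (editorial note, not VM code): the overrides above turn
// accidental heap allocation of stack-only and value types into an immediate
// failure instead of a silent leak. 'MyFinder' is a hypothetical StackObj
// subclass:
//
//   class MyFinder : public StackObj { /* ... */ };
//
//   void example() {
//     MyFinder f;                    // OK: automatic (stack) storage
//     MyFinder* p = new MyFinder();  // fails: StackObj::operator new
//   }                                // calls ShouldNotCallThis()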

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only,
                             type, CHECK_NULL);
}

bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

bool MetaspaceObj::is_metaspace_object() const {
  return ClassLoaderDataGraph::contains((void*)this);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {"INTPTR_FORMAT"}", this);
}

void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
  return (address) operator new(size, type, flags);
}
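
// Usage sketch (editorial, not VM code): a ResourceObj subclass chooses its
// backing storage at the new-expression; 'MyTable' is hypothetical.
//
//   MyTable* a = new (ResourceObj::RESOURCE_AREA, mtInternal) MyTable();
//   MyTable* b = new (ResourceObj::C_HEAP, mtInternal) MyTable();
//   delete b;  // allowed for C_HEAP only, see ResourceObj::operator delete below
//   // 'a' is reclaimed wholesale when the enclosing ResourceMark unwinds.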

void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
    allocation_type type, MEMFLAGS flags) throw() {
  // Should only be called with std::nothrow; use the other operator new() otherwise.
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
    allocation_type type, MEMFLAGS flags) throw() {
  return (address)operator new(size, nothrow_constant, type, flags);
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}

#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " PTR_FORMAT, res));
  assert(type <= allocation_mask, "incorrect allocation type");
  ResourceObj* resobj = (ResourceObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new() and CollectionSetChooser(),
    // set verification value.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
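
// Editorial restatement of the debug-only encoding above: for an object
// created by one of the operator new overloads,
//   _allocation_t[0] == ~((uintptr_t)this + type)
// so get_allocation_type() recovers 'type' from the low allocation_mask bits,
// and the constructors below can detect whether operator new() ran at all --
// a stack or embedded object will almost certainly fail the
// ~(_allocation_t[0] | allocation_mask) == (uintptr_t)this check.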

ResourceObj::ResourceObj() { // default constructor
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such type). Keep it since it is a valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack(),
           err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  } else {
    // Operator new() was not called.
    // Assume that it is an embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
  // Note: garbage may resemble a valid value.
  assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
         err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  set_allocation_type((address)this, STACK_OR_EMBEDDED);
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
  assert(allocated_on_stack(),
         err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  // Keep current _allocation_t value.
  return *this;
}

ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT
"" : name); 222271651Skargl} 223271651Skargl 224271651Skargl 225271651Skarglvoid trace_heap_free(void* p) { 226271651Skargl // A lock is not needed here - tty uses a lock internally 227271651Skargl tty->print_cr("Heap free " INTPTR_FORMAT, p); 228271651Skargl} 229271651Skargl 230284810Stijl//-------------------------------------------------------------------------------------- 231271651Skargl// ChunkPool implementation 232284810Stijl 233271651Skargl// MT-safe pool of chunks to reduce malloc/free thrashing 234284810Stijl// NB: not using Mutex because pools are used before Threads are initialized 235271651Skarglclass ChunkPool: public CHeapObj<mtInternal> { 236284810Stijl Chunk* _first; // first cached Chunk; its first word points to next chunk 237284810Stijl size_t _num_chunks; // number of unused chunks in pool 238271651Skargl size_t _num_used; // number of chunks currently checked out 239284810Stijl const size_t _size; // size of each chunk (must be uniform) 240284810Stijl 241284810Stijl // Our four static pools 242284810Stijl static ChunkPool* _large_pool; 243284810Stijl static ChunkPool* _medium_pool; 244284810Stijl static ChunkPool* _small_pool; 245271651Skargl static ChunkPool* _tiny_pool; 246284810Stijl 247284810Stijl // return first element or null 248271651Skargl void* get_first() { 249284810Stijl Chunk* c = _first; 250284810Stijl if (_first) { 251271651Skargl _first = _first->next(); 252271651Skargl _num_chunks--; 253271651Skargl } 254271651Skargl return c; 255271651Skargl } 256271651Skargl 257271651Skargl public: 258271651Skargl // All chunks in a ChunkPool has the same size 259284810Stijl ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } 260284810Stijl 261284810Stijl // Allocate a new chunk from the pool (might expand the pool) 262284810Stijl _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) { 263284810Stijl assert(bytes == _size, "bad size"); 264271651Skargl void* p = NULL; 265284810Stijl // No VM lock can be taken inside ThreadCritical lock, so os::malloc 266284810Stijl // should be done outside ThreadCritical lock due to NMT 267271651Skargl { ThreadCritical tc; 268271651Skargl _num_used++; 269284810Stijl p = get_first(); 270284810Stijl } 271284810Stijl if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC); 272271651Skargl if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { 273271651Skargl vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate"); 274271651Skargl } 275271651Skargl return p; 276271651Skargl } 277271651Skargl 278271651Skargl // Return a chunk to the pool 279271651Skargl void free(Chunk* chunk) { 280271651Skargl assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size"); 281271651Skargl ThreadCritical tc; 282284810Stijl _num_used--; 283271651Skargl 284271651Skargl // Add chunk to list 285271651Skargl chunk->set_next(_first); 286271651Skargl _first = chunk; 287271651Skargl _num_chunks++; 288271651Skargl } 289284810Stijl 290271651Skargl // Prune the pool 291271651Skargl void free_all_but(size_t n) { 292271651Skargl Chunk* cur = NULL; 293271651Skargl Chunk* next; 294271651Skargl { 295284810Stijl // if we have more than n chunks, free all of them 296271651Skargl ThreadCritical tc; 297271651Skargl if (_num_chunks > n) { 298284810Stijl // free chunks at end of queue, for better locality 299284810Stijl cur = _first; 300271651Skargl for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next(); 301271651Skargl 302271651Skargl if (cur != NULL) { 303271651Skargl 

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free all of them
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          _num_chunks = n;
        }
      }
    }

    // Free all remaining chunks, outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur, mtChunk);
      cur = next;
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };  // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation
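
// Editorial note: the switch in operator new below routes a request to one of
// the four pools only when 'length' exactly equals Chunk::size, medium_size,
// init_size or tiny_size; any other length (e.g. an oversized Arena::grow()
// request) bypasses the pools and goes straight to os::malloc().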

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // Expect requested_size == sizeof(Chunk); if it does not match, align it.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default: os::free(c, mtChunk);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;  // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while (k) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;    // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size + round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();  // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena::Arena() {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();  // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // Work around a rare race condition which could cause native memory
  // tracking to double-count the arena size
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;  // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  NOT_PRODUCT(Atomic::dec(&_instance_count);)
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}
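
// Worked example (editorial) for the rounding in Arena::Arena(size_t) above:
// with 8-byte pointers, round_size == 7, so init_size == 100 becomes
// (100 + 7) & ~7 == 104, the next multiple of sizeof(char*).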

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = os::malloc(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags|otArena, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare race condition
  // that can have total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    _size_in_bytes = size;
    MemTracker::record_arena_size((address)this, size);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max - _hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while (k != _chunk) {         // While we still have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}
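
// Usage sketch (editorial, not VM code): a typical arena lifetime. Real
// clients are ResourceArea/HandleArea; 'scratch' is hypothetical.
//
//   Arena* scratch = new (mtInternal) Arena();
//   void* buf = scratch->Amalloc(64);  // bump-pointer; grow()s when full
//   delete scratch;                    // ~Arena() frees every chunk at once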

// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr, old_size);   // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if (new_size <= old_size) {      // Shrink in-place
    if (c_old + old_size == _hwm)  // Attempt to free the excess bytes
      _hwm = c_old + new_size;     // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if ((c_old + old_size == _hwm) &&            // Adjusting recent thing
      (c_old + corrected_new_size <= _max)) {  // Still fits where it sits
    _hwm = c_old + corrected_new_size;         // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy(new_ptr, c_old, old_size);
  Afree(c_old, old_size);       // Mostly done to keep stats accurate
  return new_ptr;
}


// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if ((void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm)
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert((x & (sizeof(char*) - 1)) == 0, "misaligned size");
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif
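
// Worked example (editorial) for Arealloc() above: when old_ptr is the most
// recent allocation (old_ptr + old_size == _hwm) and ARENA_ALIGN(new_size)
// still fits below _max, Arealloc() resizes in place by moving _hwm;
// otherwise it falls back to Amalloc() + memcpy() + Afree().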


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// On certain platforms, such as Mac OS X (Darwin), the debug build sees new called
// from JDK sources, causing data corruption (e.g. in
// Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair).
// Define ALLOW_OPERATOR_NEW_USAGE on platforms where the global operator new is allowed.
//
#ifndef ALLOW_OPERATOR_NEW_USAGE
void* operator new(size_t size) throw() {
  assert(false, "Should not call global operator new");
  return 0;
}

void* operator new [](size_t size) throw() {
  assert(false, "Should not call global operator new[]");
  return 0;
}

void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Should not call global operator new");
  return 0;
}

void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Should not call global operator new[]");
  return 0;
}

void operator delete(void* p) {
  assert(false, "Should not call global delete");
}

void operator delete [](void* p) {
  assert(false, "Should not call global delete []");
}
#endif // ALLOW_OPERATOR_NEW_USAGE

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
julong AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
julong AllocStats::num_frees()      { return os::num_frees - start_frees; }
julong AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }

void AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}
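
// Usage sketch (editorial, not VM code) for AllocStats in non-product builds:
// snapshot the counters before a phase and print the deltas afterwards.
//
//   AllocStats stats;     // records os::num_mallocs, os::alloc_bytes, ...
//   run_some_vm_phase();  // hypothetical work
//   stats.print();        // "N mallocs (XMB), M frees (YMB), ZMB resrc"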

// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product