allocation.hpp revision 4811:4b52137b07c9
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

#define ARENA_ALIGN_M1   (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x)   ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)


// noinline attribute
#ifdef _WINDOWS
  #define _NOINLINE_ __declspec(noinline)
#else
  #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
    #define _NOINLINE_
  #else
    #define _NOINLINE_ __attribute__ ((noinline))
  #endif
#endif

class AllocFailStrategy {
public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// All classes in the virtual machine must be subclassed
// by one of the following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by: free & malloc).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// For classes in Metaspace (class data)
// - MetaspaceObj
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that avoid allocating the
// vtbl entries in the objects should therefore not be the printable
// subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap:
//
//   NEW_RESOURCE_ARRAY(type, size)
//   NEW_RESOURCE_OBJ(type)
//   NEW_C_HEAP_ARRAY(type, size)
//   NEW_C_HEAP_OBJ(type)
//   char* AllocateHeap(size_t size, const char* name);
//   void  FreeHeap(void* p);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.
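// Illustrative sketch (not part of the original header): the choice of base
// class decides where instances may live. The class name below is
// hypothetical.
//
//   class SymbolCache : public CHeapObj<mtInternal> {
//     // instances are malloc'ed under the mtInternal category; use delete
//   };
//
//   SymbolCache* c = new SymbolCache();         // C-heap allocation
//   char* buf = NEW_RESOURCE_ARRAY(char, 256);  // resource area; released by
//                                               // the enclosing ResourceMark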
// Base class for objects allocated in the C-heap.

// In non product mode we introduce a super class for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif


/*
 * MemoryType bitmap layout:
 * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
 * |      memory type        |   object    |  reserved   |
 * |                         |    type     |             |
 */
enum MemoryType {
  // Memory type by sub systems. It occupies lower byte.
  mtNone              = 0x0000,  // undefined
  mtClass             = 0x0100,  // memory class for Java classes
  mtThread            = 0x0200,  // memory for thread objects
  mtThreadStack       = 0x0300,
  mtCode              = 0x0400,  // memory for generated code
  mtGC                = 0x0500,  // memory for GC
  mtCompiler          = 0x0600,  // memory for compiler
  mtInternal          = 0x0700,  // memory used by VM, but does not belong to
                                 // any of above categories, and not used for
                                 // native memory tracking
  mtOther             = 0x0800,  // memory not used by VM
  mtSymbol            = 0x0900,  // symbol
  mtNMT               = 0x0A00,  // memory used by native memory tracking
  mtChunk             = 0x0B00,  // chunk that holds content of arenas
  mtJavaHeap          = 0x0C00,  // Java heap
  mtClassShared       = 0x0D00,  // class data sharing
  mtTest              = 0x0E00,  // Test type for verifying NMT
  mt_number_of_types  = 0x000E,  // number of memory types (mtDontTrack
                                 // is not included as a valid type)
  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
  mt_masks            = 0x7F00,

  // object type mask
  otArena             = 0x0010,  // an arena object
  otNMTRecorder       = 0x0020,  // memory recorder object
  ot_masks            = 0x00F0
};

#define IS_MEMORY_TYPE(flags, type)  ((flags & mt_masks) == type)
#define HAS_VALID_MEMORY_TYPE(flags) ((flags & mt_masks) != mtNone)
#define FLAGS_TO_MEMORY_TYPE(flags)  (flags & mt_masks)

#define IS_ARENA_OBJ(flags)          ((flags & ot_masks) == otArena)
#define IS_NMT_RECORDER(flags)       ((flags & ot_masks) == otNMTRecorder)
#define NMT_CAN_TRACK(flags)         (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))

typedef unsigned short MEMFLAGS;

#if INCLUDE_NMT

extern bool NMT_track_callsite;

#else

const bool NMT_track_callsite = false;

#endif // INCLUDE_NMT

// debug build does not inline
#if defined(_NMT_NOINLINE_)
  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
#else
  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(0) : 0)
  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#endif



template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
                                 address caller_pc = 0);

  void operator delete(void* p);
};
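// Worked example (illustrative, not part of the original header): a MEMFLAGS
// value combines a memory type (upper byte) with an object type (second
// nibble), as Arena does below with mtNone|otArena. Using mtChunk|otArena:
//
//   MEMFLAGS f = mtChunk | otArena;   // 0x0B00 | 0x0010 == 0x0B10
//   IS_MEMORY_TYPE(f, mtChunk)        // true: f & mt_masks == 0x0B00
//   IS_ARENA_OBJ(f)                   // true: f & ot_masks == otArena
//   NMT_CAN_TRACK(f)                  // true: neither a recorder object
//                                     //       nor mtDontTrack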
// Base class for objects allocated on the stack only.
// Calling new or delete will result in a fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 private:
  void* operator new(size_t size);
  void  operator delete(void* p);
};

// Base class for objects used as value objects.
// Calling new or delete will result in a fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make classes bigger if they have a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not to happen. So never make a ValueObj class a direct subclass
// of this object, but use the VALUE_OBJ_CLASS_SPEC class instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 private:
  void* operator new(size_t size);
  void  operator delete(void* p);
};


// Base class for objects stored in Metaspace.
// Calling delete will result in a fatal error.
//
// Do not inherit from something with a vptr because this class does
// not introduce one. This class is used to allocate both shared read-only
// and shared read-write classes.
//

class ClassLoaderData;

class MetaspaceObj {
 public:
  bool is_metadata() const;
  bool is_metaspace_object() const;  // more specific test but slower
  bool is_shared() const;
  void print_address_on(outputStream* st) const;  // nonvirtual address printing

  void* operator new(size_t size, ClassLoaderData* loader_data,
                     size_t word_size, bool read_only, Thread* thread);
  // can't use TRAPS from this header file.
  void operator delete(void* p) { ShouldNotCallThis(); }
};

// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};
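// Illustrative sketch (not part of the original header): StackObj and
// _ValueObj enforce their placement rules at compile time by declaring
// operator new private, while AllStatic fails at runtime via
// ShouldNotCallThis() if instantiated. The class below is hypothetical.
//
//   class ScopedGuard : public StackObj {
//    public:
//     ScopedGuard()  { /* acquire */ }
//     ~ScopedGuard() { /* release */ }
//   };
//
//   { ScopedGuard g; /* ... */ }   // OK: automatic storage only
//   // new ScopedGuard();          // fails to compile: operator new is private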
//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;   // Next Chunk in list
  const size_t _len;    // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,    // [RGV] Not sure if this is right, but make it
                        //       a multiple of 8.
#else
    slack      = 20,    // suspected sizeof(Chunk) + internal malloc headers
#endif

    init_size     = 1*K  - slack,   // Size of first chunk
    medium_size   = 10*K - slack,   // Size of medium-sized chunk
    size          = 32*K - slack,   // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32  // An initial size which is not one of above
  };

  void chop();        // Chop this chunk
  void next_chop();   // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const        { return _len;  }
  Chunk* next()   const        { return _next; }
  void set_next(Chunk* n)      { _next = n; }
  // Boundaries of data area (possibly unused)
  char* bottom() const         { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const         { return bottom() + _len; }
  bool contains(char* p) const { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};
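// Worked sizes (illustrative, not part of the original header), LP64 case:
// the slack keeps chunk plus malloc bookkeeping just under a power-of-two
// boundary, so buddy-style allocators do not round up to the next bucket:
//
//   init_size = 1*K  - 40 ==   984 bytes   (under the 1 KB boundary)
//   size      = 32*K - 40 == 32728 bytes   (under the 32 KB boundary)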
//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone|otArena> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  Chunk *_first;          // First chunk
  Chunk *_chunk;          // current chunk
  char *_hwm, *_max;      // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;  // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;)  // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
  }

 public:
  Arena();
  Arena(size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const { return _hwm; }

  // new operators
  void* operator new (size_t size);
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant);

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags);
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
  void  operator delete(void* p);

  // Fast allocate in the arena. Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert((x & (sizeof(char*)-1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert((x & (sizeof(char*)-1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);  // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta;  // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in arena. Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size);  // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }
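  // Illustrative sketch (not part of the original header): Amalloc bumps
  // _hwm; Afree only rolls _hwm back when the freed block is the topmost
  // allocation (assuming 64 and 32 are already ARENA_ALIGN'ed sizes):
  //
  //   char* a = (char*) arena->Amalloc(64);
  //   char* b = (char*) arena->Amalloc(32);
  //   arena->Afree(a, 64);   // NOP: a is not at the top of the arena
  //   arena->Afree(b, 32);   // b + 32 == _hwm, so _hwm rolls back to b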
  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const { return _size_in_bytes; }
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
  static void free_all(char** start, char** end) PRODUCT_RETURN;

  // how many arena instances
  NOT_PRODUCT(static volatile jint _instance_count;)
private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)


//%note allocation_1
extern char* resource_allocate_bytes(size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_allocate_bytes(Thread* thread, size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern void resource_free_bytes( char *old, size_t size );
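// Illustrative sketch (not part of the original header): resource-area
// memory is reclaimed wholesale when the enclosing ResourceMark (see
// resourceArea.hpp) is destroyed, so no explicit free is needed:
//
//   ResourceMark rm;
//   int* tmp = NEW_RESOURCE_ARRAY(int, 100);  // released when rm goes
//                                             // out of scope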
//----------------------------------------------------------------------
// Base class for objects allocated in the resource area per default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP) Foo(...) or in an Arena with new (&arena).
// ResourceObjs can be allocated within other objects, but don't use
// new or delete (allocation_type is unknown). If new is used to allocate,
// use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on stack the new() operator is not
  // called but garbage on stack may look like a valid allocation_type.
  // Store negated 'this' pointer when new() is called to distinguish cases.
  // Use second array's element for verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj();                      // default constructor
  ResourceObj(const ResourceObj& r);  // default copy constructor
  ResourceObj& operator=(const ResourceObj& r);  // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant,
      allocation_type type, MEMFLAGS flags);
  void* operator new(size_t size, Arena *arena) {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }
  void* operator new(size_t size) {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void operator delete(void* p);
};

// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
  /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))

#define FREE_C_HEAP_ARRAY(type, old, memflags) \
  FreeHeap((char*)(old), memflags)

#define NEW_C_HEAP_OBJ(type, memflags)\
  NEW_C_HEAP_ARRAY(type, 1, memflags)


#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\
  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc))

#define NEW_C_HEAP_OBJ2(type, memflags, pc)\
  NEW_C_HEAP_ARRAY2(type, 1, memflags, pc)


extern bool warn_new_operator;
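// Illustrative sketch (not part of the original header): a ResourceObj
// subclass (Foo is hypothetical) can be placed in any of three areas by
// picking one of the operator new forms declared above; the C-heap form
// takes an allocation_type plus MEMFLAGS, matching its declared signature:
//
//   Foo* r = new Foo();                                    // resource area
//   Foo* c = new (ResourceObj::C_HEAP, mtInternal) Foo();  // C-heap
//   Foo* a = new (&arena) Foo();                           // arena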
// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();  // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif


//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer. Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
protected:
  NOT_PRODUCT(int _nesting;)

public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};

// Helper class to allocate arrays that may become large.
// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
// and uses mapped memory for larger allocations.
// Most OS mallocs do something similar but Solaris malloc does not revert
// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
template <class E, MEMFLAGS F>
class ArrayAllocator : StackObj {
  char* _addr;
  bool _use_malloc;
  size_t _size;
 public:
  ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { }
  ~ArrayAllocator() { free(); }
  E* allocate(size_t length);
  void free();
};

#endif // SHARE_VM_MEMORY_ALLOCATION_HPP