allocation.hpp revision 13249:a2753984d2c1
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

// The byte alignment to be used by Arena::Amalloc.  See bugid 4169348.
// Note: this value must be a power of 2

#define ARENA_AMALLOC_ALIGNMENT (2*BytesPerWord)

#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
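
// Worked example (illustrative only): on a 64-bit VM BytesPerWord is 8, so
// ARENA_AMALLOC_ALIGNMENT is 16, ARENA_ALIGN_M1 is 15 and ARENA_ALIGN rounds
// a request up to the next multiple of 16:
//   ARENA_ALIGN(1)  == 16
//   ARENA_ALIGN(16) == 16
//   ARENA_ALIGN(17) == 32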

class AllocFailStrategy {
public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// All classes in the virtual machine must be subclasses of one of the
// following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by: free & malloc).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// For classes in Metaspace (class data)
// - MetaspaceObj
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that must avoid a vtbl pointer
// in their objects should therefore not use the printable subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap. The _OBJ variants
// of the NEW/FREE_C_HEAP macros allocate/deallocate simple objects that
// do not inherit from CHeapObj; note that the constructor and destructor
// are not called. The preferred way to allocate objects is with the new
// operator.
//
// WARNING: The array variants must only be used for a homogeneous array
// where all objects are of the exact type specified. If subtypes are
// stored in the array then care must be taken to call destructors as
// needed.
//
//   NEW_RESOURCE_ARRAY(type, size)
//   NEW_RESOURCE_OBJ(type)
//   NEW_C_HEAP_ARRAY(type, size, memflags)
//   NEW_C_HEAP_OBJ(type, memflags)
//   FREE_C_HEAP_ARRAY(type, old)
//   FREE_C_HEAP_OBJ(objname)
//   char* AllocateHeap(size_t size, const char* name);
//   void  FreeHeap(void* p);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.
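
// Illustrative usage sketch (not part of this interface; 'n' is hypothetical):
//
//   char* buf = NEW_C_HEAP_ARRAY(char, n, mtInternal);  // malloc'ed, tagged for NMT
//   ...
//   FREE_C_HEAP_ARRAY(char, buf);                       // no destructors are run
//
//   int* tmp = NEW_RESOURCE_ARRAY(int, n);  // released when the enclosing
//                                           // ResourceMark goes out of scope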

// Base class for objects allocated in the C-heap.

// In non-product mode we introduce a superclass for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word of overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif


/*
 * Memory types
 */
enum MemoryType {
  // Memory type by subsystem. It occupies the lower byte.
  mtJavaHeap          = 0x00,  // Java heap
  mtClass             = 0x01,  // memory class for Java classes
  mtThread            = 0x02,  // memory for thread objects
  mtThreadStack       = 0x03,
  mtCode              = 0x04,  // memory for generated code
  mtGC                = 0x05,  // memory for GC
  mtCompiler          = 0x06,  // memory for compiler
  mtInternal          = 0x07,  // memory used by VM, but does not belong to
                               //   any of the above categories, and is not
                               //   used for native memory tracking
  mtOther             = 0x08,  // memory not used by VM
  mtSymbol            = 0x09,  // symbol
  mtNMT               = 0x0A,  // memory used by native memory tracking
  mtClassShared       = 0x0B,  // class data sharing
  mtChunk             = 0x0C,  // chunk that holds content of arenas
  mtTest              = 0x0D,  // Test type for verifying NMT
  mtTracing           = 0x0E,  // memory used for Tracing
  mtLogging           = 0x0F,  // memory for logging
  mtArguments         = 0x10,  // memory for argument processing
  mtModule            = 0x11,  // memory for module processing
  mtNone              = 0x12,  // undefined
  mt_number_of_types  = 0x13   // number of memory types (mtDontTrack
                               //   is not included as a valid type)
};

typedef MemoryType MEMFLAGS;


#if INCLUDE_NMT

extern bool NMT_track_callsite;

#else

const bool NMT_track_callsite = false;

#endif // INCLUDE_NMT

class NativeCallStack;


template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  NOINLINE void* operator new(size_t size, const NativeCallStack& stack) throw();
  NOINLINE void* operator new(size_t size) throw();
  NOINLINE void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
                               const NativeCallStack& stack) throw();
  NOINLINE void* operator new (size_t size, const std::nothrow_t&  nothrow_constant)
                               throw();
  NOINLINE void* operator new [](size_t size, const NativeCallStack& stack) throw();
  NOINLINE void* operator new [](size_t size) throw();
  NOINLINE void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
                               const NativeCallStack& stack) throw();
  NOINLINE void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant)
                               throw();
  void  operator delete(void* p);
  void  operator delete [] (void* p);
};
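
// Illustrative sketch (Foo is a hypothetical class): deriving from CHeapObj
// routes new/delete through the VM's C-heap allocator and tags the memory for
// native memory tracking with the given MEMFLAGS, e.g.
//
//   class Foo : public CHeapObj<mtInternal> {
//     ...
//   };
//   Foo* f = new Foo();   // malloc'ed and tracked as mtInternal
//   delete f;             // freed back to the C heap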

// Base class for objects allocated on the stack only.
// Calling new or delete will result in a fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 private:
  void* operator new(size_t size) throw();
  void* operator new [](size_t size) throw();
#ifdef __IBMCPP__
 public:
#endif
  void  operator delete(void* p);
  void  operator delete [](void* p);
};

// Base class for objects used as value objects.
// Calling new or delete will result in a fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make classes bigger if they have a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not happening. So never make a ValueObj class a direct subclass
// of this object; use the VALUE_OBJ_CLASS_SPEC macro instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 private:
  void* operator new(size_t size) throw();
  void  operator delete(void* p);
  void* operator new [](size_t size) throw();
  void  operator delete [](void* p);
};


// Base class for objects stored in Metaspace.
// Calling delete will result in a fatal error.
//
// Do not inherit from something with a vptr because this class does
// not introduce one.  This class is used to allocate both shared read-only
// and shared read-write classes.
//

class ClassLoaderData;

class MetaspaceObj {
 public:
  bool is_metaspace_object() const;
  bool is_shared() const;
  void print_address_on(outputStream* st) const;  // nonvirtual address printing

#define METASPACE_OBJ_TYPES_DO(f) \
  f(Unknown) \
  f(Class) \
  f(Symbol) \
  f(TypeArrayU1) \
  f(TypeArrayU2) \
  f(TypeArrayU4) \
  f(TypeArrayU8) \
  f(TypeArrayOther) \
  f(Method) \
  f(ConstMethod) \
  f(MethodData) \
  f(ConstantPool) \
  f(ConstantPoolCache) \
  f(Annotation) \
  f(MethodCounters) \
  f(Deallocated)

#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char * type_name(Type type) {
    switch(type) {
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

  static MetaspaceObj::Type array_type(size_t elem_size) {
    switch (elem_size) {
    case 1: return TypeArrayU1Type;
    case 2: return TypeArrayU2Type;
    case 4: return TypeArrayU4Type;
    case 8: return TypeArrayU8Type;
    default:
      return TypeArrayOtherType;
    }
  }

  void* operator new(size_t size, ClassLoaderData* loader_data,
                     size_t word_size, bool read_only,
                     Type type, Thread* thread) throw();
                     // can't use TRAPS from this header file.
  void operator delete(void* p) { ShouldNotCallThis(); }
};

// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};


//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    tiny_size  =  256  - slack, // Size of first chunk (tiny)
    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };
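
  // Worked example (illustrative, LP64): init_size is 1*K - 40 == 984 bytes, so
  // the payload plus the Chunk header and the C-heap allocator's own bookkeeping
  // stays at or below the 1024-byte power of two, which keeps buddy-system style
  // mallocs from rounding the request up to the next size class.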

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len;  }
  Chunk* next() const           { return _next;  }
  void set_next(Chunk* n)       { _next = n;  }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size();  }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};

//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  MEMFLAGS    _flags;           // Memory tracking flags

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
  }

 public:
  Arena(MEMFLAGS memflag);
  Arena(MEMFLAGS memflag, size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size) throw();
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in arena.  Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         {  return _size_in_bytes; };
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
  static void free_all(char** start, char** end)                                     PRODUCT_RETURN;

private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void   reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)
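
// Illustrative sketch (the arena, element type and sizes are hypothetical):
//
//   Arena arena(mtInternal);
//   int* a = NEW_ARENA_ARRAY(&arena, int, 10);
//   a = REALLOC_ARENA_ARRAY(&arena, int, a, 10, 20);
//   FREE_ARENA_ARRAY(&arena, int, a, 20);  // only reclaims if it was the last allocation
//   // everything is released when 'arena' is destructed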


//%note allocation_1
extern char* resource_allocate_bytes(size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_allocate_bytes(Thread* thread, size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern void resource_free_bytes( char *old, size_t size );

//----------------------------------------------------------------------
// Base class for objects allocated in the resource area by default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP) Foo(...) or in an Arena with new (&arena) Foo(...).
// ResourceObj's can be allocated within other objects, but don't use
// new or delete (allocation_type is unknown).  If new is used to allocate,
// use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on stack the new() operator is not
  // called but garbage on stack may look like a valid allocation_type.
  // Store negated 'this' pointer when new() is called to distinguish cases.
  // Use second array's element for verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj(); // default constructor
  ResourceObj(const ResourceObj& r); // default copy constructor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw();
  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t&  nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();
  void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();

  void* operator new(size_t size, Arena *arena) throw() {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }

  void* operator new [](size_t size, Arena *arena) throw() {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }

  void* operator new(size_t size) throw() {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new [](size_t size) throw() {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void  operator delete(void* p);
  void  operator delete [](void* p);
};
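
// Illustrative sketch (Foo is a hypothetical subclass of ResourceObj):
//
//   Foo* r = new Foo();                                    // resource area (default)
//   Foo* c = new (ResourceObj::C_HEAP, mtInternal) Foo();  // C heap; pair with delete c
//   Foo* a = new (&arena) Foo();                           // arena; freed with the arena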

// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\
                                    (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
    /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
  NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)

#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)

#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags))

#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)

#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))

#define FREE_C_HEAP_ARRAY(type, old) \
  FreeHeap((char*)(old))

// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
  NEW_C_HEAP_ARRAY(type, 1, memflags)

#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
  NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)

// deallocate obj of type in heap without calling dtor
#define FREE_C_HEAP_OBJ(objname)\
  FreeHeap((char*)objname);

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();    // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif


//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer.  Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
protected:
  NOT_PRODUCT(int _nesting;)

public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};
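
// Illustrative sketch (names and sizes are hypothetical):
//
//   ReallocMark nesting_check;
//   int* buf = NEW_RESOURCE_ARRAY(int, len);
//   ...
//   nesting_check.check();  // check before an operation that might reallocate
//   buf = REALLOC_RESOURCE_ARRAY(int, buf, len, 2 * len);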

// Helper class to allocate arrays that may become large.
// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
// and uses mapped memory for larger allocations.
// Most OS mallocs do something similar but Solaris malloc does not revert
// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
template <class E, MEMFLAGS F>
class ArrayAllocator : public AllStatic {
 private:
  static bool should_use_malloc(size_t length);

  static E* allocate_malloc(size_t length);
  static E* allocate_mmap(size_t length);

  static void free_malloc(E* addr, size_t length);
  static void free_mmap(E* addr, size_t length);

 public:
  static E* allocate(size_t length);
  static E* reallocate(E* old_addr, size_t old_length, size_t new_length);
  static void free(E* addr, size_t length);
};
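
// Illustrative sketch (element type, MEMFLAGS and length are hypothetical):
//
//   size_t len = ...;
//   int* data = ArrayAllocator<int, mtGC>::allocate(len);
//   ...
//   ArrayAllocator<int, mtGC>::free(data, len);  // length must match the allocation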

// Uses mmapped memory for all allocations. All allocations are initially
// zero-filled. No pre-touching.
template <class E, MEMFLAGS F>
class MmapArrayAllocator : public AllStatic {
 private:
  static size_t size_for(size_t length);

 public:
  static E* allocate_or_null(size_t length);
  static E* allocate(size_t length);
  static void free(E* addr, size_t length);
};

// Uses malloc'ed memory for all allocations.
template <class E, MEMFLAGS F>
class MallocArrayAllocator : public AllStatic {
 public:
  static size_t size_for(size_t length);

  static E* allocate(size_t length);
  static void free(E* addr, size_t length);
};

#endif // SHARE_VM_MEMORY_ALLOCATION_HPP