allocation.hpp revision 4802:f2110083203d
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
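// As a worked example of the round-up arithmetic above: if ARENA_AMALLOC_ALIGNMENT
// is 8, then ARENA_ALIGN_M1 == 7, ARENA_ALIGN_MASK == ~7, and ARENA_ALIGN(1) == 8,
// ARENA_ALIGN(8) == 8, ARENA_ALIGN(13) == 16; i.e. requested sizes are rounded up to
// the next multiple of the arena allocation alignment.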


// noinline attribute
#ifdef _WINDOWS
  #define _NOINLINE_  __declspec(noinline)
#else
  #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
    #define _NOINLINE_
  #else
    #define _NOINLINE_ __attribute__ ((noinline))
  #endif
#endif

class AllocFailStrategy {
public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// All classes in the virtual machine must be subclassed
// by one of the following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by: free & malloc).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// For classes in Metaspace (class data)
// - MetaspaceObj
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that avoid allocating the
// vtbl entries in the objects should therefore not be the printable
// subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap. The _OBJ variants
// of the NEW/FREE_C_HEAP macros are used to allocate and deallocate
// simple objects that are not derived from CHeapObj; note that the
// constructor and destructor are not called. The preferred way to
// allocate objects is using the new operator.
//
// WARNING: The array variants must only be used for a homogeneous array
// where all objects are of the exact type specified. If subtypes are
// stored in the array then you must pay attention to calling destructors
// as needed.
//
//   NEW_RESOURCE_ARRAY(type, size)
//   NEW_RESOURCE_OBJ(type)
//   NEW_C_HEAP_ARRAY(type, size, memflags)
//   NEW_C_HEAP_OBJ(type, memflags)
//   FREE_C_HEAP_ARRAY(type, old, memflags)
//   FREE_C_HEAP_OBJ(objname, memflags)
//   char* AllocateHeap(size_t size, MEMFLAGS flags);
//   void  FreeHeap(void* p, MEMFLAGS flags);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.
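//
// An illustrative (not normative) use of these macros, assuming a simple
// struct Foo with no constructor side effects:
//
//   Foo* heap_array = NEW_C_HEAP_ARRAY(Foo, 16, mtInternal); // malloc'ed, no ctors run
//   ...
//   FREE_C_HEAP_ARRAY(Foo, heap_array, mtInternal);          // freed, no dtors run
//
//   ResourceMark rm;                                 // see resourceArea.hpp
//   Foo* res_array = NEW_RESOURCE_ARRAY(Foo, 16);    // reclaimed when rm goes out of scope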

// Base class for objects allocated in the C-heap.

// In non product mode we introduce a super class for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif


/*
 * MemoryType bitmap layout:
 * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
 * |      memory type        |   object    | reserved    |
 * |                         |     type    |             |
 */
enum MemoryType {
  // Memory type by sub systems. It occupies the upper byte.
  mtNone              = 0x0000,  // undefined
  mtClass             = 0x0100,  // memory class for Java classes
  mtThread            = 0x0200,  // memory for thread objects
  mtThreadStack       = 0x0300,
  mtCode              = 0x0400,  // memory for generated code
  mtGC                = 0x0500,  // memory for GC
  mtCompiler          = 0x0600,  // memory for compiler
  mtInternal          = 0x0700,  // memory used by VM, but does not belong to
                                 // any of above categories, and not used for
                                 // native memory tracking
  mtOther             = 0x0800,  // memory not used by VM
  mtSymbol            = 0x0900,  // symbol
  mtNMT               = 0x0A00,  // memory used by native memory tracking
  mtChunk             = 0x0B00,  // chunk that holds content of arenas
  mtJavaHeap          = 0x0C00,  // Java heap
  mtClassShared       = 0x0D00,  // class data sharing
  mtTest              = 0x0E00,  // Test type for verifying NMT
  mtTracing           = 0x0F00,  // memory used for Tracing
  mt_number_of_types  = 0x000F,  // number of memory types (mtDontTrack
                                 // is not included as a valid type)
  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
  mt_masks            = 0x7F00,

  // object type mask
  otArena             = 0x0010, // an arena object
  otNMTRecorder       = 0x0020, // memory recorder object
  ot_masks            = 0x00F0
};

#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone)
#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)

#define IS_ARENA_OBJ(flags)         ((flags & ot_masks) == otArena)
#define IS_NMT_RECORDER(flags)      ((flags & ot_masks) == otNMTRecorder)
#define NMT_CAN_TRACK(flags)        (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))
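
// For example, for a flags value composed as (mtGC | otArena):
// FLAGS_TO_MEMORY_TYPE(flags) yields mtGC, IS_MEMORY_TYPE(flags, mtGC) and
// IS_ARENA_OBJ(flags) are both true, and NMT_CAN_TRACK(flags) is true because
// the value is neither an NMT recorder object nor tagged mtDontTrack.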

typedef unsigned short MEMFLAGS;

#if INCLUDE_NMT

extern bool NMT_track_callsite;

#else

const bool NMT_track_callsite = false;

#endif // INCLUDE_NMT

// debug build does not inline
#if defined(_NMT_NOINLINE_)
  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
#else
  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(0) : 0)
  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#endif



template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
                               address caller_pc = 0);
  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0);
  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
                               address caller_pc = 0);
  void  operator delete(void* p);
  void  operator delete [] (void* p);
};
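
// Example of a CHeapObj subclass (a sketch; MyTable is a hypothetical class):
//
//   class MyTable : public CHeapObj<mtInternal> {
//     ...
//   };
//   MyTable* t = new MyTable();  // allocated with the operator new above and tagged
//                                // mtInternal for native memory tracking
//   delete t;                    // released through CHeapObj's operator delete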

// Base class for objects allocated on the stack only.
// Calling new or delete will result in fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 private:
  void* operator new(size_t size);
  void  operator delete(void* p);
  void* operator new [](size_t size);
  void  operator delete [](void* p);
};

// Base class for objects used as value objects.
// Calling new or delete will result in fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make classes bigger if they have a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not happening. So never make a ValueObj class a direct subclass
// of this object, but use the VALUE_OBJ_CLASS_SPEC macro instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 private:
  void* operator new(size_t size);
  void  operator delete(void* p);
  void* operator new [](size_t size);
  void  operator delete [](void* p);
};


// Base class for objects stored in Metaspace.
// Calling delete will result in fatal error.
//
// Do not inherit from something with a vptr because this class does
// not introduce one.  This class is used to allocate both shared read-only
// and shared read-write classes.
//

class ClassLoaderData;

class MetaspaceObj {
 public:
  bool is_metadata() const;
  bool is_metaspace_object() const;  // more specific test but slower
  bool is_shared() const;
  void print_address_on(outputStream* st) const;  // nonvirtual address printing

#define METASPACE_OBJ_TYPES_DO(f) \
  f(Unknown) \
  f(Class) \
  f(Symbol) \
  f(TypeArrayU1) \
  f(TypeArrayU2) \
  f(TypeArrayU4) \
  f(TypeArrayU8) \
  f(TypeArrayOther) \
  f(Method) \
  f(ConstMethod) \
  f(MethodData) \
  f(ConstantPool) \
  f(ConstantPoolCache) \
  f(Annotation) \
  f(MethodCounters)

#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char * type_name(Type type) {
    switch(type) {
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

  static MetaspaceObj::Type array_type(size_t elem_size) {
    switch (elem_size) {
    case 1: return TypeArrayU1Type;
    case 2: return TypeArrayU2Type;
    case 4: return TypeArrayU4Type;
    case 8: return TypeArrayU8Type;
    default:
      return TypeArrayOtherType;
    }
  }

  void* operator new(size_t size, ClassLoaderData* loader_data,
                     size_t word_size, bool read_only,
                     Type type, Thread* thread);
                     // can't use TRAPS from this header file.
  void operator delete(void* p) { ShouldNotCallThis(); }
};
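
// For example, the X-macro above expands to ClassType, SymbolType, ... and
// type_name(ClassType) returns "Class"; array_type(sizeof(u1)) is TypeArrayU1Type,
// array_type(8) is TypeArrayU8Type, and every other element size maps to
// TypeArrayOtherType.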

// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};


//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, size_t length);
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    init_size  =  1*K  - slack, // Size of first chunk
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len;  }
  Chunk* next() const           { return _next;  }
  void set_next(Chunk* n)       { _next = n;  }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size();  }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};

//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone|otArena> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  void check_for_overflow(size_t request, const char* whence) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      signal_out_of_memory(request, whence);
    }
  }

 public:
  Arena();
  Arena(size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size);
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant);

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags);
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    check_for_overflow(x, "Arena::Amalloc");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
    check_for_overflow(x, "Arena::Amalloc_4");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    check_for_overflow(x, "Arena::Amalloc_D");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in area.  Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         {  return _size_in_bytes; };
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
  static void free_all(char** start, char** end)                                     PRODUCT_RETURN;

  // how many arena instances
  NOT_PRODUCT(static volatile jint _instance_count;)
private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void   reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)
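
// Example usage (a sketch; no constructors or destructors are run):
//
//   Arena* arena = new (mtCompiler) Arena();             // heap-allocated, tagged mtCompiler
//   int*   buf   = NEW_ARENA_ARRAY(arena, int, 64);
//   buf = REALLOC_ARENA_ARRAY(arena, int, buf, 64, 128);
//   FREE_ARENA_ARRAY(arena, int, buf, 128);              // usually a no-op
//   delete arena;                                        // all chunks are released here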


//%note allocation_1
extern char* resource_allocate_bytes(size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_allocate_bytes(Thread* thread, size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern void resource_free_bytes( char *old, size_t size );

//----------------------------------------------------------------------
// Base class for objects allocated in the resource area by default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP, memflags) Foo(...) or in an Arena with new (&arena).
// ResourceObj's can be allocated within other objects, but don't use
// new or delete (allocation_type is unknown).  If new is used to allocate,
// use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on stack the new() operator is not
  // called but garbage on stack may look like a valid allocation_type.
  // Store negated 'this' pointer when new() is called to distinguish cases.
  // Use second array's element for verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj(); // default constructor
  ResourceObj(const ResourceObj& r); // default copy constructor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags);
  void* operator new(size_t size, const std::nothrow_t&  nothrow_constant,
      allocation_type type, MEMFLAGS flags);
  void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
      allocation_type type, MEMFLAGS flags);

  void* operator new(size_t size, Arena *arena) {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }

  void* operator new [](size_t size, Arena *arena) {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }

  void* operator new(size_t size) {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new [](size_t size) {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void  operator delete(void* p);
  void  operator delete [](void* p);
};
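
// Example (a sketch; Foo is a hypothetical ResourceObj subclass and 'arena' an Arena*):
//
//   class Foo : public ResourceObj { ... };
//
//   ResourceMark rm;
//   Foo* a = new Foo();                                    // resource area (the default)
//   Foo* b = new (ResourceObj::C_HEAP, mtInternal) Foo();  // C heap; pair with delete
//   Foo* c = new (arena) Foo();                            // arena; reclaimed with the arena
//   delete b;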

// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
    /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))

#define FREE_C_HEAP_ARRAY(type, old, memflags) \
  FreeHeap((char*)(old), memflags)

#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\
  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc))

#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)         \
  (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)

// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
  NEW_C_HEAP_ARRAY(type, 1, memflags)

// deallocate obj of type in heap without calling dtor
#define FREE_C_HEAP_OBJ(objname, memflags)\
  FreeHeap((char*)objname, memflags);

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();    // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif


//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer.  Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
protected:
  NOT_PRODUCT(int _nesting;)

public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};
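
// A sketch of the intended pattern (illustrative only):
//
//   int* buf = NEW_RESOURCE_ARRAY(int, len);
//   ReallocMark nesting_check;         // declared in the same scope as 'buf'
//   ...
//   nesting_check.check();             // guard any operation that might reallocate
//   buf = REALLOC_RESOURCE_ARRAY(int, buf, len, new_len);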

// Helper class to allocate arrays that may become large.
// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
// and uses mapped memory for larger allocations.
// Most OS mallocs do something similar but Solaris malloc does not revert
// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
template <class E, MEMFLAGS F>
class ArrayAllocator : StackObj {
  char* _addr;
  bool _use_malloc;
  size_t _size;
 public:
  ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { }
  ~ArrayAllocator() { free(); }
  E* allocate(size_t length);
  void free();
};
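
// Example (a sketch):
//
//   ArrayAllocator<jint, mtInternal> alloc;
//   jint* big = alloc.allocate(1000000);   // malloc'ed or mapped, depending on
//                                          // ArrayAllocatorMallocLimit
//   ...
//   // released by alloc.free(), or by the destructor when 'alloc' goes out of scope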

#endif // SHARE_VM_MEMORY_ALLOCATION_HPP