/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_ARENA_HPP
#define SHARE_VM_ARENA_HPP

#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"

#include <new>

// The byte alignment to be used by Arena::Amalloc.  See bugid 4169348.
// Note: this value must be a power of 2

#define ARENA_AMALLOC_ALIGNMENT (2*BytesPerWord)

#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
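
// Worked example (illustrative, for a 64-bit build where BytesPerWord == 8,
// so ARENA_AMALLOC_ALIGNMENT == 16): ARENA_ALIGN(1) == 16,
// ARENA_ALIGN(16) == 16, ARENA_ALIGN(17) == 32. The round-up works by adding
// alignment-minus-one and then masking off the low bits: (x + 15) & ~15.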

//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {

 private:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // estimated sizeof(Chunk) plus internal malloc
                                // headers; kept a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    tiny_size  =  256  - slack, // Size of first chunk (tiny)
    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };
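
  // Why "- slack" (illustrative arithmetic): chunk memory comes from the C
  // heap, so the usable size is reduced so that chunk header plus payload can
  // stay just under a power of two. On 64-bit, size = 32*K - 40 = 32728 bytes,
  // leaving room for allocator bookkeeping below the 32 KiB boundary that a
  // buddy-style malloc would otherwise round the request up to.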

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len;  }
  Chunk* next() const           { return _next;  }
  void set_next(Chunk* n)       { _next = n;  }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size();  }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};

//------------------------------Arena------------------------------------------
// Fast allocation of memory
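//
// Usage sketch (illustrative; mtCompiler stands in for any MEMFLAGS tag and
// Node for any arena-allocated placeholder type):
//
//   Arena* arena = new (mtCompiler) Arena(mtCompiler);
//   void*  mem   = arena->Amalloc(sizeof(Node));  // bump-pointer fast path
//   Node*  n     = new (mem) Node();              // placement new
//   ...
//   delete arena;  // releases all chunks at once; no per-object Afree needed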
class Arena : public CHeapObj<mtNone> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  MEMFLAGS    _flags;           // Memory tracking flags

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // Current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;
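
  // Would _hwm + request wrap the address space? Checked by subtraction so
  // the overflowing sum is never formed. Returns false under RETURN_NULL;
  // under EXIT_OOM it aborts via signal_out_of_memory() instead.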
  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
  }

 public:
  Arena(MEMFLAGS memflag);
  Arena(MEMFLAGS memflag, size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size) throw();
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
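
  // Example (illustrative): under RETURN_NULL the caller must check the
  // result, since a failed chunk allocation yields NULL instead of aborting:
  //   void* p = arena->Amalloc(len, AllocFailStrategy::RETURN_NULL);
  //   if (p == NULL) return false;  // caller-specific bail-out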
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert((x & (sizeof(char*)-1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment, which is 8 bytes on 32-bit SPARC.
  // In all other cases Amalloc_D() behaves exactly like Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert((x & (sizeof(char*)-1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }
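
  // Worked example for the 32-bit SPARC path (illustrative): if _hwm ends in
  // 0x...4, then delta = ((0x...4 + 7) & ~7) - 0x...4 = 4, so the request
  // grows by 4 bytes and the returned pointer (old + delta) is 8-byte aligned.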

  // Fast delete in arena.  Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }
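
  // Afree only reclaims storage when the freed block ends exactly at _hwm,
  // i.e. it was the most recent allocation; anything else is a no-op until
  // the whole arena is destroyed. Example (illustrative):
  //   void* a = arena->Amalloc(64);
  //   void* b = arena->Amalloc(64);
  //   arena->Afree(a, 64);  // no-op: b sits above a, so a does not end at _hwm
  //   arena->Afree(b, 64);  // rolls _hwm back; those 64 bytes are reused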

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         { return _size_in_bytes; }
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
  static void free_all(char** start, char** end)                                     PRODUCT_RETURN;

private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void   reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)
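
// Example (illustrative): growing a temporary jint buffer in an arena.
//   jint* buf = NEW_ARENA_ARRAY(arena, jint, 16);
//   buf = REALLOC_ARENA_ARRAY(arena, jint, buf, 16, 32);
//   FREE_ARENA_ARRAY(arena, jint, buf, 32);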

#endif // SHARE_VM_ARENA_HPP