threadLocalAllocBuffer.hpp revision 11464:2604d6f0d78b
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_THREADLOCALALLOCBUFFER_HPP
#define SHARE_VM_GC_SHARED_THREADLOCALALLOCBUFFER_HPP

#include "gc/shared/gcUtil.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/perfData.hpp"
#include "runtime/vm_version.hpp"

class GlobalTLABStats;

// ThreadLocalAllocBuffer: a descriptor for thread-local storage used by
// the threads for allocation.
//            It is thread-private at any time, but may be multiplexed over
//            time across multiple threads. The park()/unpark() pair is
//            used to make it available for such multiplexing.
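//
// A rough lifecycle sketch (illustrative only; the real call sites are in
// the allocation fast/slow paths and in the GC code):
//
//   tlab.initialize();                      // at thread start, when UseTLAB
//   HeapWord* obj = tlab.allocate(size);    // bump-pointer fast path
//   if (obj == NULL) {
//     // slow path: allocate outside the TLAB, or retire and refill it
//   }
//   tlab.make_parsable(retire);             // before a GC or at thread exit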
class ThreadLocalAllocBuffer: public CHeapObj<mtThread> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
private:
  HeapWord* _start;                              // address of TLAB
  HeapWord* _top;                                // address after last allocation
  HeapWord* _pf_top;                             // allocation prefetch watermark
  HeapWord* _end;                                // allocation end (excluding alignment_reserve)
  size_t    _desired_size;                       // desired size   (including alignment_reserve)
  size_t    _refill_waste_limit;                 // hold onto tlab if free() is larger than this
  size_t    _allocated_before_last_gc;           // total bytes allocated up until the last gc

  static size_t   _max_size;                          // maximum size of any TLAB
  static int      _reserve_for_allocation_prefetch;   // Reserve at the end of the TLAB
  static unsigned _target_refills;                    // expected number of refills between GCs

  unsigned  _number_of_refills;
  unsigned  _fast_refill_waste;
  unsigned  _slow_refill_waste;
  unsigned  _gc_waste;
  unsigned  _slow_allocations;

  AdaptiveWeightedAverage _allocation_fraction;  // fraction of eden allocated in tlabs

  void accumulate_statistics();
  void initialize_statistics();

  void set_start(HeapWord* start)                { _start = start; }
  void set_end(HeapWord* end)                    { _end = end; }
  void set_top(HeapWord* top)                    { _top = top; }
  void set_pf_top(HeapWord* pf_top)              { _pf_top = pf_top; }
  void set_desired_size(size_t desired_size)     { _desired_size = desired_size; }
  void set_refill_waste_limit(size_t waste)      { _refill_waste_limit = waste;  }

  size_t initial_refill_waste_limit()            { return desired_size() / TLABRefillWasteFraction; }

  static unsigned target_refills()               { return _target_refills; }
  size_t initial_desired_size();

  size_t remaining() const                       { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); }

  // Make parsable and release it.
  void reset();

  // Resize based on amount of allocation, etc.
  void resize();

  void invariants() const { assert(top() >= start() && top() <= end(), "invalid tlab"); }

  void initialize(HeapWord* start, HeapWord* top, HeapWord* end);

  void print_stats(const char* tag);

  Thread* myThread();

  // statistics

  unsigned number_of_refills() const { return _number_of_refills; }
  unsigned fast_refill_waste() const { return _fast_refill_waste; }
  unsigned slow_refill_waste() const { return _slow_refill_waste; }
  unsigned gc_waste() const          { return _gc_waste; }
  unsigned slow_allocations() const  { return _slow_allocations; }

  static GlobalTLABStats* _global_stats;
  static GlobalTLABStats* global_stats() { return _global_stats; }

public:
  ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0) {
    // do nothing. TLABs must be initialized by initialize() calls.
  }

  static size_t min_size()                       { return align_object_size(MinTLABSize / HeapWordSize) + alignment_reserve(); }
  static size_t max_size()                       { assert(_max_size != 0, "max_size not set up"); return _max_size; }
  static size_t max_size_in_bytes()              { return max_size() * BytesPerWord; }
  static void set_max_size(size_t max_size)      { _max_size = max_size; }

  HeapWord* start() const                        { return _start; }
  HeapWord* end() const                          { return _end; }
  HeapWord* hard_end() const                     { return _end + alignment_reserve(); }
  HeapWord* top() const                          { return _top; }
  HeapWord* pf_top() const                       { return _pf_top; }
  size_t desired_size() const                    { return _desired_size; }
  size_t used() const                            { return pointer_delta(top(), start()); }
  size_t used_bytes() const                      { return pointer_delta(top(), start(), 1); }
  size_t free() const                            { return pointer_delta(end(), top()); }
  // Don't discard tlab if remaining space is larger than this.
  size_t refill_waste_limit() const              { return _refill_waste_limit; }
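  // Worked example (assuming the default TLABRefillWasteFraction of 64): a
  // TLAB whose desired_size() is 64K words starts with a refill_waste_limit
  // of 64K / 64 = 1K words.  A slow-path allocation retires and refills the
  // TLAB only once free() has dropped below this limit; otherwise the object
  // is allocated outside the TLAB and the limit is raised by
  // refill_waste_limit_increment() (see record_slow_allocation()).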

  // Allocate size HeapWords. The memory is NOT initialized to zero.
  inline HeapWord* allocate(size_t size);
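  // The definition lives in the corresponding threadLocalAllocBuffer.inline.hpp;
  // conceptually it is a bump-pointer allocation along these lines (sketch only):
  //
  //   HeapWord* obj = top();
  //   if (pointer_delta(end(), obj) >= size) {
  //     set_top(obj + size);
  //     return obj;
  //   }
  //   return NULL;   // caller falls back to the slow path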

  // Reserve space at the end of the TLAB (the larger of a filler array
  // header and the allocation prefetch reserve).
  static size_t end_reserve() {
    int reserve_size = typeArrayOopDesc::header_size(T_INT);
    return MAX2(reserve_size, _reserve_for_allocation_prefetch);
  }
  static size_t alignment_reserve()              { return align_object_size(end_reserve()); }
  static size_t alignment_reserve_in_bytes()     { return alignment_reserve() * HeapWordSize; }

  // Return the TLAB size, or the remaining space in eden, such that the
  // space is large enough to hold obj_size and the necessary fill space.
  // Otherwise return 0.
  inline size_t compute_size(size_t obj_size);
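  // Typical slow-path use (sketch only, not the actual call site):
  //
  //   size_t new_tlab_size = thread->tlab().compute_size(obj_size);
  //   if (new_tlab_size == 0) {
  //     // refilling is not worthwhile; allocate the object directly
  //     // in the shared heap instead
  //   }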

  // Record slow allocation
  inline void record_slow_allocation(size_t obj_size);

  // Initialization at startup
  static void startup_initialization();

  // Make an in-use tlab parsable, optionally retiring and/or zapping it.
  void make_parsable(bool retire, bool zap = true);

  // Retire in-use tlab before allocation of a new tlab
  void clear_before_allocation();

  // Accumulate statistics across all tlabs before gc
  static void accumulate_statistics_before_gc();

  // Resize tlabs for all threads
  static void resize_all_tlabs();

  void fill(HeapWord* start, HeapWord* top, size_t new_size);
  void initialize();

  static size_t refill_waste_limit_increment()   { return TLABWasteIncrement; }

  // Code generation support
  static ByteSize start_offset()                 { return byte_offset_of(ThreadLocalAllocBuffer, _start); }
  static ByteSize end_offset()                   { return byte_offset_of(ThreadLocalAllocBuffer, _end  ); }
  static ByteSize top_offset()                   { return byte_offset_of(ThreadLocalAllocBuffer, _top  ); }
  static ByteSize pf_top_offset()                { return byte_offset_of(ThreadLocalAllocBuffer, _pf_top  ); }
  static ByteSize size_offset()                  { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size ); }
  static ByteSize refill_waste_limit_offset()    { return byte_offset_of(ThreadLocalAllocBuffer, _refill_waste_limit ); }

  static ByteSize number_of_refills_offset()     { return byte_offset_of(ThreadLocalAllocBuffer, _number_of_refills ); }
  static ByteSize fast_refill_waste_offset()     { return byte_offset_of(ThreadLocalAllocBuffer, _fast_refill_waste ); }
  static ByteSize slow_allocations_offset()      { return byte_offset_of(ThreadLocalAllocBuffer, _slow_allocations ); }

  void verify();
};

class GlobalTLABStats: public CHeapObj<mtThread> {
private:

  // Accumulate perfdata in private variables because
  // PerfData should be write-only for security reasons
  // (see perfData.hpp)
  unsigned _allocating_threads;
  unsigned _total_refills;
  unsigned _max_refills;
  size_t   _total_allocation;
  size_t   _total_gc_waste;
  size_t   _max_gc_waste;
  size_t   _total_slow_refill_waste;
  size_t   _max_slow_refill_waste;
  size_t   _total_fast_refill_waste;
  size_t   _max_fast_refill_waste;
  unsigned _total_slow_allocations;
  unsigned _max_slow_allocations;

  PerfVariable* _perf_allocating_threads;
  PerfVariable* _perf_total_refills;
  PerfVariable* _perf_max_refills;
  PerfVariable* _perf_allocation;
  PerfVariable* _perf_gc_waste;
  PerfVariable* _perf_max_gc_waste;
  PerfVariable* _perf_slow_refill_waste;
  PerfVariable* _perf_max_slow_refill_waste;
  PerfVariable* _perf_fast_refill_waste;
  PerfVariable* _perf_max_fast_refill_waste;
  PerfVariable* _perf_slow_allocations;
  PerfVariable* _perf_max_slow_allocations;

  AdaptiveWeightedAverage _allocating_threads_avg;

public:
  GlobalTLABStats();

  // Initialize all counters
  void initialize();

  // Write the accumulated values to the perf counters
  void publish();

  void print();

  // Accessors
  unsigned allocating_threads_avg() {
    return MAX2((unsigned)(_allocating_threads_avg.average() + 0.5), 1U);
  }

  size_t allocation() {
    return _total_allocation;
  }

  // Update methods

  void update_allocating_threads() {
    _allocating_threads++;
  }
  void update_number_of_refills(unsigned value) {
    _total_refills += value;
    _max_refills    = MAX2(_max_refills, value);
  }
  void update_allocation(size_t value) {
    _total_allocation += value;
  }
  void update_gc_waste(size_t value) {
    _total_gc_waste += value;
    _max_gc_waste    = MAX2(_max_gc_waste, value);
  }
  void update_fast_refill_waste(size_t value) {
    _total_fast_refill_waste += value;
    _max_fast_refill_waste    = MAX2(_max_fast_refill_waste, value);
  }
  void update_slow_refill_waste(size_t value) {
    _total_slow_refill_waste += value;
    _max_slow_refill_waste    = MAX2(_max_slow_refill_waste, value);
  }
  void update_slow_allocations(unsigned value) {
    _total_slow_allocations += value;
    _max_slow_allocations    = MAX2(_max_slow_allocations, value);
  }
};

#endif // SHARE_VM_GC_SHARED_THREADLOCALALLOCBUFFER_HPP