/*
 * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcWhen.hpp"
#include "runtime/os.hpp"
#include "trace/traceBackend.hpp"
#include "trace/tracing.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/evacuationInfo.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "tracefiles/traceEventClasses.hpp"
#endif

// All GC dependencies against the trace framework are contained within this file.

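// Addresses are reported in trace events as pointer-sized integers.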
typedef uintptr_t TraceAddress;

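// Collection events are created UNTIMED and stamped explicitly with the start and
// end timestamps recorded in the shared GC info.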
void GCTracer::send_garbage_collection_event() const {
  EventGarbageCollection event(UNTIMED);
  if (event.should_commit()) {
    event.set_gcId(GCId::current());
    event.set_name(_shared_gc_info.name());
    event.set_cause((u2) _shared_gc_info.cause());
    event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
    event.set_longestPause(_shared_gc_info.longest_pause());
    event.set_starttime(_shared_gc_info.start_timestamp());
    event.set_endtime(_shared_gc_info.end_timestamp());
    event.commit();
  }
}

void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
  EventGCReferenceStatistics e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_type((u1)type);
    e.set_count(count);
    e.commit();
  }
}

void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype,
                                                      const MetaspaceChunkFreeListSummary& summary) const {
  EventMetaspaceChunkFreeListSummary e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_when(when);
    e.set_metadataType(mdtype);

    e.set_specializedChunks(summary.num_specialized_chunks());
    e.set_specializedChunksTotalSize(summary.specialized_chunks_size_in_bytes());

    e.set_smallChunks(summary.num_small_chunks());
    e.set_smallChunksTotalSize(summary.small_chunks_size_in_bytes());

    e.set_mediumChunks(summary.num_medium_chunks());
    e.set_mediumChunksTotalSize(summary.medium_chunks_size_in_bytes());

    e.set_humongousChunks(summary.num_humongous_chunks());
    e.set_humongousChunksTotalSize(summary.humongous_chunks_size_in_bytes());

    e.commit();
  }
}

void ParallelOldTracer::send_parallel_old_event() const {
  EventParallelOldGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

void YoungGCTracer::send_young_gc_event() const {
  EventYoungGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_tenuringThreshold(_tenuring_threshold);
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

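// Enabled-checks that let callers avoid gathering promotion details when the
// corresponding promotion events are not being recorded.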
bool YoungGCTracer::should_send_promotion_in_new_plab_event() const {
  return EventPromoteObjectInNewPLAB::is_enabled();
}

bool YoungGCTracer::should_send_promotion_outside_plab_event() const {
  return EventPromoteObjectOutsidePLAB::is_enabled();
}

void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
                                                     uint age, bool tenured,
                                                     size_t plab_size) const {
  EventPromoteObjectInNewPLAB event;
  if (event.should_commit()) {
    event.set_gcId(GCId::current());
    event.set_objectClass(klass);
    event.set_objectSize(obj_size);
    event.set_tenured(tenured);
    event.set_tenuringAge(age);
    event.set_plabSize(plab_size);
    event.commit();
  }
}

void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
                                                      uint age, bool tenured) const {
  EventPromoteObjectOutsidePLAB event;
  if (event.should_commit()) {
    event.set_gcId(GCId::current());
    event.set_objectClass(klass);
    event.set_objectSize(obj_size);
    event.set_tenured(tenured);
    event.set_tenuringAge(age);
    event.commit();
  }
}

void OldGCTracer::send_old_gc_event() const {
  EventOldGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

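// Converts the VM-internal copy-failed statistics into the trace struct shared by
// the promotion-failed and evacuation-failed events.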
static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
  TraceStructCopyFailed failed_info;
  failed_info.set_objectCount(cf_info.failed_count());
  failed_info.set_firstSize(cf_info.first_size());
  failed_info.set_smallestSize(cf_info.smallest_size());
  failed_info.set_totalSize(cf_info.total_size());
  return failed_info;
}

void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
  EventPromotionFailed e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_promotionFailed(to_trace_struct(pf_info));
    e.set_thread(pf_info.thread_trace_id());
    e.commit();
  }
}

// Common to CMS and G1
void OldGCTracer::send_concurrent_mode_failure_event() {
  EventConcurrentModeFailure e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.commit();
  }
}

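// The G1-specific events below are only compiled in when all collectors are
// included in the build (INCLUDE_ALL_GCS).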
#if INCLUDE_ALL_GCS
void G1NewTracer::send_g1_young_gc_event() {
  EventG1GarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_type(_g1_young_gc_info.type());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

void G1MMUTracer::send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms) {
  EventG1MMU e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_timeSlice(time_slice_ms);
    e.set_gcTime(gc_time_ms);
    e.set_pauseTarget(max_time_ms);
    e.commit();
  }
}

void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
  EventEvacuationInformation e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_cSetRegions(info->collectionset_regions());
    e.set_cSetUsedBefore(info->collectionset_used_before());
    e.set_cSetUsedAfter(info->collectionset_used_after());
    e.set_allocationRegions(info->allocation_regions());
    e.set_allocationRegionsUsedBefore(info->alloc_regions_used_before());
    e.set_allocationRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
    e.set_bytesCopied(info->bytes_copied());
    e.set_regionsFreed(info->regions_freed());
    e.commit();
  }
}

void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
  EventEvacuationFailed e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_evacuationFailed(to_trace_struct(ef_info));
    e.commit();
  }
}

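// Evacuation (PLAB) statistics are tracked internally in HeapWords and are
// converted to bytes before being published in the statistics events.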
static TraceStructG1EvacuationStatistics
create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) {
  TraceStructG1EvacuationStatistics s;
  s.set_gcId(gcid);
  s.set_allocated(summary.allocated() * HeapWordSize);
  s.set_wasted(summary.wasted() * HeapWordSize);
  s.set_used(summary.used() * HeapWordSize);
  s.set_undoWaste(summary.undo_wasted() * HeapWordSize);
  s.set_regionEndWaste(summary.region_end_waste() * HeapWordSize);
  s.set_regionsRefilled(summary.regions_filled());
  s.set_directAllocated(summary.direct_allocated() * HeapWordSize);
  s.set_failureUsed(summary.failure_used() * HeapWordSize);
  s.set_failureWaste(summary.failure_waste() * HeapWordSize);
  return s;
}

void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
  EventG1EvacuationYoungStatistics surv_evt;
  if (surv_evt.should_commit()) {
    surv_evt.set_statistics(create_g1_evacstats(GCId::current(), summary));
    surv_evt.commit();
  }
}

void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
  EventG1EvacuationOldStatistics old_evt;
  if (old_evt.should_commit()) {
    old_evt.set_statistics(create_g1_evacstats(GCId::current(), summary));
    old_evt.commit();
  }
}

void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
                                             size_t target_occupancy,
                                             size_t current_occupancy,
                                             size_t last_allocation_size,
                                             double last_allocation_duration,
                                             double last_marking_length) {
  EventG1BasicIHOP evt;
  if (evt.should_commit()) {
    evt.set_gcId(GCId::current());
    evt.set_threshold(threshold);
    evt.set_targetOccupancy(target_occupancy);
    evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
    evt.set_currentOccupancy(current_occupancy);
    evt.set_recentMutatorAllocationSize(last_allocation_size);
    evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
    evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
    evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
    evt.commit();
  }
}

void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
                                                size_t internal_target_occupancy,
                                                size_t current_occupancy,
                                                size_t additional_buffer_size,
                                                double predicted_allocation_rate,
                                                double predicted_marking_length,
                                                bool prediction_active) {
  EventG1AdaptiveIHOP evt;
  if (evt.should_commit()) {
    evt.set_gcId(GCId::current());
    evt.set_threshold(threshold);
    evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0);
    evt.set_ihopTargetOccupancy(internal_target_occupancy);
    evt.set_currentOccupancy(current_occupancy);
    evt.set_additionalBufferSize(additional_buffer_size);
    evt.set_predictedAllocationRate(predicted_allocation_rate);
    evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS);
    evt.set_predictionActive(prediction_active);
    evt.commit();
  }
}

#endif

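// Converters from the VM-internal space summaries to the trace structs embedded
// in the heap summary events.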
static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
  TraceStructVirtualSpace space;
  space.set_start((TraceAddress)summary.start());
  space.set_committedEnd((TraceAddress)summary.committed_end());
  space.set_committedSize(summary.committed_size());
  space.set_reservedEnd((TraceAddress)summary.reserved_end());
  space.set_reservedSize(summary.reserved_size());
  return space;
}

static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) {
  TraceStructObjectSpace space;
  space.set_start((TraceAddress)summary.start());
  space.set_end((TraceAddress)summary.end());
  space.set_used(summary.used());
  space.set_size(summary.size());
  return space;
}

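// Visitor that sends the heap summary event matching the concrete summary type.
// The G1 and ParallelScavenge overloads also send the generic GCHeapSummary event
// before adding collector-specific details.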
class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
  GCWhen::Type _when;
 public:
  GCHeapSummaryEventSender(GCWhen::Type when) : _when(when) {}

  void visit(const GCHeapSummary* heap_summary) const {
    const VirtualSpaceSummary& heap_space = heap_summary->heap();

    EventGCHeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(GCId::current());
      e.set_when((u1)_when);
      e.set_heapSpace(to_trace_struct(heap_space));
      e.set_heapUsed(heap_summary->used());
      e.commit();
    }
  }

  void visit(const G1HeapSummary* g1_heap_summary) const {
    visit((GCHeapSummary*)g1_heap_summary);

    EventG1HeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(GCId::current());
      e.set_when((u1)_when);
      e.set_edenUsedSize(g1_heap_summary->edenUsed());
      e.set_edenTotalSize(g1_heap_summary->edenCapacity());
      e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
      e.set_numberOfRegions(g1_heap_summary->numberOfRegions());
      e.commit();
    }
  }

  void visit(const PSHeapSummary* ps_heap_summary) const {
    visit((GCHeapSummary*)ps_heap_summary);

    const VirtualSpaceSummary& old_summary = ps_heap_summary->old();
    const SpaceSummary& old_space = ps_heap_summary->old_space();
    const VirtualSpaceSummary& young_summary = ps_heap_summary->young();
    const SpaceSummary& eden_space = ps_heap_summary->eden();
    const SpaceSummary& from_space = ps_heap_summary->from();
    const SpaceSummary& to_space = ps_heap_summary->to();

    EventPSHeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(GCId::current());
      e.set_when((u1)_when);

      e.set_oldSpace(to_trace_struct(old_summary));
      e.set_oldObjectSpace(to_trace_struct(old_space));
      e.set_youngSpace(to_trace_struct(young_summary));
      e.set_edenSpace(to_trace_struct(eden_space));
      e.set_fromSpace(to_trace_struct(from_space));
      e.set_toSpace(to_trace_struct(to_space));
      e.commit();
    }
  }
};

void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
  GCHeapSummaryEventSender visitor(when);
  heap_summary.accept(&visitor);
}

static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
  TraceStructMetaspaceSizes meta_sizes;

  meta_sizes.set_committed(sizes.committed());
  meta_sizes.set_used(sizes.used());
  meta_sizes.set_reserved(sizes.reserved());

  return meta_sizes;
}

void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
  EventMetaspaceSummary e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_when((u1) when);
    e.set_gcThreshold(meta_space_summary.capacity_until_GC());
    e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
    e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
    e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
    e.commit();
  }
}

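// Maps each GC phase onto the pause or concurrent phase event matching its
// nesting level; phases deeper than the deepest defined event level are ignored.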
class PhaseSender : public PhaseVisitor {
  void visit_pause(GCPhase* phase) {
    assert(phase->level() < PhasesStack::PHASE_LEVELS, "Need more event types for PausePhase");

    switch (phase->level()) {
      case 0: send_phase<EventGCPhasePause>(phase); break;
      case 1: send_phase<EventGCPhasePauseLevel1>(phase); break;
      case 2: send_phase<EventGCPhasePauseLevel2>(phase); break;
      case 3: send_phase<EventGCPhasePauseLevel3>(phase); break;
      case 4: send_phase<EventGCPhasePauseLevel4>(phase); break;
      default: /* Ignore sending this phase */ break;
    }
  }

  void visit_concurrent(GCPhase* phase) {
    assert(phase->level() < 1, "There is only one level for ConcurrentPhase");

    switch (phase->level()) {
      case 0: send_phase<EventGCPhaseConcurrent>(phase); break;
      default: /* Ignore sending this phase */ break;
    }
  }

 public:
  template<typename T>
  void send_phase(GCPhase* phase) {
    T event(UNTIMED);
    if (event.should_commit()) {
      event.set_gcId(GCId::current());
      event.set_name(phase->name());
      event.set_starttime(phase->start());
      event.set_endtime(phase->end());
      event.commit();
    }
  }

  void visit(GCPhase* phase) {
    if (phase->type() == GCPhase::PausePhaseType) {
      visit_pause(phase);
    } else {
      assert(phase->type() == GCPhase::ConcurrentPhaseType, "Should be ConcurrentPhaseType");
      visit_concurrent(phase);
    }
  }
};

void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
  PhaseSender phase_reporter;

  TimePartitionPhasesIterator iter(time_partitions);
  while (iter.has_next()) {
    GCPhase* phase = iter.next();
    phase->accept(&phase_reporter);
  }
}