1 /* 2 * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
#include "memory/resourceArea.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "runtime/timer.hpp"
#include "runtime/os.hpp"
#include "utilities/enumIterator.hpp"
#include "utilities/macros.hpp"

// Out-of-class definition for the constexpr array declared in the header
// (needed when the array is ODR-used; harmless under C++17's implicit-inline rule).
constexpr const char* G1GCPhaseTimes::GCMergeRSWorkItemsStrings[];

// Builds the per-phase timing tables: one WorkerDataArray<double> slot per
// parallel GC phase, each sized for max_gc_threads workers, plus the per-phase
// "thread work item" counters (cards scanned, bytes copied, ...). The second
// string passed to each WorkerDataArray is the human-readable title used in
// -Xlog:gc+phases output; do not change these strings casually, external log
// parsers key off them. Ends by reset()-ing all pause-local state.
G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
  _max_gc_threads(max_gc_threads),
  _gc_start_counter(0),
  _gc_pause_time_ms(0.0),
  _ref_phase_times(gc_timer, max_gc_threads),
  _weak_phase_times(max_gc_threads)
{
  assert(max_gc_threads > 0, "Must have some GC threads");

  _gc_par_phases[RetireTLABsAndFlushLogs] = new WorkerDataArray<double>("RetireTLABsAndFlushLogs", "JT Retire TLABs And Flush Logs (ms):", max_gc_threads);
  _gc_par_phases[NonJavaThreadFlushLogs] = new WorkerDataArray<double>("NonJavaThreadFlushLogs", "Non-JT Flush Logs (ms):", max_gc_threads);

  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>("GCWorkerStart", "GC Worker Start (ms):", max_gc_threads);
  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>("ExtRootScan", "Ext Root Scanning (ms):", max_gc_threads);

  // Root scanning phases
  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>("ThreadRoots", "Thread Roots (ms):", max_gc_threads);
  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>("CLDGRoots", "CLDG Roots (ms):", max_gc_threads);
  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>("CMRefRoots", "CM RefProcessor Roots (ms):", max_gc_threads);

  // One phase per strong OopStorage. The display title is
  // "<storage name> Roots (ms):", assembled into a C-heap (mtGC) buffer that
  // is handed to the WorkerDataArray and intentionally not freed here — it
  // must stay valid for the lifetime of the phase table.
  for (auto id : EnumRange<OopStorageSet::StrongId>()) {
    GCParPhases phase = strong_oopstorage_phase(id);
    const char* phase_name_postfix = " Roots (ms):";
    const char* storage_name = OopStorageSet::storage(id)->name();
    char* oop_storage_phase_name = NEW_C_HEAP_ARRAY(char, strlen(phase_name_postfix) + strlen(storage_name) + 1, mtGC);
    strcpy(oop_storage_phase_name, storage_name);
    strcat(oop_storage_phase_name, phase_name_postfix);
    _gc_par_phases[phase] = new WorkerDataArray<double>(storage_name, oop_storage_phase_name, max_gc_threads);
  }

  _gc_par_phases[MergeER] = new WorkerDataArray<double>("MergeER", "Eager Reclaim (ms):", max_gc_threads);

  // MergeRS and OptMergeRS share the same set of work-item labels, indexed by
  // the G1GCPhaseTimes::MergeRSContainers enum.
  _gc_par_phases[MergeRS] = new WorkerDataArray<double>("MergeRS", "Remembered Sets (ms):", max_gc_threads);
  for (uint i = 0; i < MergeRSContainersSentinel; i++) {
    _gc_par_phases[MergeRS]->create_thread_work_items(GCMergeRSWorkItemsStrings[i], i);
  }

  _gc_par_phases[OptMergeRS] = new WorkerDataArray<double>("OptMergeRS", "Optional Remembered Sets (ms):", max_gc_threads);
  for (uint i = 0; i < MergeRSContainersSentinel; i++) {
    _gc_par_phases[OptMergeRS]->create_thread_work_items(GCMergeRSWorkItemsStrings[i], i);
  }

  _gc_par_phases[MergeLB] = new WorkerDataArray<double>("MergeLB", "Log Buffers (ms):", max_gc_threads);
  _gc_par_phases[ScanHR] = new WorkerDataArray<double>("ScanHR", "Scan Heap Roots (ms):", max_gc_threads);
  _gc_par_phases[OptScanHR] = new WorkerDataArray<double>("OptScanHR", "Optional Scan Heap Roots (ms):", max_gc_threads);
  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>("CodeRoots", "Code Root Scan (ms):", max_gc_threads);
  _gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>("OptCodeRoots", "Optional Code Root Scan (ms):", max_gc_threads);
  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>("ObjCopy", "Object Copy (ms):", max_gc_threads);
  _gc_par_phases[OptObjCopy] = new WorkerDataArray<double>("OptObjCopy", "Optional Object Copy (ms):", max_gc_threads);
  _gc_par_phases[Termination] = new WorkerDataArray<double>("Termination", "Termination (ms):", max_gc_threads);
  _gc_par_phases[OptTermination] = new WorkerDataArray<double>("OptTermination", "Optional Termination (ms):", max_gc_threads);
  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>("GCWorkerTotal", "GC Worker Total (ms):", max_gc_threads);
  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>("GCWorkerEnd", "GC Worker End (ms):", max_gc_threads);
  _gc_par_phases[Other] = new WorkerDataArray<double>("Other", "GC Worker Other (ms):", max_gc_threads);
  _gc_par_phases[MergePSS] = new WorkerDataArray<double>("MergePSS", "Merge Per-Thread State (ms):", max_gc_threads);
  _gc_par_phases[RestoreEvacuationFailedRegions] = new WorkerDataArray<double>("RestoreEvacuationFailedRegions", "Restore Evacuation Failed Regions (ms):", max_gc_threads);
  _gc_par_phases[RemoveSelfForwards] = new WorkerDataArray<double>("RemoveSelfForwards", "Remove Self Forwards (ms):", max_gc_threads);
  _gc_par_phases[ClearCardTable] = new WorkerDataArray<double>("ClearLoggedCards", "Clear Logged Cards (ms):", max_gc_threads);
  _gc_par_phases[RecalculateUsed] = new WorkerDataArray<double>("RecalculateUsed", "Recalculate Used Memory (ms):", max_gc_threads);
#if COMPILER2_OR_JVMCI
  _gc_par_phases[UpdateDerivedPointers] = new WorkerDataArray<double>("UpdateDerivedPointers", "Update Derived Pointers (ms):", max_gc_threads);
#endif
  _gc_par_phases[EagerlyReclaimHumongousObjects] = new WorkerDataArray<double>("EagerlyReclaimHumongousObjects", "Eagerly Reclaim Humongous Objects (ms):", max_gc_threads);
  _gc_par_phases[RestorePreservedMarks] = new WorkerDataArray<double>("RestorePreservedMarks", "Restore Preserved Marks (ms):", max_gc_threads);
  _gc_par_phases[ProcessEvacuationFailedRegions] = new WorkerDataArray<double>("ProcessEvacuationFailedRegions", "Process Evacuation Failed Regions (ms):", max_gc_threads);

  // Per-phase sub-counters; index constants come from the corresponding
  // G1GCPhaseTimes enums (e.g. ScanHRScannedCards).
  _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
  _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Blocks:", ScanHRScannedBlocks);
  _gc_par_phases[ScanHR]->create_thread_work_items("Claimed Chunks:", ScanHRClaimedChunks);
  _gc_par_phases[ScanHR]->create_thread_work_items("Found Roots:", ScanHRFoundRoots);

  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Blocks:", ScanHRScannedBlocks);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Claimed Chunks:", ScanHRClaimedChunks);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Found Roots:", ScanHRFoundRoots);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Refs:", ScanHRScannedOptRefs);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Used Memory:", ScanHRUsedMemory);

  _gc_par_phases[MergeLB]->create_thread_work_items("Dirty Cards:", MergeLBDirtyCards);
  _gc_par_phases[MergeLB]->create_thread_work_items("Skipped Cards:", MergeLBSkippedCards);

  _gc_par_phases[CodeRoots]->create_thread_work_items("Scanned Nmethods:", CodeRootsScannedNMethods);

  _gc_par_phases[OptCodeRoots]->create_thread_work_items("Scanned Nmethods:", CodeRootsScannedNMethods);

  _gc_par_phases[MergePSS]->create_thread_work_items("Copied Bytes:", MergePSSCopiedBytes);
  _gc_par_phases[MergePSS]->create_thread_work_items("LAB Waste:", MergePSSLABWasteBytes);
  _gc_par_phases[MergePSS]->create_thread_work_items("LAB Undo Waste:", MergePSSLABUndoWasteBytes);
  _gc_par_phases[MergePSS]->create_thread_work_items("Evac Fail Extra Cards:", MergePSSEvacFailExtra);

  _gc_par_phases[RestoreEvacuationFailedRegions]->create_thread_work_items("Evacuation Failed Regions:", RestoreEvacFailureRegionsEvacFailedNum);
  _gc_par_phases[RestoreEvacuationFailedRegions]->create_thread_work_items("Pinned Regions:", RestoreEvacFailureRegionsPinnedNum);
  _gc_par_phases[RestoreEvacuationFailedRegions]->create_thread_work_items("Allocation Failed Regions:", RestoreEvacFailureRegionsAllocFailedNum);

  _gc_par_phases[RemoveSelfForwards]->create_thread_work_items("Forward Chunks:", RemoveSelfForwardChunksNum);
  _gc_par_phases[RemoveSelfForwards]->create_thread_work_items("Empty Forward Chunks:", RemoveSelfForwardEmptyChunksNum);
  _gc_par_phases[RemoveSelfForwards]->create_thread_work_items("Forward Objects:", RemoveSelfForwardObjectsNum);
  _gc_par_phases[RemoveSelfForwards]->create_thread_work_items("Forward Bytes:", RemoveSelfForwardObjectsBytes);

  _gc_par_phases[EagerlyReclaimHumongousObjects]->create_thread_work_items("Humongous Total:", EagerlyReclaimNumTotal);
  _gc_par_phases[EagerlyReclaimHumongousObjects]->create_thread_work_items("Humongous Candidates:", EagerlyReclaimNumCandidates);
  _gc_par_phases[EagerlyReclaimHumongousObjects]->create_thread_work_items("Humongous Reclaimed:", EagerlyReclaimNumReclaimed);

  _gc_par_phases[SampleCollectionSetCandidates] = new WorkerDataArray<double>("SampleCandidates", "Sample CSet Candidates (ms):", max_gc_threads);

  // Termination attempt counts use the default work-item index.
  _gc_par_phases[Termination]->create_thread_work_items("Termination Attempts:");

  _gc_par_phases[OptTermination]->create_thread_work_items("Optional Termination Attempts:");

  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>("RedirtyCards", "Redirty Logged Cards (ms):", max_gc_threads);
  _gc_par_phases[RedirtyCards]->create_thread_work_items("Redirtied Cards:");

  _gc_par_phases[ResizeThreadLABs] = new WorkerDataArray<double>("ResizeTLABs", "Resize TLABs (ms):", max_gc_threads);

  _gc_par_phases[FreeCollectionSet] = new WorkerDataArray<double>("FreeCSet", "Free Collection Set (ms):", max_gc_threads);
  _gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>("YoungFreeCSet", "Young Free Collection Set (ms):", max_gc_threads);
  _gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>("NonYoungFreeCSet", "Non-Young Free Collection Set (ms):", max_gc_threads);
  _gc_par_phases[RebuildFreeList] = new WorkerDataArray<double>("RebuildFreeList", "Parallel Rebuild Free List (ms):", max_gc_threads);

  _gc_par_phases[ResetMarkingState] = new WorkerDataArray<double>("ResetMarkingState", "Reset Marking State (ms):", max_gc_threads);
  _gc_par_phases[NoteStartOfMark] = new WorkerDataArray<double>("NoteStartOfMark", "Note Start Of Mark (ms):", max_gc_threads);

  reset();
}

// Clears all pause-local measurements (serial timings, per-worker phase data,
// reference- and weak-processing times). Called from the constructor and at
// the start of every pause via record_gc_pause_start().
void G1GCPhaseTimes::reset() {
  _cur_collection_initial_evac_time_ms = 0.0;
  _cur_optional_evac_time_ms = 0.0;
  _cur_collection_nmethod_list_cleanup_time_ms = 0.0;
  _cur_merge_heap_roots_time_ms = 0.0;
  _cur_optional_merge_heap_roots_time_ms = 0.0;
  _cur_prepare_merge_heap_roots_time_ms = 0.0;
  _cur_distribute_log_buffers_time_ms = 0.0;
  _cur_optional_prepare_merge_heap_roots_time_ms = 0.0;
  _cur_pre_evacuate_prepare_time_ms = 0.0;
  _cur_post_evacuate_cleanup_1_time_ms = 0.0;
  _cur_post_evacuate_cleanup_2_time_ms = 0.0;
  _cur_expand_heap_time_ms = 0.0;
  _cur_ref_proc_time_ms = 0.0;
  _cur_collection_start_sec = 0.0;
  _root_region_scan_wait_time_ms = 0.0;
  _external_accounted_time_ms = 0.0;
  _recorded_prepare_heap_roots_time_ms = 0.0;
  _recorded_young_cset_choice_time_ms = 0.0;
  _recorded_non_young_cset_choice_time_ms = 0.0;
  _recorded_prepare_for_mutator_time_ms = 0.0;
  _recorded_serial_free_cset_time_ms = 0.0;
  _recorded_total_rebuild_freelist_time_ms = 0.0;
  _recorded_serial_rebuild_freelist_time_ms = 0.0;
  _cur_region_register_time = 0.0;
  _cur_verify_before_time_ms = 0.0;
  _cur_verify_after_time_ms = 0.0;

  // Some slots may be null (e.g. UpdateDerivedPointers when built without
  // COMPILER2_OR_JVMCI), so check before resetting.
  for (int i = 0; i < GCParPhasesSentinel; i++) {
    if (_gc_par_phases[i] != nullptr) {
      _gc_par_phases[i]->reset();
    }
  }

  _ref_phase_times.reset();
  _weak_phase_times.reset();
}

// Marks the start of a pause: snapshots the elapsed-time counter and clears
// all data recorded for the previous pause.
void G1GCPhaseTimes::record_gc_pause_start() {
  _gc_start_counter = os::elapsed_counter();
  reset();
}

// Asserts that no time was recorded for `phase` by worker `i`. Relies on the
// locals `i` and `uninitialized` being in scope at the expansion site (see
// record_gc_pause_end below).
#define ASSERT_PHASE_UNINITIALIZED(phase) \
  assert(_gc_par_phases[phase] == nullptr || _gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started");

// Time recorded for `phase` by `worker`, or 0.0 if the phase slot does not
// exist or the worker never reported for it.
double G1GCPhaseTimes::worker_time(GCParPhases phase, uint worker) {
  if (_gc_par_phases[phase] == nullptr) {
    return 0.0;
  }
  double value = _gc_par_phases[phase]->get(worker);
  if (value != WorkerDataArray<double>::uninitialized()) {
    return value;
  }
  return 0.0;
}

// Finalizes the pause: computes the total pause time, and for every worker
// that actually ran derives GCWorkerTotal (end - start) and Other (total
// minus the explicitly tracked phases). Workers that never started must have
// all phase slots uninitialized.
void G1GCPhaseTimes::record_gc_pause_end() {
  _gc_pause_time_ms = TimeHelper::counter_to_millis(os::elapsed_counter() - _gc_start_counter);

  double uninitialized = WorkerDataArray<double>::uninitialized();

  for (uint i = 0; i < _max_gc_threads; i++) {
    double worker_start = _gc_par_phases[GCWorkerStart]->get(i);
    if (worker_start != uninitialized) {
      assert(_gc_par_phases[GCWorkerEnd]->get(i) != uninitialized, "Worker started but not ended.");
      double total_worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
      record_time_secs(GCWorkerTotal, i, total_worker_time);

      double worker_known_time = worker_time(ExtRootScan, i) +
                                 worker_time(ScanHR, i) +
                                 worker_time(CodeRoots, i) +
                                 worker_time(ObjCopy, i) +
                                 worker_time(Termination, i);

      // Whatever is left over is attributed to "GC Worker Other".
      record_time_secs(Other, i, total_worker_time - worker_known_time);
    } else {
      // Make sure all slots are uninitialized since this thread did not seem to have been started
      ASSERT_PHASE_UNINITIALIZED(GCWorkerEnd);
      ASSERT_PHASE_UNINITIALIZED(ExtRootScan);
      ASSERT_PHASE_UNINITIALIZED(MergeER);
      ASSERT_PHASE_UNINITIALIZED(MergeRS);
      ASSERT_PHASE_UNINITIALIZED(OptMergeRS);
      ASSERT_PHASE_UNINITIALIZED(MergeLB);
      ASSERT_PHASE_UNINITIALIZED(ScanHR);
      ASSERT_PHASE_UNINITIALIZED(CodeRoots);
      ASSERT_PHASE_UNINITIALIZED(OptCodeRoots);
      ASSERT_PHASE_UNINITIALIZED(ObjCopy);
      ASSERT_PHASE_UNINITIALIZED(OptObjCopy);
      ASSERT_PHASE_UNINITIALIZED(Termination);
    }
  }
}

#undef ASSERT_PHASE_UNINITIALIZED

// record the time a phase took in seconds
void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_id, double secs) {
  _gc_par_phases[phase]->set(worker_id, secs);
}

// add a number of seconds to a phase
void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_id, double secs) {
  _gc_par_phases[phase]->add(worker_id, secs);
}

// Record if the slot is still uninitialized, otherwise accumulate.
void G1GCPhaseTimes::record_or_add_time_secs(GCParPhases phase, uint worker_id, double secs) {
  _gc_par_phases[phase]->set_or_add(worker_id, secs);
}

double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_id) {
  return _gc_par_phases[phase]->get(worker_id);
}

// Set a work-item counter (e.g. "Scanned Cards") for a phase/worker pair.
void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
  _gc_par_phases[phase]->set_thread_work_item(worker_id, count, index);
}

void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
  _gc_par_phases[phase]->set_or_add_thread_work_item(worker_id, count, index);
}

size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_id, uint index) {
  return _gc_par_phases[phase]->get_thread_work_item(worker_id, index);
}

// return the average time for a phase in milliseconds
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) const {
  if (_gc_par_phases[phase] == nullptr) {
    return 0.0;
  }
  return _gc_par_phases[phase]->average() * 1000.0;
}

// Sum of one work-item counter across all workers; 0 if the phase slot is null.
size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase, uint index) {
  if (_gc_par_phases[phase] == nullptr) {
    return 0;
  }
  assert(_gc_par_phases[phase]->thread_work_items(index) != nullptr, "No sub count");
  return _gc_par_phases[phase]->thread_work_items(index)->sum();
}

// Prints the per-worker breakdown of `phase` at trace level, indented.
// T is either WorkerDataArray<double> or WorkerDataArray<size_t>.
template <class T>
void G1GCPhaseTimes::details(T* phase, uint indent_level) const {
  LogTarget(Trace, gc, phases, task) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.sp(indent_level * 2);
    phase->print_details_on(&ls);
  }
}

// Prints every registered work-item counter of `phase`, one indent level
// deeper than the phase itself.
void G1GCPhaseTimes::print_thread_work_items(WorkerDataArray<double>* phase, uint indent_level, outputStream* out) const {
  for (uint i = 0; i < phase->MaxThreadWorkItems; i++) {
    WorkerDataArray<size_t>* work_items = phase->thread_work_items(i);
    if (work_items != nullptr) {
      out->sp((indent_level + 1) * 2);
      work_items->print_summary_on(out, true);
      details(work_items, indent_level + 1);
    }
  }
}

// Special-cased printing for the remembered-set merge: MergeER is shown as an
// indented sub-phase of MergeRS, before MergeRS's own work items.
void G1GCPhaseTimes::debug_phase_merge_remset() const {
  LogTarget(Debug, gc, phases) lt;
  if (!lt.is_enabled()) {
    return;
  }

  ResourceMark rm;
  LogStream ls(lt);

  WorkerDataArray<double>* phase = _gc_par_phases[MergeRS];
  WorkerDataArray<double>* sub_phase = _gc_par_phases[MergeER];

  uint indent_level = 2;

  ls.sp(indent_level * 2);
  phase->print_summary_on(&ls, true);
  details(phase, indent_level);

  log_phase(sub_phase, (indent_level + 1), &ls, true);

  print_thread_work_items(phase, indent_level, &ls);
}

// Common phase printing: summary line, trace details, then work items.
void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent_level, outputStream* out, bool print_sum) const {
  out->sp(indent_level * 2);
  phase->print_summary_on(out, print_sum);
  details(phase, indent_level);

  print_thread_work_items(phase, indent_level, out);
}

// Print a phase at debug level, base indent 2.
void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase, uint extra_indent) const {
  LogTarget(Debug, gc, phases) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    log_phase(phase, 2 + extra_indent, &ls, true);
  }
}

// Print a phase at trace level, base indent 3.
void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum, uint extra_indent) const {
  LogTarget(Trace, gc, phases) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    log_phase(phase, 3 + extra_indent, &ls, print_sum);
  }
}

#define TIME_FORMAT "%.2lfms"

void G1GCPhaseTimes::info_time(const char* name, double value) const {
  log_info(gc, phases)("  %s: " TIME_FORMAT, name, value);
}

void G1GCPhaseTimes::debug_time(const char* name, double value) const {
  log_debug(gc, phases)("    %s: " TIME_FORMAT, name, value);
}

// Like debug_time(), but the line is also emitted when only gc+phases+ref
// debug logging is enabled (and gc+phases debug is not), so reference
// processing times are never silently dropped.
void G1GCPhaseTimes::debug_time_for_reference(const char* name, double value) const {
  LogTarget(Debug, gc, phases) lt;
  LogTarget(Debug, gc, phases, ref) lt2;

  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("    %s: " TIME_FORMAT, name, value);
  } else if (lt2.is_enabled()) {
    LogStream ls(lt2);
    ls.print_cr("    %s: " TIME_FORMAT, name, value);
  }
}

void G1GCPhaseTimes::trace_time(const char* name, double value) const {
  log_trace(gc, phases)("      %s: " TIME_FORMAT, name, value);
}

void G1GCPhaseTimes::trace_count(const char* name, size_t value) const {
  log_trace(gc, phases)("      %s: " SIZE_FORMAT, name, value);
}

// Prints everything that happens before evacuation starts; returns the sum of
// the printed times (ms) so the caller can account for them in "Other".
double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
  const double pre_concurrent_start_ms = average_time_ms(ResetMarkingState) +
                                         average_time_ms(NoteStartOfMark);

  const double sum_ms = pre_concurrent_start_ms +
                        _cur_pre_evacuate_prepare_time_ms +
                        _recorded_young_cset_choice_time_ms +
                        _recorded_non_young_cset_choice_time_ms +
                        _cur_region_register_time +
                        _recorded_prepare_heap_roots_time_ms;

  info_time("Pre Evacuate Collection Set", sum_ms);

  // These two phases only run for concurrent-start pauses.
  if (pre_concurrent_start_ms > 0.0) {
    debug_phase(_gc_par_phases[ResetMarkingState]);
    debug_phase(_gc_par_phases[NoteStartOfMark]);
  }

  debug_time("Pre Evacuate Prepare", _cur_pre_evacuate_prepare_time_ms);
  debug_phase(_gc_par_phases[RetireTLABsAndFlushLogs], 1);
  debug_phase(_gc_par_phases[NonJavaThreadFlushLogs], 1);
  debug_time("Choose Collection Set", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
  debug_time("Region Register", _cur_region_register_time);

  debug_time("Prepare Heap Roots", _recorded_prepare_heap_roots_time_ms);

  return sum_ms;
}

// Prints the optional-evacuation section (skipped entirely when no optional
// work happened); returns its accounted time in ms.
double G1GCPhaseTimes::print_evacuate_optional_collection_set() const {
  const double sum_ms = _cur_optional_evac_time_ms + _cur_optional_merge_heap_roots_time_ms;
  if (sum_ms > 0) {
    info_time("Merge Optional Heap Roots", _cur_optional_merge_heap_roots_time_ms);

    debug_time("Prepare Optional Merge Heap Roots", _cur_optional_prepare_merge_heap_roots_time_ms);
    debug_phase(_gc_par_phases[OptMergeRS]);

    info_time("Evacuate Optional Collection Set", _cur_optional_evac_time_ms);
    debug_phase(_gc_par_phases[OptScanHR]);
    debug_phase(_gc_par_phases[OptObjCopy]);
    debug_phase(_gc_par_phases[OptCodeRoots]);
    debug_phase(_gc_par_phases[OptTermination]);
  }
  return sum_ms;
}

// Prints heap-root merging and the initial evacuation phases; returns their
// accounted time in ms.
double G1GCPhaseTimes::print_evacuate_initial_collection_set() const {
  info_time("Merge Heap Roots", _cur_merge_heap_roots_time_ms);

  debug_time("Prepare Merge Heap Roots", _cur_prepare_merge_heap_roots_time_ms);
  debug_phase_merge_remset();

  debug_time("Distribute Log Buffers", _cur_distribute_log_buffers_time_ms);
  debug_phase(_gc_par_phases[MergeLB]);

  info_time("Evacuate Collection Set", _cur_collection_initial_evac_time_ms);

  trace_phase(_gc_par_phases[GCWorkerStart], false);
  debug_phase(_gc_par_phases[ExtRootScan]);
  // Individual root categories are only shown at trace level.
  for (int i = ExtRootScanSubPhasesFirst; i <= ExtRootScanSubPhasesLast; i++) {
    trace_phase(_gc_par_phases[i]);
  }
  debug_phase(_gc_par_phases[ScanHR]);
  debug_phase(_gc_par_phases[CodeRoots]);
  debug_phase(_gc_par_phases[ObjCopy]);
  debug_phase(_gc_par_phases[Termination]);
  debug_phase(_gc_par_phases[Other]);
  debug_phase(_gc_par_phases[GCWorkerTotal]);
  trace_phase(_gc_par_phases[GCWorkerEnd], false);

  return _cur_collection_initial_evac_time_ms + _cur_merge_heap_roots_time_ms;
}

// Prints everything after evacuation (cleanup passes, reference/weak
// processing, freeing the collection set, ...); returns the accounted time.
// Evacuation-failure-only phases are printed only when one occurred.
double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed) const {
  const double sum_ms = _cur_collection_nmethod_list_cleanup_time_ms +
                        _cur_ref_proc_time_ms +
                        (_weak_phase_times.total_time_sec() * MILLIUNITS) +
                        _cur_post_evacuate_cleanup_1_time_ms +
                        _cur_post_evacuate_cleanup_2_time_ms +
                        _recorded_total_rebuild_freelist_time_ms +
                        _recorded_prepare_for_mutator_time_ms +
                        _cur_expand_heap_time_ms;

  info_time("Post Evacuate Collection Set", sum_ms);

  debug_time("NMethod List Cleanup", _cur_collection_nmethod_list_cleanup_time_ms);

  debug_time_for_reference("Reference Processing", _cur_ref_proc_time_ms);
  _ref_phase_times.print_all_references(2, false);
  _weak_phase_times.log_total(2);
  _weak_phase_times.log_subtotals(3);

  debug_time("Post Evacuate Cleanup 1", _cur_post_evacuate_cleanup_1_time_ms);
  debug_phase(_gc_par_phases[MergePSS], 1);
  debug_phase(_gc_par_phases[ClearCardTable], 1);
  debug_phase(_gc_par_phases[RecalculateUsed], 1);
  if (evacuation_failed) {
    debug_phase(_gc_par_phases[RestoreEvacuationFailedRegions], 1);
    debug_phase(_gc_par_phases[RemoveSelfForwards], 2);
  }

  debug_time("Post Evacuate Cleanup 2", _cur_post_evacuate_cleanup_2_time_ms);
  if (evacuation_failed) {
    // NOTE(review): RecalculateUsed is printed both here and in cleanup 1
    // above — confirm against the cleanup task definitions that the phase is
    // really recorded in both passes.
    debug_phase(_gc_par_phases[RecalculateUsed], 1);
    debug_phase(_gc_par_phases[RestorePreservedMarks], 1);
    debug_phase(_gc_par_phases[ProcessEvacuationFailedRegions], 1);
  }
#if COMPILER2_OR_JVMCI
  debug_phase(_gc_par_phases[UpdateDerivedPointers], 1);
#endif
  debug_phase(_gc_par_phases[EagerlyReclaimHumongousObjects], 1);

  if (G1CollectedHeap::heap()->should_sample_collection_set_candidates()) {
    debug_phase(_gc_par_phases[SampleCollectionSetCandidates], 1);
  }
  debug_phase(_gc_par_phases[RedirtyCards], 1);
  if (UseTLAB && ResizeTLAB) {
    debug_phase(_gc_par_phases[ResizeThreadLABs], 1);
  }
  debug_phase(_gc_par_phases[FreeCollectionSet], 1);
  trace_phase(_gc_par_phases[YoungFreeCSet], true, 1);
  trace_phase(_gc_par_phases[NonYoungFreeCSet], true, 1);

  trace_time("Serial Free Collection Set", _recorded_serial_free_cset_time_ms);

  debug_time("Rebuild Free List", _recorded_total_rebuild_freelist_time_ms);
  trace_time("Serial Rebuild Free List", _recorded_serial_rebuild_freelist_time_ms);
  trace_phase(_gc_par_phases[RebuildFreeList]);

  debug_time("Prepare For Mutator", _recorded_prepare_for_mutator_time_ms);
  debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms);

  return sum_ms;
}

// "Other" is whatever part of the pause the sections above did not account for.
void G1GCPhaseTimes::print_other(double accounted_ms) const {
  info_time("Other", _gc_pause_time_ms - accounted_ms);
}

// Top-level pause report: prints each section and tracks the accounted time
// so the remainder can be reported as "Other".
void G1GCPhaseTimes::print(bool evacuation_failed) {
  if (_root_region_scan_wait_time_ms > 0.0) {
    debug_time("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
  }

  // Check if some time has been recorded for verification and only then print
  // the message. We do not use Verify*GC here to print because VerifyGCType
  // further limits actual verification.
  if (_cur_verify_before_time_ms > 0.0) {
    debug_time("Verify Before", _cur_verify_before_time_ms);
  }

  double accounted_ms = 0.0;

  accounted_ms += _root_region_scan_wait_time_ms;
  accounted_ms += _cur_verify_before_time_ms;

  accounted_ms += print_pre_evacuate_collection_set();
  accounted_ms += print_evacuate_initial_collection_set();
  accounted_ms += print_evacuate_optional_collection_set();
  accounted_ms += print_post_evacuate_collection_set(evacuation_failed);

  accounted_ms += _cur_verify_after_time_ms;

  print_other(accounted_ms);

  // See above comment on the _cur_verify_before_time_ms check.
  if (_cur_verify_after_time_ms > 0.0) {
    debug_time("Verify After", _cur_verify_after_time_ms);
  }
}

// Short name of a phase, used e.g. for JFR event payloads.
const char* G1GCPhaseTimes::phase_name(GCParPhases phase) {
  G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->phase_times();
  return phase_times->_gc_par_phases[phase]->short_name();
}

// Scoped tracker that splits elapsed time into "real" phase time and queue
// trim time drawn from the per-thread scan state (pss).
G1EvacPhaseWithTrimTimeTracker::G1EvacPhaseWithTrimTimeTracker(G1ParScanThreadState* pss, Tickspan& total_time, Tickspan& trim_time) :
  _pss(pss),
  _start(Ticks::now()),
  _total_time(total_time),
  _trim_time(trim_time),
  _stopped(false) {

  assert(_pss->trim_ticks().value() == 0, "Possibly remaining trim ticks left over from previous use");
}

G1EvacPhaseWithTrimTimeTracker::~G1EvacPhaseWithTrimTimeTracker() {
  if (!_stopped) {
    stop();
  }
}

// Accumulates elapsed time minus trim time into _total_time, the trim time
// into _trim_time, and consumes the pss trim counter. May only run once.
void G1EvacPhaseWithTrimTimeTracker::stop() {
  assert(!_stopped, "Should only be called once");
  _total_time += (Ticks::now() - _start) - _pss->trim_ticks();
  _trim_time += _pss->trim_ticks();
  _pss->reset_trim_ticks();
  _stopped = true;
}

// RAII tracker: records the time between construction and destruction into
// the given phase slot for worker_id. A null phase_times disables tracking.
G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id, bool allow_multiple_record) :
  _start_time(), _phase(phase), _phase_times(phase_times), _worker_id(worker_id), _event(), _allow_multiple_record(allow_multiple_record) {
  if (_phase_times != nullptr) {
    _start_time = Ticks::now();
  }
}

G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
  if (_phase_times != nullptr) {
    if (_allow_multiple_record) {
      _phase_times->record_or_add_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds());
    } else {
      _phase_times->record_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds());
    }
    // Also emit a JFR event for this phase.
    _event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_phase));
  }
}

// Evacuation-phase variant that additionally separates out queue-trimming
// time, which is credited to ObjCopy instead of the tracked phase.
G1EvacPhaseTimesTracker::G1EvacPhaseTimesTracker(G1GCPhaseTimes* phase_times,
                                                 G1ParScanThreadState* pss,
                                                 G1GCPhaseTimes::GCParPhases phase,
                                                 uint worker_id) :
  G1GCParPhaseTimesTracker(phase_times, phase, worker_id),
  _total_time(),
  _trim_time(),
  _trim_tracker(pss, _total_time, _trim_time) {
}

G1EvacPhaseTimesTracker::~G1EvacPhaseTimesTracker() {
  if (_phase_times != nullptr) {
    // Explicitly stop the trim tracker since it's not yet destructed.
    _trim_tracker.stop();
    // Exclude trim time by increasing the start time.
    _start_time += _trim_time;
    _phase_times->record_or_add_time_secs(G1GCPhaseTimes::ObjCopy, _worker_id, _trim_time.seconds());
  }
}