/*
 * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/bitset.inline.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/globalDefinitions.hpp"

PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all, bool skip_bfs) :
  _sampler(sampler), _edge_store(edge_store), _cutoff_ticks(cutoff), _emit_all(emit_all), _skip_bfs(skip_bfs) {}

/* The EdgeQueue is backed by directly managed virtual memory.
 * We will attempt to dimension an initial reservation
 * in proportion to the size of the heap (represented by heap_region).
 * Initial memory reservation: 5% of the heap, but at least 32 Mb
 * Commit ratio: 1 : 10 (subject to allocation granularities)
 */
static size_t edge_queue_memory_reservation() {
  const size_t memory_reservation_bytes = MAX2(MaxHeapSize / 20, 32*M);
  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
  return memory_reservation_bytes;
}

static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
  return memory_commit_block_size_bytes;
}
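
// Worked example (figures are illustrative only): with MaxHeapSize = 4 GB the
// reservation is MAX2(4 GB / 20, 32 MB) ~= 205 MB and the commit block size is
// ~20.5 MB; with MaxHeapSize = 256 MB the 32 MB floor applies and the commit
// block size is 3.2 MB, which is why the assert above can use a 3 MB lower bound.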

static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
  log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
  log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
  log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
  if (edge_queue.reserved_size() > 0) {
    log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
  }
}
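
// Example of the summary above (figures are illustrative only):
//   EdgeQueue reserved size total: 209715 [KB]
//   EdgeQueue edges total: 482133
//   EdgeQueue liveset total: 11300 [KB]
//   EdgeQueue commit reserve ratio: 0.053883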

void PathToGcRootsOperation::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(_cutoff_ticks > 0, "invariant");

  // The bitset used for marking is dimensioned as a function of the heap size
  BitSet mark_bits;

  // The edge queue is dimensioned as a fraction of the heap size
  const size_t edge_queue_reservation_size = edge_queue_memory_reservation();
  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));

  // The initialize() routines will attempt to reserve and allocate backing storage memory.
  // Failure to accommodate will render root chain processing impossible.
  // As a fallback on failure, just write out the existing samples, flat, without chains.
  if (!edge_queue.initialize()) {
    log_warning(jfr)("Unable to allocate memory for root chain processing");
    return;
  }

  // Save the original markWord for the potential leak objects,
  // to be restored on function exit
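  // (save_mark_words() is understood to tag the sampled objects through their
  // markWord so the chain search below can recognize them, which is why the
  // original headers must be captured and later reinstated via 'marker'.)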
  ObjectSampleMarker marker;
  if (ObjectSampleCheckpoint::save_mark_words(_sampler, marker, _emit_all) == 0) {
    // no valid samples to process
    return;
  }

  // Necessary condition for attempting a root set iteration
  Universe::heap()->ensure_parsability(false);
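  // (ensure_parsability() fills the unused tails of thread-local allocation
  // buffers so the heap can be walked object by object; the 'false' argument
  // is taken to mean the TLABs are made parsable without being retired.)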

  BFSClosure bfs(&edge_queue, _edge_store, &mark_bits);
  RootSetClosure<BFSClosure> roots(&bfs);

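  // The cutoff timer bounds the traversal below. The second argument to start()
  // is taken to be the polling granularity: elapsed time is only re-checked
  // about once per that many processed units of work, not on every edge.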
  GranularTimer::start(_cutoff_ticks, 1000000);
  roots.process();
  if (edge_queue.is_full() || _skip_bfs) {
    // Pathological case where the roots do not fit in the queue, or BFS was
    // explicitly skipped. Fall back to a depth-first search, but mark the roots
    // first to avoid walking sideways over roots.
    DFSClosure::find_leaks_from_root_set(_edge_store, &mark_bits);
  } else {
    bfs.process();
  }
  GranularTimer::stop();
  log_edge_queue_summary(edge_queue);

  // Emit old objects including their reference chains as events
  EventEmitter emitter(GranularTimer::start_time(), GranularTimer::end_time());
  emitter.write_events(_sampler, _edge_store, _emit_all);
}