/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/globalDefinitions.hpp"

PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all) :
  _sampler(sampler), _edge_store(edge_store), _cutoff_ticks(cutoff), _emit_all(emit_all) {}

/* The EdgeQueue is backed by directly managed virtual memory.
 * We will attempt to dimension an initial reservation
 * in proportion to the size of the heap (represented by heap_region).
 * Initial memory reservation: 5% of the heap, but at least 32 MB.
 * Commit ratio: 1 : 10 (subject to allocation granularities).
 */
static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
  return memory_reservation_bytes;
}
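// For example: an 8 GB heap yields a reservation of 8192 MB / 20 = ~410 MB of
// address space, while for heaps smaller than 640 MB the 5% share falls below
// 32 MB and the 32 MB floor applies.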

static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
  return memory_commit_block_size_bytes;
}
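// For example: the minimum 32 MB reservation gives commit blocks of ~3.2 MB, which
// is what the 3*M assert above relies on.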

static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
  if (!(LogJFR && Verbose)) {
    return;
  }
  tty->print_cr("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
  tty->print_cr("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
  tty->print_cr("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
  if (edge_queue.reserved_size() > 0) {
    tty->print_cr("EdgeQueue commit reserve ratio: %f\n",
      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
  }
}

void PathToGcRootsOperation::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(_cutoff_ticks > 0, "invariant");

  // The bitset used for marking is dimensioned as a function of the heap size
  const MemRegion heap_region = Universe::heap()->reserved_region();
  BitSet mark_bits(heap_region);
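  // The mark bits record which heap addresses have already been visited during the
  // traversal, so each object is processed at most once by the closures below.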

  // The edge queue is dimensioned as a fraction of the heap size
  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
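  // Only the address range is reserved up front; backing memory is presumably
  // committed incrementally, one commit block at a time, as edges are appended.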

  // The initialize() routines will attempt to reserve and allocate backing storage memory.
  // Failure to accommodate will render root chain processing impossible.
  // As a fallback on failure, just write out the existing samples, flat, without chains.
  if (!(mark_bits.initialize() && edge_queue.initialize())) {
    if (LogJFR) tty->print_cr("Unable to allocate memory for root chain processing");
    return;
  }

  // Save the original mark words of the potential leak objects,
  // to be restored on function exit
  ObjectSampleMarker marker;
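  // The marker presumably reinstates the saved mark words when it goes out of scope.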
  if (ObjectSampleCheckpoint::mark(_sampler, marker, _emit_all) == 0) {
    // no valid samples to process
    return;
  }

  // Necessary condition for attempting a root set iteration
  Universe::heap()->ensure_parsability(false);
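  // ensure_parsability() fills gaps such as unused TLAB space with filler objects so
  // the heap can be walked object by object; 'false' presumably means the TLABs are
  // left in place rather than retired.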

  BFSClosure bfs(&edge_queue, _edge_store, &mark_bits);
  RootSetClosure<BFSClosure> roots(&bfs);

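  // roots.process() applies the BFS closure to every GC root, seeding the edge queue
  // with the first level of reference edges. The walk is bounded by _cutoff_ticks;
  // the second argument to GranularTimer::start is presumably the number of
  // iterations between time checks, keeping timer overhead low.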
  GranularTimer::start(_cutoff_ticks, 1000000);
  roots.process();
  if (edge_queue.is_full()) {
    // Pathological case where roots don't fit in the queue
    // Do a depth-first search, but mark roots first
    // to avoid walking sideways over roots
    DFSClosure::find_leaks_from_root_set(_edge_store, &mark_bits);
  } else {
    bfs.process();
  }
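  // BFS is preferred because it yields the shortest reference chain to each sample;
  // the DFS fallback gives up that property but does not depend on the exhausted
  // edge queue.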
  GranularTimer::stop();
  log_edge_queue_summary(edge_queue);

  // Emit old objects including their reference chains as events
  EventEmitter emitter(GranularTimer::start_time(), GranularTimer::end_time());
  emitter.write_events(_sampler, _edge_store, _emit_all);
}