/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/workgroup.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"

#ifdef ASSERT

#define SWEEP(nm) record_sweep(nm, __LINE__)
// Sweeper logging code
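// When LogSweeper is enabled in a debug build, every nmethod the sweeper
// processes is recorded in a fixed-size ring buffer of SweeperLogEntries
// SweeperRecord entries (see record_sweep() below). The buffer can be
// inspected from a debugger, e.g. via SweeperRecord::print(), to reconstruct
// the sweeper's recent activity.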
class SweeperRecord {
 public:
  int traversal;
  int compile_id;
  long traversal_mark;
  int state;
  const char* kind;
  address vep;
  address uep;
  int line;

  void print() {
      tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                    PTR_FORMAT " state = %d traversal_mark %ld line = %d",
                    traversal,
                    compile_id,
                    kind == NULL ? "" : kind,
                    p2i(uep),
                    p2i(vep),
                    state,
                    traversal_mark,
                    line);
  }
};

static int _sweep_index = 0;
static SweeperRecord* _records = NULL;

void NMethodSweeper::record_sweep(CompiledMethod* nm, int line) {
  if (_records != NULL) {
    _records[_sweep_index].traversal = _traversals;
    _records[_sweep_index].traversal_mark = nm->is_nmethod() ? ((nmethod*)nm)->stack_traversal_mark() : 0;
    _records[_sweep_index].compile_id = nm->compile_id();
    _records[_sweep_index].kind = nm->compile_kind();
    _records[_sweep_index].state = nm->get_state();
    _records[_sweep_index].vep = nm->verified_entry_point();
    _records[_sweep_index].uep = nm->entry_point();
    _records[_sweep_index].line = line;
    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
  }
}

void NMethodSweeper::init_sweeper_log() {
  if (LogSweeper && _records == NULL) {
    // Create the ring buffer for the logging code
    _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
    memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
  }
}
#else
#define SWEEP(nm)
#endif

CompiledMethodIterator NMethodSweeper::_current(CompiledMethodIterator::all_blobs); // Current compiled method
long     NMethodSweeper::_traversals                   = 0;    // Stack scan count, also sweep ID.
long     NMethodSweeper::_total_nof_code_cache_sweeps  = 0;    // Total number of full sweeps of the code cache
int      NMethodSweeper::_seen                         = 0;    // Number of nmethods we have processed so far in the current pass of the CodeCache
size_t   NMethodSweeper::_sweep_threshold_bytes        = 0;    // Threshold for when to sweep. Updated after ergonomics

volatile bool NMethodSweeper::_should_sweep            = false;// Indicates if a normal sweep will be done
volatile bool NMethodSweeper::_force_sweep             = false;// Indicates if a forced sweep will be done
volatile size_t NMethodSweeper::_bytes_changed         = 0;    // Counts the total nmethod size if the nmethod changed from:
                                                               //   1) alive       -> not_entrant
                                                               //   2) not_entrant -> zombie
int    NMethodSweeper::_hotness_counter_reset_val       = 0;

long   NMethodSweeper::_total_nof_methods_reclaimed     = 0;   // Accumulated nof methods flushed
long   NMethodSweeper::_total_nof_c2_methods_reclaimed  = 0;   // Accumulated nof C2 methods flushed
size_t NMethodSweeper::_total_flushed_size              = 0;   // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping;                 // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep;               // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time;                     // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time;            // Peak time sweeping one fraction

class MarkActivationClosure: public CodeBlobClosure {
public:
  virtual void do_code_blob(CodeBlob* cb) {
    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
    nmethod* nm = (nmethod*)cb;
    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
    // If we see an activation belonging to a non_entrant nmethod, we mark it.
    if (nm->is_not_entrant()) {
      nm->mark_as_seen_on_stack();
    }
  }
};
static MarkActivationClosure mark_activation_closure;

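// The hotness counter is (re)set to a value that scales with the size of the
// code cache: roughly two units per MB of ReservedCodeCacheSize (with a
// minimum of 1). For example, a 240 MB code cache yields a reset value of
// 480. The counter is decremented once per sweep in possibly_flush() and
// reset by MarkActivationClosure whenever the method is seen on a stack.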
int NMethodSweeper::hotness_counter_reset_val() {
  if (_hotness_counter_reset_val == 0) {
    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
  }
  return _hotness_counter_reset_val;
}
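
// Returns true when the iterator has walked the entire code cache, i.e. the
// current sweep is complete and the sweeper is waiting for the next stack
// scan before a new cycle can start.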
bool NMethodSweeper::wait_for_stack_scanning() {
  return _current.end();
}

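// Handshake closure that is executed against every JavaThread: it applies the
// given CodeBlobClosure (here the activation-marking closure above) to the
// nmethods that are currently on the thread's stack. The code cache sweeper
// thread itself is skipped.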
class NMethodMarkingClosure : public HandshakeClosure {
private:
  CodeBlobClosure* _cl;
public:
  NMethodMarkingClosure(CodeBlobClosure* cl) : HandshakeClosure("NMethodMarking"), _cl(cl) {}
  void do_thread(Thread* thread) {
    if (thread->is_Java_thread() && !thread->is_Code_cache_sweeper_thread()) {
      JavaThread::cast(thread)->nmethods_do(_cl);
    }
  }
};

CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
#ifdef ASSERT
  assert(Thread::current()->is_Code_cache_sweeper_thread(), "must be executed under CodeCache_lock and in sweeper thread");
  assert_lock_strong(CodeCache_lock);
#endif

  // If we do not want to reclaim not-entrant or zombie methods, there is no need
  // to scan stacks.
  if (!MethodFlushing) {
    return NULL;
  }

  // Check for restart
  assert(_current.method() == NULL, "should only happen between sweeper cycles");
  assert(wait_for_stack_scanning(), "should only happen between sweeper cycles");

  _seen = 0;
  _current = CompiledMethodIterator(CompiledMethodIterator::all_blobs);
  // Initialize to first nmethod
  _current.next();
  _traversals += 1;
  _total_time_this_sweep = Tickspan();

  if (PrintMethodFlushing) {
    tty->print_cr("### Sweep: stack traversal %ld", _traversals);
  }
  return &mark_activation_closure;
}

/**
  * This function triggers a VM operation that does stack scanning of active
  * methods. Stack scanning is mandatory for the sweeper to make progress.
  */
void NMethodSweeper::do_stack_scanning() {
  assert(!CodeCache_lock->owned_by_self(), "just checking");
  // There are stacks in the heap that need to be scanned.
  Universe::heap()->collect_for_codecache();
  if (wait_for_stack_scanning()) {
    CodeBlobClosure* code_cl;
    {
      MutexLocker ccl(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      code_cl = prepare_mark_active_nmethods();
    }
    if (code_cl != NULL) {
      NMethodMarkingClosure nm_cl(code_cl);
      Handshake::execute(&nm_cl);
    }
  }
}

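// The sweeper thread spends its life in this loop: it parks on CodeSweeper_lock
// and is woken up by report_allocation(), report_state_change() or force_sweep().
// The very long wait time (24 hours) acts only as a safety valve; a sweep is
// performed only when one of the wake-up conditions has actually been set.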
void NMethodSweeper::sweeper_loop() {
  bool timeout;
  while (true) {
    {
      ThreadBlockInVM tbivm(JavaThread::current());
      MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
      const long wait_time = 60*60*24 * 1000; // 24 hours in milliseconds
      timeout = waiter.wait(wait_time);
    }
    if (!timeout && (_should_sweep || _force_sweep)) {
      sweep();
    }
  }
}

/**
  * Wakes up the sweeper thread to sweep if code cache space runs low
  */
void NMethodSweeper::report_allocation(int code_blob_type) {
  if (should_start_aggressive_sweep(code_blob_type)) {
    MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
    _should_sweep = true;
    CodeSweeper_lock->notify();
  }
}

bool NMethodSweeper::should_start_aggressive_sweep(int code_blob_type) {
  // Makes sure that we do not invoke the sweeper too often during startup.
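  // StartAggressiveSweepingAt is a percentage of free code cache space: e.g. a
  // value of 10 gives a threshold of 100/10 = 10, so an aggressive sweep is
  // requested once reverse_free_ratio() indicates that roughly less than 10%
  // of the code heap is still unallocated.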
  double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
  double aggressive_sweep_threshold = MAX2(start_threshold, 1.1);
  return (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold);
}

/**
  * Wakes up the sweeper thread and forces a sweep. Blocks until it has finished.
  */
void NMethodSweeper::force_sweep() {
  ThreadBlockInVM tbivm(JavaThread::current());
  MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
  // Request forced sweep
  _force_sweep = true;
  while (_force_sweep) {
    // Notify the sweeper that we want to force a sweep and wait for completion.
    // If a sweep is currently in progress, we time out and try again because
    // we want to enforce a full sweep.
    CodeSweeper_lock->notify();
    waiter.wait(1000);
  }
}

/**
 * Handle a safepoint request
 */
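// The sweeper iterates over the code cache while holding CodeCache_lock. To
// let a pending safepoint (or handshake) make progress, the lock is dropped
// temporarily and the thread blocks in the VM state transition below.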
void NMethodSweeper::handle_safepoint_request() {
  JavaThread* thread = JavaThread::current();
  if (SafepointMechanism::should_process(thread)) {
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nmethod_count());
    }
    MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    ThreadBlockInVM tbivm(thread);
  }
}

void NMethodSweeper::sweep() {
  assert(_should_sweep || _force_sweep, "must have been set");
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  Atomic::store(&_bytes_changed, static_cast<size_t>(0)); // reset regardless of sleep reason
  if (_should_sweep) {
    MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
    _should_sweep = false;
  }

  do_stack_scanning();

  init_sweeper_log();
  sweep_code_cache();

  // We have now completed one full sweep of the code cache.
  _total_nof_code_cache_sweeps++;

  if (_force_sweep) {
    // Notify the requester that the forced sweep finished
    MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
    _force_sweep = false;
    CodeSweeper_lock->notify();
  }
}

static void post_sweep_event(EventSweepCodeCache* event,
                             const Ticks& start,
                             const Ticks& end,
                             s4 traversals,
                             int swept,
                             int flushed,
                             int zombified) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_starttime(start);
  event->set_endtime(end);
  event->set_sweepId(traversals);
  event->set_sweptCount(swept);
  event->set_flushedCount(flushed);
  event->set_zombifiedCount(zombified);
  event->commit();
}

void NMethodSweeper::sweep_code_cache() {
  ResourceMark rm;
  Ticks sweep_start_counter = Ticks::now();

  log_debug(codecache, sweep, start)("CodeCache flushing");

  int flushed_count    = 0;
  int zombified_count  = 0;
  int flushed_c2_count = 0;

  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nmethod_count());
  }

  int swept_count = 0;
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    while (!_current.end()) {
      swept_count++;
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod.  Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      CompiledMethod* nm = _current.method();
      _current.next();

      // Now ready to process the nmethod and give up the CodeCache_lock
      {
        MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        // Save information before potentially flushing the nmethod.
        // Only nmethods are flushed, so only their size matters.
        int size = nm->is_nmethod() ? ((nmethod*)nm)->total_size() : 0;
        bool is_c2_method = nm->is_compiled_by_c2();
        bool is_osr = nm->is_osr_method();
        int compile_id = nm->compile_id();
        intptr_t address = p2i(nm);
        const char* state_before = nm->state();
        const char* state_after = "";

        MethodStateChange type = process_compiled_method(nm);
        switch (type) {
          case Flushed:
            state_after = "flushed";
            freed_memory += size;
            ++flushed_count;
            if (is_c2_method) {
              ++flushed_c2_count;
            }
            break;
          case MadeZombie:
            state_after = "made zombie";
            ++zombified_count;
            break;
          case None:
            break;
          default:
            ShouldNotReachHere();
        }
        if (PrintMethodFlushing && Verbose && type != None) {
          tty->print_cr("### %s nmethod %3d/" PTR_FORMAT " (%s) %s", is_osr ? "osr" : "", compile_id, address, state_before, state_after);
        }
      }

      _seen++;
      handle_safepoint_request();
    }
  }


  assert(_current.end(), "must have scanned the whole cache");

  const Ticks sweep_end_counter = Ticks::now();
  const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
  {
    MutexLocker mu(NMethodSweeperStats_lock, Mutex::_no_safepoint_check_flag);
    _total_time_sweeping  += sweep_time;
    _total_time_this_sweep += sweep_time;
    _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
    _total_flushed_size += freed_memory;
    _total_nof_methods_reclaimed += flushed_count;
    _total_nof_c2_methods_reclaimed += flushed_c2_count;
    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
  }

  EventSweepCodeCache event(UNTIMED);
  if (event.should_commit()) {
    post_sweep_event(&event, sweep_start_counter, sweep_end_counter, (s4)_traversals, swept_count, flushed_count, zombified_count);
  }

#ifdef ASSERT
  if (PrintMethodFlushing) {
    tty->print_cr("### sweeper:      sweep time(" JLONG_FORMAT "): ", sweep_time.value());
  }
#endif


  Log(codecache, sweep) log;
  if (log.is_debug()) {
    LogStream ls(log.debug());
    CodeCache::print_summary(&ls, false);
  }
  log_sweep("finished");

  // The sweeper is the only place where memory is released, so check here whether
  // it is time to restart the compiler. Checking only for a certain amount of free
  // memory in the code cache might re-enable compilation although no memory has
  // actually been released. For example, there are cases where compilation was
  // disabled although 4MB (or more) of the code cache were free; the reason is
  // code cache fragmentation. Therefore, it only makes sense to re-enable
  // compilation if we have actually freed memory. Note that typically several kB
  // are released by sweeping 16MB of the code cache. As a result, we require
  // 'freed_memory' > 0 before restarting the compiler.
  if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    log.debug("restart compiler");
    log_sweep("restart_compiler");
  }
}

// This function updates the sweeper statistics that keep track of nmethod
// state changes. If there is 'enough' state change, the sweeper is invoked
// as soon as possible. Also, we are guaranteed to invoke the sweeper if
// the code cache gets full.
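// 'Enough' is defined by _sweep_threshold_bytes: once the accumulated size of
// nmethods that changed state exceeds that threshold, the sweeper thread is
// woken up via CodeSweeper_lock.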
void NMethodSweeper::report_state_change(nmethod* nm) {
  Atomic::add(&_bytes_changed, (size_t)nm->total_size());
  if (Atomic::load(&_bytes_changed) > _sweep_threshold_bytes) {
    MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
    _should_sweep = true;
    CodeSweeper_lock->notify(); // Wake up sweeper.
  }
}

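// RAII helper for the sweeper thread: while a CompiledMethodMarker is live,
// the method currently being processed is published via
// set_scanned_compiled_method() so that it is not unloaded underneath the
// sweeper; the destructor clears the field again.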
class CompiledMethodMarker: public StackObj {
 private:
  CodeCacheSweeperThread* _thread;
 public:
  CompiledMethodMarker(CompiledMethod* cm) {
    JavaThread* current = JavaThread::current();
    assert(current->is_Code_cache_sweeper_thread(), "Must be");
    _thread = (CodeCacheSweeperThread*)current;
    if (!cm->is_zombie() && !cm->is_unloading()) {
      // Only expose live nmethods for scanning
      _thread->set_scanned_compiled_method(cm);
    }
  }
  ~CompiledMethodMarker() {
    _thread->set_scanned_compiled_method(NULL);
  }
};

NMethodSweeper::MethodStateChange NMethodSweeper::process_compiled_method(CompiledMethod* cm) {
  assert(cm != NULL, "sanity");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  MethodStateChange result = None;
  // Make sure this nmethod doesn't get unloaded during the scan,
  // since safepoints may happen while the locks below are acquired.
  CompiledMethodMarker nmm(cm);
  SWEEP(cm);

  // Skip methods that are currently referenced by the VM
  if (cm->is_locked_by_vm()) {
    // But still remember to clean up inline caches for alive nmethods
    if (cm->is_alive()) {
      // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
      cm->cleanup_inline_caches(false);
      SWEEP(cm);
    }
    return result;
  }

  if (cm->is_zombie()) {
    // All inline caches that referred to this nmethod were cleaned in the
    // previous sweeper cycle. Now flush the nmethod from the code cache.
    assert(!cm->is_locked_by_vm(), "must not flush locked Compiled Methods");
    cm->flush();
    assert(result == None, "sanity");
    result = Flushed;
  } else if (cm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    OrderAccess::loadload(); // _stack_traversal_mark and _state
    if (cm->can_convert_to_zombie()) {
      // Code cache state change is tracked in make_zombie()
      cm->make_zombie();
      SWEEP(cm);
      assert(result == None, "sanity");
      result = MadeZombie;
      assert(cm->is_zombie(), "nmethod must be zombie");
    } else {
      // Still alive, clean up its inline caches
      cm->cleanup_inline_caches(false);
      SWEEP(cm);
    }
  } else if (cm->is_unloaded()) {
    // Code is unloaded, so there are no activations on the stack.
    // Convert the nmethod to zombie.
    // Code cache state change is tracked in make_zombie()
    cm->make_zombie();
    SWEEP(cm);
    assert(result == None, "sanity");
    result = MadeZombie;
  } else {
    if (cm->is_nmethod()) {
      possibly_flush((nmethod*)cm);
    }
    // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
    cm->cleanup_inline_caches(false);
    SWEEP(cm);
  }
  return result;
}


void NMethodSweeper::possibly_flush(nmethod* nm) {
  if (UseCodeCacheFlushing) {
    if (!nm->is_locked_by_vm() && !nm->is_native_method() && !nm->is_not_installed() && !nm->is_unloading()) {
      bool make_not_entrant = false;

      // Do not make native methods not-entrant (they are already excluded by the check above).
      nm->dec_hotness_counter();
      // Get the initial value of the hotness counter. This value depends on the
      // ReservedCodeCacheSize.
      int reset_val = hotness_counter_reset_val();
      int time_since_reset = reset_val - nm->hotness_counter();
      int code_blob_type = CodeCache::get_code_blob_type(nm);
      double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
      // The less free space the code cache has, the bigger reverse_free_ratio() is.
      // I.e., 'threshold' increases with lower available space in the code cache and a higher
      // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
      // value until it is reset by stack walking - is smaller than the computed threshold, the
      // corresponding nmethod is considered for removal.
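      // Worked example (illustrative; assumes NmethodSweepActivity is 10):
      // a 240 MB code cache gives reset_val = 480, so threshold = -480 + 10 * rfr,
      // where rfr = reverse_free_ratio(). While plenty of space is free (small rfr)
      // the threshold is negative and only methods whose counter has decayed far
      // below zero qualify. Once the code heap is nearly full (rfr around 48) the
      // threshold reaches zero, so any method that has not been seen on a stack
      // for more than reset_val sweeps becomes a candidate for removal.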
      if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) {
        // A method is marked as not-entrant if the method is
        // 1) 'old enough': nm->hotness_counter() < threshold
        // 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush)
        //    The second condition is necessary if we are dealing with very small code cache
        //    sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
        //    The second condition ensures that methods are not immediately made not-entrant
        //    after compilation.
        make_not_entrant = true;
      }

      // The stack-scanning low-cost detection may not see that the method was used
      // (which can happen for flat profiles). Check the age counter for possible data.
      if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) {
        MethodCounters* mc = nm->method()->get_method_counters(Thread::current());
        if (mc != NULL) {
          // Snapshot the value as it's changed concurrently
          int age = mc->nmethod_age();
          if (MethodCounters::is_nmethod_hot(age)) {
            // The method has gone through flushing, and it became hot enough that it deopted
            // before we could take a look at it. Give it more time to appear in the stack traces,
            // proportional to the number of deopts.
            MethodData* md = nm->method()->method_data();
            if (md != NULL && time_since_reset > (int)(MinPassesBeforeFlush * (md->tenure_traps() + 1))) {
              // It's been long enough, and we still haven't seen it on the stack.
              // Try to flush it, but enable counters the next time.
              mc->reset_nmethod_age();
            } else {
              make_not_entrant = false;
            }
          } else if (MethodCounters::is_nmethod_warm(age)) {
            // Method has counters enabled, and the method was used within
            // the previous MinPassesBeforeFlush sweeps. Reset the counter. Stay in the existing
            // compiled state.
            mc->reset_nmethod_age();
            // delay the next check
            nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
            make_not_entrant = false;
          } else if (MethodCounters::is_nmethod_age_unset(age)) {
            // No counters were used before. Set the counters to the detection
            // limit value. If the method is going to be used again, it will be compiled
            // with counters that we're going to use for analysis the next time.
            mc->reset_nmethod_age();
          } else {
            // The method was totally idle for 10 sweeps.
            // The counter already has the initial value; flush it and maybe recompile
            // it later with counters.
          }
        }
      }

      if (make_not_entrant) {
        nm->make_not_entrant();

        // Code cache state change is tracked in make_not_entrant()
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %d/" PTR_FORMAT " made not-entrant: hotness counter %d/%d threshold %f",
              nm->compile_id(), p2i(nm), nm->hotness_counter(), reset_val, threshold);
        }
      }
    }
  }
}

// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    ResourceMark rm;
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr("%s", s.as_string());
  }

  if (LogCompilation && (xtty != NULL)) {
    ResourceMark rm;
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      xtty->vprint(format, ap);
      va_end(ap);
    }
    xtty->print("%s", s.as_string());
    xtty->stamp();
    xtty->end_elem();
  }
}

void NMethodSweeper::print(outputStream* out) {
  ttyLocker ttyl;
  out = (out == NULL) ? tty : out;
  out->print_cr("Code cache sweeper statistics:");
  out->print_cr("  Total sweep time:                %1.0lf ms", (double)_total_time_sweeping.value()/1000000);
  out->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
  out->print_cr("  Total number of flushed methods: %ld (thereof %ld C2 methods)", _total_nof_methods_reclaimed,
                                                    _total_nof_c2_methods_reclaimed);
  out->print_cr("  Total size of flushed methods:   " SIZE_FORMAT " kB", _total_flushed_size/K);
}