< prev index next >

src/share/vm/runtime/safepoint.cpp

Print this page




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/nmethod.hpp"
  31 #include "code/pcDesc.hpp"
  32 #include "code/scopeDesc.hpp"
  33 #include "gc_interface/collectedHeap.hpp"
  34 #include "interpreter/interpreter.hpp"

  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.inline.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "oops/symbol.hpp"
  39 #include "runtime/compilationPolicy.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/interfaceSupport.hpp"
  43 #include "runtime/mutexLocker.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/osThread.hpp"
  46 #include "runtime/safepoint.hpp"
  47 #include "runtime/signature.hpp"
  48 #include "runtime/stubCodeGenerator.hpp"
  49 #include "runtime/stubRoutines.hpp"
  50 #include "runtime/sweeper.hpp"
  51 #include "runtime/synchronizer.hpp"
  52 #include "runtime/thread.inline.hpp"
  53 #include "services/runtimeService.hpp"
  54 #include "utilities/events.hpp"


  66 # include "vmreg_zero.inline.hpp"
  67 #endif
  68 #ifdef TARGET_ARCH_arm
  69 # include "nativeInst_arm.hpp"
  70 # include "vmreg_arm.inline.hpp"
  71 #endif
  72 #ifdef TARGET_ARCH_ppc
  73 # include "nativeInst_ppc.hpp"
  74 # include "vmreg_ppc.inline.hpp"
  75 #endif
  76 #if INCLUDE_ALL_GCS
  77 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  78 #include "gc_implementation/shared/suspendibleThreadSet.hpp"
  79 #endif // INCLUDE_ALL_GCS
  80 #ifdef COMPILER1
  81 #include "c1/c1_globals.hpp"
  82 #endif
  83 
  84 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  85 



































































  86 // --------------------------------------------------------------------------------------------------
  87 // Implementation of Safepoint begin/end
  88 
  89 SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
  90 volatile int  SafepointSynchronize::_waiting_to_block = 0;
  91 volatile int SafepointSynchronize::_safepoint_counter = 0;
  92 int SafepointSynchronize::_current_jni_active_count = 0;
  93 long  SafepointSynchronize::_end_of_last_safepoint = 0;
  94 static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
  95 static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
  96 static bool timeout_error_printed = false;
  97 
  98 // Roll all threads forward to a safepoint and suspend them all
  99 void SafepointSynchronize::begin() {
 100 
 101   Thread* myThread = Thread::current();
 102   assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
 103 
 104   if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
 105     _safepoint_begin_time = os::javaTimeNanos();
 106     _ts_of_current_safepoint = tty->time_stamp().seconds();
 107   }
 108 
 109 #if INCLUDE_ALL_GCS
 110   if (UseConcMarkSweepGC) {
 111     // In the future we should investigate whether CMS can use the
 112     // more-general mechanism below.  DLD (01/05).
 113     ConcurrentMarkSweepThread::synchronize(false);
 114   } else if (UseG1GC) {
 115     SuspendibleThreadSet::synchronize();
 116   }
 117 #endif // INCLUDE_ALL_GCS
 118 
 119   // By getting the Threads_lock, we assure that no threads are about to start or
 120   // exit. It is released again in SafepointSynchronize::end().


 172   //     (on MP systems).  In order to avoid the overhead of issuing
 173   //     a memory barrier for each Java thread making native calls, each Java
 174   //     thread performs a write to a single memory page after changing
 175   //     the thread state.  The VM thread performs a sequence of
 176   //     mprotect OS calls which forces all previous writes from all
 177   //     Java threads to be serialized.  This is done in the
 178   //     os::serialize_thread_states() call.  This has proven to be
 179   //     much more efficient than executing a membar instruction
 180   //     on every call to native code.
 181   //  3. Running compiled Code
 182   //     Compiled code reads a global (Safepoint Polling) page that
 183   //     is set to fault if we are trying to get to a safepoint.
 184   //  4. Blocked
 185   //     A thread which is blocked will not be allowed to return from the
 186   //     block condition until the safepoint operation is complete.
 187   //  5. In VM or Transitioning between states
 188   //     If a Java thread is currently running in the VM or transitioning
 189   //     between states, the safepointing code will wait for the thread to
 190   //     block itself when it attempts transitions to a new state.
 191   //



 192   _state            = _synchronizing;
 193   OrderAccess::fence();
 194 
 195   // Flush all thread states to memory
 196   if (!UseMembar) {
 197     os::serialize_thread_states();
 198   }
 199 
 200   // Make interpreter safepoint aware
 201   Interpreter::notice_safepoints();
 202 
 203   if (UseCompilerSafepoints && DeferPollingPageLoopCount < 0) {
 204     // Make polling safepoint aware
 205     guarantee (PageArmed == 0, "invariant") ;
 206     PageArmed = 1 ;
 207     os::make_polling_page_unreadable();
 208   }
 209 
 210   // Consider using active_processor_count() ... but that call is expensive.
 211   int ncpus = os::processor_count() ;


 226   int steps = 0 ;
 227   while(still_running > 0) {
 228     for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
 229       assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectly being suspended");
 230       ThreadSafepointState *cur_state = cur->safepoint_state();
 231       if (cur_state->is_running()) {
 232         cur_state->examine_state_of_thread();
 233         if (!cur_state->is_running()) {
 234            still_running--;
 235            // consider adjusting steps downward:
 236            //   steps = 0
 237            //   steps -= NNN
 238            //   steps >>= 1
 239            //   steps = MIN(steps, 2000-100)
 240            //   if (iterations != 0) steps -= NNN
 241         }
 242         if (TraceSafepoint && Verbose) cur_state->print();
 243       }
 244     }
 245 
 246     if (PrintSafepointStatistics && iterations == 0) {
 247       begin_statistics(nof_threads, still_running);



 248     }
 249 
 250     if (still_running > 0) {
 251       // Check for if it takes to long
 252       if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
 253         print_safepoint_timeout(_spinning_timeout);
 254       }
 255 
 256       // Spin to avoid context switching.
 257       // There's a tension between allowing the mutators to run (and rendezvous)
 258       // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
 259       // a mutator might otherwise use profitably to reach a safepoint.  Excessive
 260       // spinning by the VM thread on a saturated system can increase rendezvous latency.
 261       // Blocking or yielding incur their own penalties in the form of context switching
 262       // and the resultant loss of $ residency.
 263       //
 264       // Further complicating matters is that yield() does not work as naively expected
 265       // on many platforms -- yield() does not guarantee that any other ready threads
 266       // will run.   As such we revert yield_all() after some number of iterations.
 267       // Yield_all() is implemented as a short unconditional sleep on some platforms.


 319         SpinPause() ;     // MP-Polite spin
 320       } else
 321       if (steps < DeferThrSuspendLoopCount) {
 322         os::NakedYield() ;
 323       } else {
 324         os::yield_all(steps) ;
 325         // Alternately, the VM thread could transiently depress its scheduling priority or
 326         // transiently increase the priority of the tardy mutator(s).
 327       }
 328 
 329       iterations ++ ;
 330     }
 331     assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
 332   }
 333   assert(still_running == 0, "sanity check");
 334 
 335   if (PrintSafepointStatistics) {
 336     update_statistics_on_spin_end();
 337   }
 338 




 339   // wait until all threads are stopped
 340   while (_waiting_to_block > 0) {
 341     if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
 342     if (!SafepointTimeout || timeout_error_printed) {
 343       Safepoint_lock->wait(true);  // true, means with no safepoint checks
 344     } else {
 345       // Compute remaining time
 346       jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
 347 
 348       // If there is no remaining time, then there is an error
 349       if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
 350         print_safepoint_timeout(_blocking_timeout);





 351       }
 352     }
 353   }
 354   assert(_waiting_to_block == 0, "sanity check");
 355 
 356 #ifndef PRODUCT
 357   if (SafepointTimeout) {
 358     jlong current_time = os::javaTimeNanos();
 359     if (safepoint_limit_time < current_time) {
 360       tty->print_cr("# SafepointSynchronize: Finished after "
 361                     INT64_FORMAT_W(6) " ms",
 362                     ((current_time - safepoint_limit_time) / MICROUNITS +
 363                      SafepointTimeoutDelay));

 364     }
 365   }
 366 #endif
 367 
 368   assert((_safepoint_counter & 0x1) == 0, "must be even");
 369   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 370   _safepoint_counter ++;
 371 
 372   // Record state
 373   _state = _synchronized;
 374 
 375   OrderAccess::fence();





 376 
 377 #ifdef ASSERT
 378   for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
 379     // make sure all the threads were visited
 380     assert(cur->was_visited_for_critical_count(), "missed a thread");
 381   }
 382 #endif // ASSERT
 383 
 384   // Update the count of active JNI critical regions
 385   GC_locker::set_jni_lock_count(_current_jni_active_count);
 386 
 387   if (TraceSafepoint) {
 388     VM_Operation *op = VMThread::vm_operation();
 389     tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
 390   }
 391 
 392   RuntimeService::record_safepoint_synchronized();
 393   if (PrintSafepointStatistics) {
 394     update_statistics_on_sync_end(os::javaTimeNanos());
 395   }
 396 
 397   // Call stuff that needs to be run when a safepoint is just about to be completed
 398   do_cleanup_tasks();






 399 
 400   if (PrintSafepointStatistics) {
 401     // Record how much time spend on the above cleanup tasks
 402     update_statistics_on_cleanup_end(os::javaTimeNanos());
 403   }




 404 }
 405 
 406 // Wake up all threads, so they are ready to resume execution after the safepoint
 407 // operation has been carried out
 408 void SafepointSynchronize::end() {
 409 
 410   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 411   assert((_safepoint_counter & 0x1) == 1, "must be odd");

 412   _safepoint_counter ++;
 413   // memory fence isn't required here since an odd _safepoint_counter
 414   // value can do no harm and a fence is issued below anyway.
 415 
 416   DEBUG_ONLY(Thread* myThread = Thread::current();)
 417   assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");
 418 
 419   if (PrintSafepointStatistics) {
 420     end_statistics(os::javaTimeNanos());
 421   }
 422 
 423 #ifdef ASSERT
 424   // A pending_exception cannot be installed during a safepoint.  The threads
 425   // may install an async exception after they come back from a safepoint into
 426   // pending_exception after they unblock.  But that should happen later.
 427   for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
 428     assert (!(cur->has_pending_exception() &&
 429               cur->safepoint_state()->is_at_poll_safepoint()),
 430             "safepoint installed a pending exception");
 431   }


 477     }
 478 
 479     RuntimeService::record_safepoint_end();
 480 
 481     // Release threads lock, so threads can be created/destroyed again. It will also starts all threads
 482     // blocked in signal_thread_blocked
 483     Threads_lock->unlock();
 484 
 485   }
 486 #if INCLUDE_ALL_GCS
 487   // If there are any concurrent GC threads resume them.
 488   if (UseConcMarkSweepGC) {
 489     ConcurrentMarkSweepThread::desynchronize(false);
 490   } else if (UseG1GC) {
 491     SuspendibleThreadSet::desynchronize();
 492   }
 493 #endif // INCLUDE_ALL_GCS
 494   // record this time so VMThread can keep track how much time has elasped
 495   // since last safepoint.
 496   _end_of_last_safepoint = os::javaTimeMillis();



 497 }
 498 
 499 bool SafepointSynchronize::is_cleanup_needed() {
 500   // Need a safepoint if some inline cache buffers is non-empty
 501   if (!InlineCacheBuffer::is_empty()) return true;
 502   return false;
 503 }
 504 
 505 
 506 
 507 // Various cleaning tasks that should be done periodically at safepoints
 508 void SafepointSynchronize::do_cleanup_tasks() {
 509   {
 510     TraceTime t1("deflating idle monitors", TraceSafepointCleanupTime);


 511     ObjectSynchronizer::deflate_idle_monitors();



 512   }
 513 
 514   {
 515     TraceTime t2("updating inline caches", TraceSafepointCleanupTime);


 516     InlineCacheBuffer::update_inline_caches();



 517   }
 518   {
 519     TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime);


 520     CompilationPolicy::policy()->do_safepoint_work();



 521   }
 522 
 523   {
 524     TraceTime t4("mark nmethods", TraceSafepointCleanupTime);


 525     NMethodSweeper::mark_active_nmethods();



 526   }
 527 
 528   if (SymbolTable::needs_rehashing()) {
 529     TraceTime t5("rehashing symbol table", TraceSafepointCleanupTime);


 530     SymbolTable::rehash_table();



 531   }
 532 
 533   if (StringTable::needs_rehashing()) {
 534     TraceTime t6("rehashing string table", TraceSafepointCleanupTime);


 535     StringTable::rehash_table();



 536   }
 537 
 538   // rotate log files?
 539   if (UseGCLogFileRotation) {
 540     gclog_or_tty->rotate_log(false);
 541   }
 542 
 543   {
 544     // CMS delays purging the CLDG until the beginning of the next safepoint and to
 545     // make sure concurrent sweep is done
 546     TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
 547     ClassLoaderDataGraph::purge_if_needed();
 548   }
 549 }
 550 
 551 
 552 bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
 553   switch(state) {
 554   case _thread_in_native:
 555     // native threads are safe if they have no java stack or have walkable stack




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/nmethod.hpp"
  31 #include "code/pcDesc.hpp"
  32 #include "code/scopeDesc.hpp"
  33 #include "gc_interface/collectedHeap.hpp"
  34 #include "interpreter/interpreter.hpp"
  35 #include "jfr/jfrEvents.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "memory/universe.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "oops/symbol.hpp"
  40 #include "runtime/compilationPolicy.hpp"
  41 #include "runtime/deoptimization.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/interfaceSupport.hpp"
  44 #include "runtime/mutexLocker.hpp"
  45 #include "runtime/orderAccess.inline.hpp"
  46 #include "runtime/osThread.hpp"
  47 #include "runtime/safepoint.hpp"
  48 #include "runtime/signature.hpp"
  49 #include "runtime/stubCodeGenerator.hpp"
  50 #include "runtime/stubRoutines.hpp"
  51 #include "runtime/sweeper.hpp"
  52 #include "runtime/synchronizer.hpp"
  53 #include "runtime/thread.inline.hpp"
  54 #include "services/runtimeService.hpp"
  55 #include "utilities/events.hpp"


  67 # include "vmreg_zero.inline.hpp"
  68 #endif
  69 #ifdef TARGET_ARCH_arm
  70 # include "nativeInst_arm.hpp"
  71 # include "vmreg_arm.inline.hpp"
  72 #endif
  73 #ifdef TARGET_ARCH_ppc
  74 # include "nativeInst_ppc.hpp"
  75 # include "vmreg_ppc.inline.hpp"
  76 #endif
  77 #if INCLUDE_ALL_GCS
  78 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  79 #include "gc_implementation/shared/suspendibleThreadSet.hpp"
  80 #endif // INCLUDE_ALL_GCS
  81 #ifdef COMPILER1
  82 #include "c1/c1_globals.hpp"
  83 #endif
  84 
  85 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  86 
  87 template <typename E>
  88 static void set_current_safepoint_id(E* event, int adjustment = 0) {
  89   assert(event != NULL, "invariant");
  90   event->set_safepointId(SafepointSynchronize::safepoint_counter() + adjustment);
  91 }
  92 
  93 static void post_safepoint_begin_event(EventSafepointBegin* event,
  94                                        int thread_count,
  95                                        int critical_thread_count) {
  96   assert(event != NULL, "invariant");
  97   assert(event->should_commit(), "invariant");
  98   set_current_safepoint_id(event);
  99   event->set_totalThreadCount(thread_count);
 100   event->set_jniCriticalThreadCount(critical_thread_count);
 101   event->commit();
 102 }
 103 
 104 static void post_safepoint_cleanup_event(EventSafepointCleanup* event) {
 105   assert(event != NULL, "invariant");
 106   assert(event->should_commit(), "invariant");
 107   set_current_safepoint_id(event);
 108   event->commit();
 109 }
 110 
 111 static void post_safepoint_synchronize_event(EventSafepointStateSynchronization* event,
 112                                              int initial_number_of_threads,
 113                                              int threads_waiting_to_block,
 114                                              unsigned int iterations) {
 115   assert(event != NULL, "invariant");
 116   if (event->should_commit()) {
 117     // Group this event together with the ones committed after the counter is increased
 118     set_current_safepoint_id(event, 1);
 119     event->set_initialThreadCount(initial_number_of_threads);
 120     event->set_runningThreadCount(threads_waiting_to_block);
 121     event->set_iterations(iterations);
 122     event->commit();
 123   }
 124 }
 125 
 126 static void post_safepoint_wait_blocked_event(EventSafepointWaitBlocked* event,
 127                                               int initial_threads_waiting_to_block) {
 128   assert(event != NULL, "invariant");
 129   assert(event->should_commit(), "invariant");
 130   set_current_safepoint_id(event);
 131   event->set_runningThreadCount(initial_threads_waiting_to_block);
 132   event->commit();
 133 }
 134 
 135 static void post_safepoint_cleanup_task_event(EventSafepointCleanupTask* event,
 136                                               const char* name) {
 137   assert(event != NULL, "invariant");
 138   if (event->should_commit()) {
 139     set_current_safepoint_id(event);
 140     event->set_name(name);
 141     event->commit();
 142   }
 143 }
 144 
 145 static void post_safepoint_end_event(EventSafepointEnd* event) {
 146   assert(event != NULL, "invariant");
 147   if (event->should_commit()) {
 148     // Group this event together with the ones committed before the counter increased
 149     set_current_safepoint_id(event, -1);
 150     event->commit();
 151   }
 152 }
 153 
 154 // --------------------------------------------------------------------------------------------------
 155 // Implementation of Safepoint begin/end
 156 
 157 SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
 158 volatile int  SafepointSynchronize::_waiting_to_block = 0;
 159 volatile int SafepointSynchronize::_safepoint_counter = 0;
 160 int SafepointSynchronize::_current_jni_active_count = 0;
 161 long  SafepointSynchronize::_end_of_last_safepoint = 0;
 162 static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
 163 static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
 164 static bool timeout_error_printed = false;
 165 
 166 // Roll all threads forward to a safepoint and suspend them all
 167 void SafepointSynchronize::begin() {
 168   EventSafepointBegin begin_event;
 169   Thread* myThread = Thread::current();
 170   assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
 171 
 172   if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
 173     _safepoint_begin_time = os::javaTimeNanos();
 174     _ts_of_current_safepoint = tty->time_stamp().seconds();
 175   }
 176 
 177 #if INCLUDE_ALL_GCS
 178   if (UseConcMarkSweepGC) {
 179     // In the future we should investigate whether CMS can use the
 180     // more-general mechanism below.  DLD (01/05).
 181     ConcurrentMarkSweepThread::synchronize(false);
 182   } else if (UseG1GC) {
 183     SuspendibleThreadSet::synchronize();
 184   }
 185 #endif // INCLUDE_ALL_GCS
 186 
 187   // By getting the Threads_lock, we assure that no threads are about to start or
 188   // exit. It is released again in SafepointSynchronize::end().


 240   //     (on MP systems).  In order to avoid the overhead of issuing
 241   //     a memory barrier for each Java thread making native calls, each Java
 242   //     thread performs a write to a single memory page after changing
 243   //     the thread state.  The VM thread performs a sequence of
 244   //     mprotect OS calls which forces all previous writes from all
 245   //     Java threads to be serialized.  This is done in the
 246   //     os::serialize_thread_states() call.  This has proven to be
 247   //     much more efficient than executing a membar instruction
 248   //     on every call to native code.
 249   //  3. Running compiled Code
 250   //     Compiled code reads a global (Safepoint Polling) page that
 251   //     is set to fault if we are trying to get to a safepoint.
 252   //  4. Blocked
 253   //     A thread which is blocked will not be allowed to return from the
 254   //     block condition until the safepoint operation is complete.
 255   //  5. In VM or Transitioning between states
 256   //     If a Java thread is currently running in the VM or transitioning
 257   //     between states, the safepointing code will wait for the thread to
 258   //     block itself when it attempts transitions to a new state.
 259   //
 260   EventSafepointStateSynchronization sync_event;
 261   int initial_running = 0;
 262 
 263   _state            = _synchronizing;
 264   OrderAccess::fence();
 265 
 266   // Flush all thread states to memory
 267   if (!UseMembar) {
 268     os::serialize_thread_states();
 269   }
 270 
 271   // Make interpreter safepoint aware
 272   Interpreter::notice_safepoints();
 273 
 274   if (UseCompilerSafepoints && DeferPollingPageLoopCount < 0) {
 275     // Make polling safepoint aware
 276     guarantee (PageArmed == 0, "invariant") ;
 277     PageArmed = 1 ;
 278     os::make_polling_page_unreadable();
 279   }
 280 
 281   // Consider using active_processor_count() ... but that call is expensive.
 282   int ncpus = os::processor_count() ;


 297   int steps = 0 ;
 298   while(still_running > 0) {
 299     for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
 300       assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectly being suspended");
 301       ThreadSafepointState *cur_state = cur->safepoint_state();
 302       if (cur_state->is_running()) {
 303         cur_state->examine_state_of_thread();
 304         if (!cur_state->is_running()) {
 305            still_running--;
 306            // consider adjusting steps downward:
 307            //   steps = 0
 308            //   steps -= NNN
 309            //   steps >>= 1
 310            //   steps = MIN(steps, 2000-100)
 311            //   if (iterations != 0) steps -= NNN
 312         }
 313         if (TraceSafepoint && Verbose) cur_state->print();
 314       }
 315     }
 316 
 317     if (iterations == 0) {
 318       initial_running = still_running;
 319       if (PrintSafepointStatistics) {
 320         begin_statistics(nof_threads, still_running);
 321       }
 322     }
 323 
 324     if (still_running > 0) {
 325       // Check for if it takes to long
 326       if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
 327         print_safepoint_timeout(_spinning_timeout);
 328       }
 329 
 330       // Spin to avoid context switching.
 331       // There's a tension between allowing the mutators to run (and rendezvous)
 332       // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
 333       // a mutator might otherwise use profitably to reach a safepoint.  Excessive
 334       // spinning by the VM thread on a saturated system can increase rendezvous latency.
 335       // Blocking or yielding incur their own penalties in the form of context switching
 336       // and the resultant loss of $ residency.
 337       //
 338       // Further complicating matters is that yield() does not work as naively expected
 339       // on many platforms -- yield() does not guarantee that any other ready threads
 340       // will run.   As such we revert yield_all() after some number of iterations.
 341       // Yield_all() is implemented as a short unconditional sleep on some platforms.


 393         SpinPause() ;     // MP-Polite spin
 394       } else
 395       if (steps < DeferThrSuspendLoopCount) {
 396         os::NakedYield() ;
 397       } else {
 398         os::yield_all(steps) ;
 399         // Alternately, the VM thread could transiently depress its scheduling priority or
 400         // transiently increase the priority of the tardy mutator(s).
 401       }
 402 
 403       iterations ++ ;
 404     }
 405     assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
 406   }
 407   assert(still_running == 0, "sanity check");
 408 
 409   if (PrintSafepointStatistics) {
 410     update_statistics_on_spin_end();
 411   }
 412 
 413   if (sync_event.should_commit()) {
 414     post_safepoint_synchronize_event(&sync_event, initial_running, _waiting_to_block, iterations);
 415   }
 416 
 417   // wait until all threads are stopped
 418   {
 419     EventSafepointWaitBlocked wait_blocked_event;
 420     int initial_waiting_to_block = _waiting_to_block;
 421 
 422     while (_waiting_to_block > 0) {
 423       if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
 424       if (!SafepointTimeout || timeout_error_printed) {
 425         Safepoint_lock->wait(true);  // true, means with no safepoint checks
 426       } else {
 427         // Compute remaining time
 428         jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
 429 
 430         // If there is no remaining time, then there is an error
 431         if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
 432           print_safepoint_timeout(_blocking_timeout);
 433         }
 434       }
 435     }
 436     assert(_waiting_to_block == 0, "sanity check");

 437 
 438 #ifndef PRODUCT
 439     if (SafepointTimeout) {
 440       jlong current_time = os::javaTimeNanos();
 441       if (safepoint_limit_time < current_time) {
 442         tty->print_cr("# SafepointSynchronize: Finished after "
 443                       INT64_FORMAT_W(6) " ms",
 444                       ((current_time - safepoint_limit_time) / MICROUNITS +
 445                        SafepointTimeoutDelay));
 446       }
 447     }

 448 #endif
 449 
 450     assert((_safepoint_counter & 0x1) == 0, "must be even");
 451     assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 452     _safepoint_counter ++;
 453 
 454     // Record state
 455     _state = _synchronized;
 456 
 457     OrderAccess::fence();
 458 
 459     if (wait_blocked_event.should_commit()) {
 460       post_safepoint_wait_blocked_event(&wait_blocked_event, initial_waiting_to_block);
 461     }
 462   }
 463 
 464 #ifdef ASSERT
 465   for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
 466     // make sure all the threads were visited
 467     assert(cur->was_visited_for_critical_count(), "missed a thread");
 468   }
 469 #endif // ASSERT
 470 
 471   // Update the count of active JNI critical regions
 472   GC_locker::set_jni_lock_count(_current_jni_active_count);
 473 
 474   if (TraceSafepoint) {
 475     VM_Operation *op = VMThread::vm_operation();
 476     tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
 477   }
 478 
 479   RuntimeService::record_safepoint_synchronized();
 480   if (PrintSafepointStatistics) {
 481     update_statistics_on_sync_end(os::javaTimeNanos());
 482   }
 483 
 484   // Call stuff that needs to be run when a safepoint is just about to be completed
 485   {
 486     EventSafepointCleanup cleanup_event;
 487     do_cleanup_tasks();
 488     if (cleanup_event.should_commit()) {
 489       post_safepoint_cleanup_event(&cleanup_event);
 490     }
 491   }
 492 
 493   if (PrintSafepointStatistics) {
 494     // Record how much time spend on the above cleanup tasks
 495     update_statistics_on_cleanup_end(os::javaTimeNanos());
 496   }
 497 
 498   if (begin_event.should_commit()) {
 499     post_safepoint_begin_event(&begin_event, nof_threads, _current_jni_active_count);
 500   }
 501 }
 502 
 503 // Wake up all threads, so they are ready to resume execution after the safepoint
 504 // operation has been carried out
 505 void SafepointSynchronize::end() {
 506 
 507   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 508   assert((_safepoint_counter & 0x1) == 1, "must be odd");
 509   EventSafepointEnd event;
 510   _safepoint_counter ++;
 511   // memory fence isn't required here since an odd _safepoint_counter
 512   // value can do no harm and a fence is issued below anyway.
 513 
 514   DEBUG_ONLY(Thread* myThread = Thread::current();)
 515   assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");
 516 
 517   if (PrintSafepointStatistics) {
 518     end_statistics(os::javaTimeNanos());
 519   }
 520 
 521 #ifdef ASSERT
 522   // A pending_exception cannot be installed during a safepoint.  The threads
 523   // may install an async exception after they come back from a safepoint into
 524   // pending_exception after they unblock.  But that should happen later.
 525   for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
 526     assert (!(cur->has_pending_exception() &&
 527               cur->safepoint_state()->is_at_poll_safepoint()),
 528             "safepoint installed a pending exception");
 529   }


 575     }
 576 
 577     RuntimeService::record_safepoint_end();
 578 
 579     // Release threads lock, so threads can be created/destroyed again. It will also start all threads
 580     // blocked in signal_thread_blocked
 581     Threads_lock->unlock();
 582 
 583   }
 584 #if INCLUDE_ALL_GCS
 585   // If there are any concurrent GC threads resume them.
 586   if (UseConcMarkSweepGC) {
 587     ConcurrentMarkSweepThread::desynchronize(false);
 588   } else if (UseG1GC) {
 589     SuspendibleThreadSet::desynchronize();
 590   }
 591 #endif // INCLUDE_ALL_GCS
 592   // record this time so VMThread can keep track how much time has elapsed
 593   // since last safepoint.
 594   _end_of_last_safepoint = os::javaTimeMillis();
 595   if (event.should_commit()) {
 596     post_safepoint_end_event(&event);
 597   }
 598 }
 599 
 600 bool SafepointSynchronize::is_cleanup_needed() {
 601   // Need a safepoint if some inline cache buffers is non-empty
 602   if (!InlineCacheBuffer::is_empty()) return true;
 603   return false;
 604 }
 605 
 606 
 607 
 608 // Various cleaning tasks that should be done periodically at safepoints
 609 void SafepointSynchronize::do_cleanup_tasks() {
 610   {
 611     const char* name = "deflating idle monitors";
 612     EventSafepointCleanupTask event;
 613     TraceTime t1(name, TraceSafepointCleanupTime);
 614     ObjectSynchronizer::deflate_idle_monitors();
 615     if (event.should_commit()) {
 616       post_safepoint_cleanup_task_event(&event, name);
 617     }
 618   }
 619 
 620   {
 621     const char* name = "updating inline caches";
 622     EventSafepointCleanupTask event;
 623     TraceTime t2(name, TraceSafepointCleanupTime);
 624     InlineCacheBuffer::update_inline_caches();
 625     if (event.should_commit()) {
 626       post_safepoint_cleanup_task_event(&event, name);
 627     }
 628   }
 629   {
 630     const char* name = "compilation policy safepoint handler";
 631     EventSafepointCleanupTask event;
 632     TraceTime t3(name, TraceSafepointCleanupTime);
 633     CompilationPolicy::policy()->do_safepoint_work();
 634     if (event.should_commit()) {
 635       post_safepoint_cleanup_task_event(&event, name);
 636     }
 637   }
 638 
 639   {
 640     const char* name = "mark nmethods";
 641     EventSafepointCleanupTask event;
 642     TraceTime t4(name, TraceSafepointCleanupTime);
 643     NMethodSweeper::mark_active_nmethods();
 644     if (event.should_commit()) {
 645       post_safepoint_cleanup_task_event(&event, name);
 646     }
 647   }
 648 
 649   if (SymbolTable::needs_rehashing()) {
 650     const char* name = "rehashing symbol table";
 651     EventSafepointCleanupTask event;
 652     TraceTime t5(name, TraceSafepointCleanupTime);
 653     SymbolTable::rehash_table();
 654     if (event.should_commit()) {
 655       post_safepoint_cleanup_task_event(&event, name);
 656     }
 657   }
 658 
 659   if (StringTable::needs_rehashing()) {
 660     const char* name = "rehashing string table";
 661     EventSafepointCleanupTask event;
 662     TraceTime t6(name, TraceSafepointCleanupTime);
 663     StringTable::rehash_table();
 664     if (event.should_commit()) {
 665       post_safepoint_cleanup_task_event(&event, name);
 666     }
 667   }
 668 
 669   // rotate log files?
 670   if (UseGCLogFileRotation) {
 671     gclog_or_tty->rotate_log(false);
 672   }
 673 
 674   {
 675     // CMS delays purging the CLDG until the beginning of the next safepoint and to
 676     // make sure concurrent sweep is done
 677     TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
 678     ClassLoaderDataGraph::purge_if_needed();
 679   }
 680 }
 681 
 682 
 683 bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
 684   switch(state) {
 685   case _thread_in_native:
 686     // native threads are safe if they have no java stack or have walkable stack


< prev index next >