
src/hotspot/share/runtime/safepoint.cpp





   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.inline.hpp"
  27 #include "classfile/dictionary.hpp"
  28 #include "classfile/stringTable.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gcLocker.hpp"
  38 #include "gc/shared/strongRootsScope.hpp"
  39 #include "gc/shared/workgroup.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "oops/symbol.hpp"


  50 #include "runtime/deoptimization.hpp"
  51 #include "runtime/frame.inline.hpp"
  52 #include "runtime/handles.inline.hpp"
  53 #include "runtime/interfaceSupport.inline.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/orderAccess.hpp"
  56 #include "runtime/osThread.hpp"
  57 #include "runtime/safepoint.hpp"
  58 #include "runtime/safepointMechanism.inline.hpp"
  59 #include "runtime/signature.hpp"
  60 #include "runtime/stubCodeGenerator.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/sweeper.hpp"
  63 #include "runtime/synchronizer.hpp"
  64 #include "runtime/thread.inline.hpp"
  65 #include "runtime/threadSMR.hpp"
  66 #include "runtime/timerTrace.hpp"
  67 #include "services/runtimeService.hpp"
  68 #include "utilities/events.hpp"
  69 #include "utilities/macros.hpp"



  70 
  71 static void post_safepoint_begin_event(EventSafepointBegin& event,
  72                                        uint64_t safepoint_id,
  73                                        int thread_count,
  74                                        int critical_thread_count) {
  75   if (event.should_commit()) {
  76     event.set_safepointId(safepoint_id);
  77     event.set_totalThreadCount(thread_count);
  78     event.set_jniCriticalThreadCount(critical_thread_count);
  79     event.commit();
  80   }
  81 }
  82 
  83 static void post_safepoint_cleanup_event(EventSafepointCleanup& event, uint64_t safepoint_id) {
  84   if (event.should_commit()) {
  85     event.set_safepointId(safepoint_id);
  86     event.commit();
  87   }
  88 }
  89 
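Both helpers above follow the usual JFR posting shape: check should_commit() first so that field setup is skipped entirely when the event is not being recorded, then fill in the fields and commit. A standalone sketch of that guard-fill-commit pattern, using a toy event type rather than the real jfrEvents API:

    #include <cstdint>
    #include <cstdio>

    // ToyEvent is illustrative only; it mimics the shape of a JFR event object.
    struct ToyEvent {
      bool     enabled = true;    // in JFR this is driven by the active recording settings
      uint64_t safepoint_id = 0;
      bool should_commit() const { return enabled; }
      void set_safepointId(uint64_t id) { safepoint_id = id; }
      void commit() const {
        std::printf("committed event, safepoint id %llu\n", (unsigned long long) safepoint_id);
      }
    };

    static void post_toy_safepoint_event(ToyEvent& event, uint64_t safepoint_id) {
      if (event.should_commit()) {            // cheap guard: do nothing when disabled
        event.set_safepointId(safepoint_id);
        event.commit();
      }
    }

    int main() {
      ToyEvent event;
      post_toy_safepoint_event(event, 42);    // prints once; with enabled = false, nothing happens
      return 0;
    }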


 167     ResourceMark rm;
 168     LogStream ls(lt);
 169     cur_state->print_on(&ls);
 170   }
 171   return false;
 172 }
 173 
 174 #ifdef ASSERT
 175 static void assert_list_is_valid(const ThreadSafepointState* tss_head, int still_running) {
 176   int a = 0;
 177   const ThreadSafepointState *tmp_tss = tss_head;
 178   while (tmp_tss != NULL) {
 179     ++a;
 180     assert(tmp_tss->is_running(), "Illegal initial state");
 181     tmp_tss = tmp_tss->get_next();
 182   }
 183   assert(a == still_running, "Must be the same");
 184 }
 185 #endif // ASSERT
 186 
 187 static void back_off(int64_t start_time) {
 188   // We start with fine-grained nanosleeping until a millisecond has
 189   // passed, at which point we resort to plain naked_short_sleep.
 190   if (os::javaTimeNanos() - start_time < NANOSECS_PER_MILLISEC) {
 191     os::naked_short_nanosleep(10 * (NANOUNITS / MICROUNITS));
 192   } else {
 193     os::naked_short_sleep(1);

 194   }

 195 }
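For concreteness, with the usual HotSpot unit constants (NANOUNITS = 1,000,000,000 and MICROUNITS = 1,000,000 per second), the nanosleep length works out to

    10 * (NANOUNITS / MICROUNITS) = 10 * 1,000 = 10,000 ns = 10 us

so for the first millisecond after start_time the loop backs off in 10 us nanosleeps, and after that each naked_short_sleep(1) call sleeps a full millisecond.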
 196 
 197 int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int nof_threads, int* initial_running)
 198 {
 199   JavaThreadIteratorWithHandle jtiwh;
 200 
 201 #ifdef ASSERT
 202   for (; JavaThread *cur = jtiwh.next(); ) {
 203     assert(cur->safepoint_state()->is_running(), "Illegal initial state");
 204   }
 205   jtiwh.rewind();
 206 #endif // ASSERT
 207 
 208   // Iterate through all threads until it has been determined how to stop them all at a safepoint.
 209   int still_running = nof_threads;
 210   ThreadSafepointState *tss_head = NULL;
 211   ThreadSafepointState **p_prev = &tss_head;
 212   for (; JavaThread *cur = jtiwh.next(); ) {
 213     ThreadSafepointState *cur_tss = cur->safepoint_state();
 214     assert(cur_tss->get_next() == NULL, "Must be NULL");
 215     if (thread_not_running(cur_tss)) {
 216       --still_running;
 217     } else {
 218       *p_prev = cur_tss;
 219       p_prev = cur_tss->next_ptr();
 220     }
 221   }
 222   *p_prev = NULL;
 223 
 224   DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);)
 225 
 226   *initial_running = still_running;
 227 
 228   // If there is no thread still running, we are already done.
 229   if (still_running <= 0) {
 230     assert(tss_head == NULL, "Must be empty");
 231     return 1;
 232   }
 233 
 234   int iterations = 1; // The first iteration is above.
 235   int64_t start_time = os::javaTimeNanos();
 236 
 237   do {
 238     // Check if this has taken too long:
 239     if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
 240       print_safepoint_timeout();
 241     }
 242     if (int(iterations) == -1) { // overflow - something is wrong.
 243       // We can only overflow here when we are using global
 244       // polling pages. We keep this guarantee in its original
 245       // form so that searches of the bug database for this
 246       // failure mode find the right bugs.
 247       guarantee (!PageArmed, "invariant");
 248     }
 249 
 250     p_prev = &tss_head;
 251     ThreadSafepointState *cur_tss = tss_head;
 252     while (cur_tss != NULL) {
 253       assert(cur_tss->is_running(), "Illegal initial state");
 254       if (thread_not_running(cur_tss)) {
 255         --still_running;
 256         *p_prev = NULL;
 257         ThreadSafepointState *tmp = cur_tss;
 258         cur_tss = cur_tss->get_next();
 259         tmp->set_next(NULL);
 260       } else {
 261         *p_prev = cur_tss;
 262         p_prev = cur_tss->next_ptr();
 263         cur_tss = cur_tss->get_next();
 264       }
 265     }
 266 
 267     DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);)
 268 
 269     if (still_running > 0) {
 270       back_off(start_time);
 271     }
 272 
 273     iterations++;
 274   } while (still_running > 0);
 275 
 276   assert(tss_head == NULL, "Must be empty");
 277 
 278   return iterations;
 279 }
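The unlink loop above relies on the pointer-to-pointer idiom: p_prev always addresses the link field that reaches the current node, so dropping the head and dropping an interior node are the same operation. A self-contained sketch of that idiom (Node and done are illustrative stand-ins, and it writes the next kept node's address directly instead of HotSpot's write-NULL-then-overwrite sequence):

    #include <cstdio>

    struct Node {
      int   id;
      bool  done;      // stands in for a ThreadSafepointState that is no longer "running"
      Node* next;
    };

    // Remove every node whose work is done, keeping the rest in order.
    static void prune(Node** head) {
      Node** p_prev = head;
      Node*  cur = *head;
      while (cur != nullptr) {
        if (cur->done) {
          *p_prev = cur->next;   // unlink: identical for head and interior nodes
          Node* tmp = cur;
          cur = cur->next;
          tmp->next = nullptr;
        } else {
          p_prev = &cur->next;   // keep: advance the link we may rewrite next
          cur = cur->next;
        }
      }
    }

    int main() {
      Node c{3, false, nullptr}, b{2, true, &c}, a{1, true, &b};
      Node* head = &a;
      prune(&head);
      for (Node* n = head; n != nullptr; n = n->next) std::printf("%d ", n->id);  // prints "3"
      std::printf("\n");
      return 0;
    }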
 280 
 281 void SafepointSynchronize::arm_safepoint() {
 282   // Begin the process of bringing the system to a safepoint.
 283   // Java threads can be in several different states and are
 284   // stopped by different mechanisms:
 285   //
 286   //  1. Running interpreted
 287   //     When executing branching/returning byte codes interpreter
 288   //     checks if the poll is armed, if so blocks in SS::block().
 289   //     When using global polling the interpreter dispatch table
 290   //     is changed to force it to check for a safepoint condition
 291   //     between bytecodes.
 292   //  2. Running in native code
 293   //     When returning from the native code, a Java thread must check
 294   //     the safepoint _state to see if we must block.  If the


 497 void SafepointSynchronize::end() {
 498   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 499   EventSafepointEnd event;
 500   uint64_t safepoint_id = _safepoint_counter;
 501   assert(Thread::current()->is_VM_thread(), "Only VM thread can execute a safepoint");
 502 
 503   disarm_safepoint();
 504 
 505   Universe::heap()->safepoint_synchronize_end();
 506 
 507   SafepointTracing::end();
 508 
 509   post_safepoint_end_event(event, safepoint_id);
 510 }
 511 
 512 bool SafepointSynchronize::is_cleanup_needed() {
 513   // Need a safepoint if there are many monitors to deflate.
 514   if (ObjectSynchronizer::is_cleanup_needed()) return true;
  515   // Need a safepoint if any inline cache buffers are non-empty
 516   if (!InlineCacheBuffer::is_empty()) return true;
 517   if (StringTable::needs_rehashing()) return true;
 518   if (SymbolTable::needs_rehashing()) return true;
 519   return false;
 520 }
 521 
 522 class ParallelSPCleanupThreadClosure : public ThreadClosure {
 523 private:
 524   CodeBlobClosure* _nmethod_cl;
 525   DeflateMonitorCounters* _counters;
 526 
 527 public:
 528   ParallelSPCleanupThreadClosure(DeflateMonitorCounters* counters) :
 529     _nmethod_cl(UseCodeAging ? NMethodSweeper::prepare_reset_hotness_counters() : NULL),
 530     _counters(counters) {}
 531 
 532   void do_thread(Thread* thread) {
 533     ObjectSynchronizer::deflate_thread_local_monitors(thread, _counters);
 534     if (_nmethod_cl != NULL && thread->is_Java_thread() &&
 535         ! thread->is_Code_cache_sweeper_thread()) {
 536       JavaThread* jt = (JavaThread*) thread;
 537       jt->nmethods_do(_nmethod_cl);
 538     }


 591         EventSafepointCleanupTask event;
 592         TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
 593         SymbolTable::rehash_table();
 594 
 595         post_safepoint_cleanup_task_event(event, safepoint_id, name);
 596       }
 597     }
 598 
 599     if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_STRING_TABLE_REHASH)) {
 600       if (StringTable::needs_rehashing()) {
 601         const char* name = "rehashing string table";
 602         EventSafepointCleanupTask event;
 603         TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
 604         StringTable::rehash_table();
 605 
 606         post_safepoint_cleanup_task_event(event, safepoint_id, name);
 607       }
 608     }
 609 
 610     if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_CLD_PURGE)) {
 611       if (ClassLoaderDataGraph::should_purge_and_reset()) {
  612         // CMS delays purging the CLDG until the beginning of the next safepoint
  613         // to make sure the concurrent sweep is done
 614         const char* name = "purging class loader data graph";
 615         EventSafepointCleanupTask event;
 616         TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
 617         ClassLoaderDataGraph::purge();
 618 
 619         post_safepoint_cleanup_task_event(event, safepoint_id, name);
 620       }
 621     }
 622 
 623     if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE)) {
 624       if (Dictionary::does_any_dictionary_needs_resizing()) {
 625         const char* name = "resizing system dictionaries";
 626         EventSafepointCleanupTask event;
 627         TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
 628         ClassLoaderDataGraph::resize_dictionaries();
 629 
 630         post_safepoint_cleanup_task_event(event, safepoint_id, name);
 631       }
 632     }
 633 
 634     _subtasks.all_tasks_completed(_num_workers);
 635   }
 636 };
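Every subtask above is wrapped in _subtasks.try_claim_task(...), so when the same cleanup body runs on several worker threads, each subtask is still executed exactly once, by whichever worker claims it first. A minimal standalone analogue of that claiming scheme, using std::atomic flags instead of HotSpot's SubTasksDone (the task names are illustrative):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    enum { TASK_REHASH = 0, TASK_PURGE, TASK_RESIZE, TASK_COUNT };

    static std::atomic<bool> claimed[TASK_COUNT];

    // Returns true for exactly one caller per task id.
    static bool try_claim_task(int task) {
      bool expected = false;
      return claimed[task].compare_exchange_strong(expected, true);
    }

    static void worker(int worker_id) {
      for (int task = 0; task < TASK_COUNT; task++) {
        if (try_claim_task(task)) {
          std::printf("worker %d runs task %d\n", worker_id, task);  // each task appears once
        }
      }
    }

    int main() {
      for (auto& c : claimed) c.store(false);     // explicit init, portable across standards
      std::vector<std::thread> pool;
      for (int i = 0; i < 4; i++) pool.emplace_back(worker, i);
      for (auto& t : pool) t.join();
      return 0;
    }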
 637 
 638 // Various cleaning tasks that should be done periodically at safepoints.
 639 void SafepointSynchronize::do_cleanup_tasks() {
 640 
 641   TraceTime timer("safepoint cleanup tasks", TRACETIME_LOG(Info, safepoint, cleanup));
 642 
 643   // Prepare for monitor deflation.
 644   DeflateMonitorCounters deflate_counters;
 645   ObjectSynchronizer::prepare_deflate_idle_monitors(&deflate_counters);
 646 
 647   CollectedHeap* heap = Universe::heap();
 648   assert(heap != NULL, "heap not initialized yet?");
 649   WorkGang* cleanup_workers = heap->get_safepoint_workers();
 650   if (cleanup_workers != NULL) {
 651     // Parallel cleanup using GC provided thread pool.


 797   JavaThreadState state = thread->thread_state();
 798   thread->frame_anchor()->make_walkable(thread);
 799 
 800   uint64_t safepoint_id = SafepointSynchronize::safepoint_counter();
 801   // Check that we have a valid thread_state at this point
 802   switch(state) {
 803     case _thread_in_vm_trans:
 804     case _thread_in_Java:        // From compiled code
 805     case _thread_in_native_trans:
 806     case _thread_blocked_trans:
 807     case _thread_new_trans:
 808 
  809       // We have no idea where the VMThread is; it might even be at the next safepoint.
  810       // So we can miss this poll, but we will stop at the next one.
 811 
 812       // Load dependent store, it must not pass loading of safepoint_id.
 813       thread->safepoint_state()->set_safepoint_id(safepoint_id); // Release store
 814 
 815       // This part we can skip if we notice we miss or are in a future safepoint.
 816       OrderAccess::storestore();
 817       // Load in wait barrier should not float up
 818       thread->set_thread_state_fence(_thread_blocked);
 819 

 820       _wait_barrier->wait(static_cast<int>(safepoint_id));
 821       assert(_state != _synchronized, "Can't be");
 822 
 823       // If barrier is disarmed stop store from floating above loads in barrier.
 824       OrderAccess::loadstore();
 825       thread->set_thread_state(state);
 826 
 827       // Then we reset the safepoint id to inactive.
 828       thread->safepoint_state()->reset_safepoint_id(); // Release store
 829 
 830       OrderAccess::fence();
 831 
 832       break;
 833 
 834     default:
 835      fatal("Illegal threadstate encountered: %d", state);
 836   }
 837   guarantee(thread->safepoint_state()->get_safepoint_id() == InactiveSafepointCounter,
 838             "The safepoint id should be set only in block path");
 839 
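The sequence above (release-store the safepoint id, storestore, publish _thread_blocked, then wait on _wait_barrier keyed by that id) parks each blocking thread on a tagged barrier that the VM thread disarms when the safepoint ends. A much simplified, mutex-based analogue of that tagged arm/wait/disarm shape, which is not HotSpot's WaitBarrier implementation:

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Illustrative toy barrier: armed with a tag, waiters block until it is disarmed.
    class ToyWaitBarrier {
      std::mutex              _lock;
      std::condition_variable _cv;
      int64_t                 _armed_tag = 0;   // 0 means disarmed
    public:
      void arm(int64_t tag) {
        std::lock_guard<std::mutex> g(_lock);
        _armed_tag = tag;
      }
      void disarm_and_wake() {
        { std::lock_guard<std::mutex> g(_lock); _armed_tag = 0; }
        _cv.notify_all();
      }
      // Blocks only while the barrier is still armed with exactly this tag, so a thread
      // arriving late, or with an older tag, falls straight through.
      void wait(int64_t tag) {
        std::unique_lock<std::mutex> g(_lock);
        _cv.wait(g, [&] { return _armed_tag != tag; });
      }
    };

    int main() {
      ToyWaitBarrier barrier;
      barrier.arm(1);                                   // "VM thread" arms for safepoint id 1
      std::vector<std::thread> waiters;
      for (int i = 0; i < 3; i++) {
        waiters.emplace_back([&] { barrier.wait(1); std::printf("released\n"); });
      }
      std::this_thread::sleep_for(std::chrono::milliseconds(10));  // let the waiters block
      barrier.disarm_and_wake();                        // "end of safepoint": wake everyone
      for (auto& t : waiters) t.join();
      return 0;
    }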


1023   }
1024   DEBUG_ONLY(_thread->set_visited_for_critical_count(SafepointSynchronize::safepoint_counter());)
1025   assert(!_safepoint_safe, "Must be unsafe before safe");
1026   _safepoint_safe = true;
1027 }
1028 
1029 void ThreadSafepointState::restart() {
1030   assert(_safepoint_safe, "Must be safe before unsafe");
1031   _safepoint_safe = false;
1032 }
1033 
1034 void ThreadSafepointState::print_on(outputStream *st) const {
1035   const char *s = _safepoint_safe ? "_at_safepoint" : "_running";
1036 
1037   st->print_cr("Thread: " INTPTR_FORMAT
1038               "  [0x%2x] State: %s _at_poll_safepoint %d",
1039                p2i(_thread), _thread->osthread()->thread_id(), s, _at_poll_safepoint);
1040 
1041   _thread->print_thread_state_on(st);
1042 }
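With a hypothetical thread pointer and OS thread id on a 64-bit build, the first line printed by print_on() looks like:

    Thread: 0x00007f3c2c029800  [0x5b21] State: _running _at_poll_safepoint 0

followed by the output of print_thread_state_on().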
1043 
1044 void ThreadSafepointState::print() const { print_on(tty); }
1045 
1046 // ---------------------------------------------------------------------------------------------------------------------
1047 
1048 // Block the thread at poll or poll return for safepoint/handshake.
1049 void ThreadSafepointState::handle_polling_page_exception() {
1050 
1051   // If we're using a global poll, then the thread should not be
1052   // marked as safepoint safe yet.
1053   assert(!SafepointMechanism::uses_global_page_poll() || !_safepoint_safe,
1054          "polling page exception on thread safepoint safe");
1055 
1056   // Step 1: Find the nmethod from the return address
1057   address real_return_addr = thread()->saved_exception_pc();
1058 
1059   CodeBlob *cb = CodeCache::find_blob(real_return_addr);
1060   assert(cb != NULL && cb->is_compiled(), "return address should be in nmethod");
1061   CompiledMethod* nm = (CompiledMethod*)cb;
1062 
1063   // Find frame of caller
1064   frame stub_fr = thread()->last_frame();




   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.inline.hpp"

  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/icBuffer.hpp"
  32 #include "code/nmethod.hpp"
  33 #include "code/pcDesc.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "gc/shared/collectedHeap.hpp"
  36 #include "gc/shared/gcLocker.hpp"
  37 #include "gc/shared/strongRootsScope.hpp"
  38 #include "gc/shared/workgroup.hpp"
  39 #include "interpreter/interpreter.hpp"
  40 #include "jfr/jfrEvents.hpp"
  41 #include "logging/log.hpp"
  42 #include "logging/logStream.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "oops/symbol.hpp"


  49 #include "runtime/deoptimization.hpp"
  50 #include "runtime/frame.inline.hpp"
  51 #include "runtime/handles.inline.hpp"
  52 #include "runtime/interfaceSupport.inline.hpp"
  53 #include "runtime/mutexLocker.hpp"
  54 #include "runtime/orderAccess.hpp"
  55 #include "runtime/osThread.hpp"
  56 #include "runtime/safepoint.hpp"
  57 #include "runtime/safepointMechanism.inline.hpp"
  58 #include "runtime/signature.hpp"
  59 #include "runtime/stubCodeGenerator.hpp"
  60 #include "runtime/stubRoutines.hpp"
  61 #include "runtime/sweeper.hpp"
  62 #include "runtime/synchronizer.hpp"
  63 #include "runtime/thread.inline.hpp"
  64 #include "runtime/threadSMR.hpp"
  65 #include "runtime/timerTrace.hpp"
  66 #include "services/runtimeService.hpp"
  67 #include "utilities/events.hpp"
  68 #include "utilities/macros.hpp"
  69 #ifdef COMPILER1
  70 #include "c1/c1_globals.hpp"
  71 #endif
  72 
  73 static void post_safepoint_begin_event(EventSafepointBegin& event,
  74                                        uint64_t safepoint_id,
  75                                        int thread_count,
  76                                        int critical_thread_count) {
  77   if (event.should_commit()) {
  78     event.set_safepointId(safepoint_id);
  79     event.set_totalThreadCount(thread_count);
  80     event.set_jniCriticalThreadCount(critical_thread_count);
  81     event.commit();
  82   }
  83 }
  84 
  85 static void post_safepoint_cleanup_event(EventSafepointCleanup& event, uint64_t safepoint_id) {
  86   if (event.should_commit()) {
  87     event.set_safepointId(safepoint_id);
  88     event.commit();
  89   }
  90 }
  91 


 169     ResourceMark rm;
 170     LogStream ls(lt);
 171     cur_state->print_on(&ls);
 172   }
 173   return false;
 174 }
 175 
 176 #ifdef ASSERT
 177 static void assert_list_is_valid(const ThreadSafepointState* tss_head, int still_running) {
 178   int a = 0;
 179   const ThreadSafepointState *tmp_tss = tss_head;
 180   while (tmp_tss != NULL) {
 181     ++a;
 182     assert(tmp_tss->is_running(), "Illegal initial state");
 183     tmp_tss = tmp_tss->get_next();
 184   }
 185   assert(a == still_running, "Must be the same");
 186 }
 187 #endif // ASSERT
 188 
 189 static void back_off(int iteration) {
 190   // iteration will be 1 the first time we enter this spin back-off.
  191   // naked_short_nanosleep takes tenths of micros, which means that the
  192   // number of nanoseconds is irrelevant if it's below that. We do 20
  193   // sleeps of 1 ns each, with a total cost of ~1 ms, then we do 1 ms sleeps.
 194   jlong sleep_ns = 1;
 195   if (iteration > 20) {
 196     sleep_ns = NANOUNITS / MILLIUNITS;  // 1 ms
 197   }
 198   os::naked_short_nanosleep(sleep_ns);
 199 }
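Taking the comment's own numbers: 20 sleeps that each request 1 ns but cost roughly 1 ms in total means each naked_short_nanosleep call carries on the order of 1 ms / 20 = ~50 us of overhead, so the requested duration is irrelevant at that scale. After iteration 20 the argument becomes NANOUNITS / MILLIUNITS = 1,000,000,000 / 1,000 = 1,000,000 ns, i.e. a 1 ms sleep per remaining iteration.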
 200 
 201 int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int nof_threads, int* initial_running)
 202 {
 203   JavaThreadIteratorWithHandle jtiwh;
 204 
 205 #ifdef ASSERT
 206   for (; JavaThread *cur = jtiwh.next(); ) {
 207     assert(cur->safepoint_state()->is_running(), "Illegal initial state");
 208   }
 209   jtiwh.rewind();
 210 #endif // ASSERT
 211 
 212   // Iterate through all threads until it has been determined how to stop them all at a safepoint.
 213   int still_running = nof_threads;
 214   ThreadSafepointState *tss_head = NULL;
 215   ThreadSafepointState **p_prev = &tss_head;
 216   for (; JavaThread *cur = jtiwh.next(); ) {
 217     ThreadSafepointState *cur_tss = cur->safepoint_state();
 218     assert(cur_tss->get_next() == NULL, "Must be NULL");
 219     if (thread_not_running(cur_tss)) {
 220       --still_running;
 221     } else {
 222       *p_prev = cur_tss;
 223       p_prev = cur_tss->next_ptr();
 224     }
 225   }
 226   *p_prev = NULL;
 227 
 228   DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);)
 229 
 230   *initial_running = still_running;
 231 






 232   int iterations = 1; // The first iteration is above.

 233 
 234   while (still_running > 0) {
 235     // Check if this has taken too long:
 236     if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
 237       print_safepoint_timeout();
 238     }
 239     if (int(iterations) == -1) { // overflow - something is wrong.
 240       // We can only overflow here when we are using global
 241       // polling pages. We keep this guarantee in its original
 242       // form so that searches of the bug database for this
 243       // failure mode find the right bugs.
 244       guarantee (!PageArmed, "invariant");
 245     }
 246 
 247     p_prev = &tss_head;
 248     ThreadSafepointState *cur_tss = tss_head;
 249     while (cur_tss != NULL) {
 250       assert(cur_tss->is_running(), "Illegal initial state");
 251       if (thread_not_running(cur_tss)) {
 252         --still_running;
 253         *p_prev = NULL;
 254         ThreadSafepointState *tmp = cur_tss;
 255         cur_tss = cur_tss->get_next();
 256         tmp->set_next(NULL);
 257       } else {
 258         *p_prev = cur_tss;
 259         p_prev = cur_tss->next_ptr();
 260         cur_tss = cur_tss->get_next();
 261       }
 262     }
 263 
 264     DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);)
 265 
 266     if (still_running > 0) {
 267       back_off(iterations);
 268     }
 269 
 270     iterations++;
 271   }
 272 
 273   assert(tss_head == NULL, "Must be empty");
 274 
 275   return iterations;
 276 }
 277 
 278 void SafepointSynchronize::arm_safepoint() {
 279   // Begin the process of bringing the system to a safepoint.
 280   // Java threads can be in several different states and are
 281   // stopped by different mechanisms:
 282   //
 283   //  1. Running interpreted
 284   //     When executing branching/returning byte codes interpreter
 285   //     checks if the poll is armed, if so blocks in SS::block().
 286   //     When using global polling the interpreter dispatch table
 287   //     is changed to force it to check for a safepoint condition
 288   //     between bytecodes.
 289   //  2. Running in native code
 290   //     When returning from the native code, a Java thread must check
 291   //     the safepoint _state to see if we must block.  If the


 494 void SafepointSynchronize::end() {
 495   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 496   EventSafepointEnd event;
 497   uint64_t safepoint_id = _safepoint_counter;
 498   assert(Thread::current()->is_VM_thread(), "Only VM thread can execute a safepoint");
 499 
 500   disarm_safepoint();
 501 
 502   Universe::heap()->safepoint_synchronize_end();
 503 
 504   SafepointTracing::end();
 505 
 506   post_safepoint_end_event(event, safepoint_id);
 507 }
 508 
 509 bool SafepointSynchronize::is_cleanup_needed() {
 510   // Need a safepoint if there are many monitors to deflate.
 511   if (ObjectSynchronizer::is_cleanup_needed()) return true;
  512   // Need a safepoint if any inline cache buffers are non-empty
 513   if (!InlineCacheBuffer::is_empty()) return true;


 514   return false;
 515 }
 516 
 517 class ParallelSPCleanupThreadClosure : public ThreadClosure {
 518 private:
 519   CodeBlobClosure* _nmethod_cl;
 520   DeflateMonitorCounters* _counters;
 521 
 522 public:
 523   ParallelSPCleanupThreadClosure(DeflateMonitorCounters* counters) :
 524     _nmethod_cl(UseCodeAging ? NMethodSweeper::prepare_reset_hotness_counters() : NULL),
 525     _counters(counters) {}
 526 
 527   void do_thread(Thread* thread) {
 528     ObjectSynchronizer::deflate_thread_local_monitors(thread, _counters);
 529     if (_nmethod_cl != NULL && thread->is_Java_thread() &&
 530         ! thread->is_Code_cache_sweeper_thread()) {
 531       JavaThread* jt = (JavaThread*) thread;
 532       jt->nmethods_do(_nmethod_cl);
 533     }


 586         EventSafepointCleanupTask event;
 587         TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
 588         SymbolTable::rehash_table();
 589 
 590         post_safepoint_cleanup_task_event(event, safepoint_id, name);
 591       }
 592     }
 593 
 594     if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_STRING_TABLE_REHASH)) {
 595       if (StringTable::needs_rehashing()) {
 596         const char* name = "rehashing string table";
 597         EventSafepointCleanupTask event;
 598         TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
 599         StringTable::rehash_table();
 600 
 601         post_safepoint_cleanup_task_event(event, safepoint_id, name);
 602       }
 603     }
 604 
 605     if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_CLD_PURGE)) {
  606       // CMS delays purging the CLDG until the beginning of the next safepoint
  607       // to make sure the concurrent sweep is done
 608       const char* name = "purging class loader data graph";
 609       EventSafepointCleanupTask event;
 610       TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
 611       ClassLoaderDataGraph::purge_if_needed();

 612 
 613       post_safepoint_cleanup_task_event(event, safepoint_id, name);

 614     }
 615 
 616     if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE)) {
 617       const char* name = "resizing system dictionaries";
 618       EventSafepointCleanupTask event;
 619       TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
 620       ClassLoaderDataGraph::resize_if_needed();

 621 
 622       post_safepoint_cleanup_task_event(event, safepoint_id, name);

 623     }
 624 
 625     _subtasks.all_tasks_completed(_num_workers);
 626   }
 627 };
 628 
 629 // Various cleaning tasks that should be done periodically at safepoints.
 630 void SafepointSynchronize::do_cleanup_tasks() {
 631 
 632   TraceTime timer("safepoint cleanup tasks", TRACETIME_LOG(Info, safepoint, cleanup));
 633 
 634   // Prepare for monitor deflation.
 635   DeflateMonitorCounters deflate_counters;
 636   ObjectSynchronizer::prepare_deflate_idle_monitors(&deflate_counters);
 637 
 638   CollectedHeap* heap = Universe::heap();
 639   assert(heap != NULL, "heap not initialized yet?");
 640   WorkGang* cleanup_workers = heap->get_safepoint_workers();
 641   if (cleanup_workers != NULL) {
 642     // Parallel cleanup using GC provided thread pool.


 788   JavaThreadState state = thread->thread_state();
 789   thread->frame_anchor()->make_walkable(thread);
 790 
 791   uint64_t safepoint_id = SafepointSynchronize::safepoint_counter();
 792   // Check that we have a valid thread_state at this point
 793   switch(state) {
 794     case _thread_in_vm_trans:
 795     case _thread_in_Java:        // From compiled code
 796     case _thread_in_native_trans:
 797     case _thread_blocked_trans:
 798     case _thread_new_trans:
 799 
  800       // We have no idea where the VMThread is; it might even be at the next safepoint.
  801       // So we can miss this poll, but we will stop at the next one.
 802 
 803       // Load dependent store, it must not pass loading of safepoint_id.
 804       thread->safepoint_state()->set_safepoint_id(safepoint_id); // Release store
 805 
 806       // This part we can skip if we notice we miss or are in a future safepoint.
 807       OrderAccess::storestore();
 808       thread->set_thread_state(_thread_blocked);

 809 
 810       OrderAccess::fence(); // Load in wait barrier should not float up
 811       _wait_barrier->wait(static_cast<int>(safepoint_id));
 812       assert(_state != _synchronized, "Can't be");
 813 
 814       // If barrier is disarmed stop store from floating above loads in barrier.
 815       OrderAccess::loadstore();
 816       thread->set_thread_state(state);
 817 
 818       // Then we reset the safepoint id to inactive.
 819       thread->safepoint_state()->reset_safepoint_id(); // Release store
 820 
 821       OrderAccess::fence();
 822 
 823       break;
 824 
 825     default:
 826      fatal("Illegal threadstate encountered: %d", state);
 827   }
 828   guarantee(thread->safepoint_state()->get_safepoint_id() == InactiveSafepointCounter,
 829             "The safepoint id should be set only in block path");
 830 


1014   }
1015   DEBUG_ONLY(_thread->set_visited_for_critical_count(SafepointSynchronize::safepoint_counter());)
1016   assert(!_safepoint_safe, "Must be unsafe before safe");
1017   _safepoint_safe = true;
1018 }
1019 
1020 void ThreadSafepointState::restart() {
1021   assert(_safepoint_safe, "Must be safe before unsafe");
1022   _safepoint_safe = false;
1023 }
1024 
1025 void ThreadSafepointState::print_on(outputStream *st) const {
1026   const char *s = _safepoint_safe ? "_at_safepoint" : "_running";
1027 
1028   st->print_cr("Thread: " INTPTR_FORMAT
1029               "  [0x%2x] State: %s _at_poll_safepoint %d",
1030                p2i(_thread), _thread->osthread()->thread_id(), s, _at_poll_safepoint);
1031 
1032   _thread->print_thread_state_on(st);
1033 }


1034 
1035 // ---------------------------------------------------------------------------------------------------------------------
1036 
1037 // Block the thread at poll or poll return for safepoint/handshake.
1038 void ThreadSafepointState::handle_polling_page_exception() {
1039 
1040   // If we're using a global poll, then the thread should not be
1041   // marked as safepoint safe yet.
1042   assert(!SafepointMechanism::uses_global_page_poll() || !_safepoint_safe,
1043          "polling page exception on thread safepoint safe");
1044 
1045   // Step 1: Find the nmethod from the return address
1046   address real_return_addr = thread()->saved_exception_pc();
1047 
1048   CodeBlob *cb = CodeCache::find_blob(real_return_addr);
1049   assert(cb != NULL && cb->is_compiled(), "return address should be in nmethod");
1050   CompiledMethod* nm = (CompiledMethod*)cb;
1051 
1052   // Find frame of caller
1053   frame stub_fr = thread()->last_frame();

