< prev index next >

src/hotspot/share/runtime/javaThread.hpp

Print this page

 472   int _frames_to_pop_failed_realloc;  // NOTE(review): presumably the number of frames to pop after a failed scalar-replacement realloc during deopt -- confirm against deoptimization code
 473 
 474   ContinuationEntry* _cont_entry;  // NOTE(review): presumably this thread's current (innermost) continuation entry -- confirm against continuation code
 475   intptr_t* _cont_fastpath; // the sp of the oldest known interpreted/call_stub/upcall_stub/native_wrapper
 476                             // frame inside the continuation that we know about
 477   int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
 478 
 479   ObjectMonitor* _unlocked_inflated_monitor;  // NOTE(review): semantics not visible in this chunk -- confirm against monitor/unlock code
 480 
 481   // This is the field we poke in the interpreter and native
 482   // wrapper (Object.wait) to check for preemption.
 483   address _preempt_alternate_return;
 484   // When preempting on monitorenter we could have acquired the
 485   // monitor after freezing all vthread frames. In that case we
 486   // set this field so that in the preempt stub we call thaw again
 487   // instead of unmounting.
 488   bool _preemption_cancelled;
 489   // For Object.wait() we set this field to know if we need to
 490   // throw IE (InterruptedException) at the end of thawing before returning to Java.
 491   bool _pending_interrupted_exception;



 492 
 493  public:
       // Simple accessors for the preemption support fields above.
 494   bool preemption_cancelled()           { return _preemption_cancelled; }
 495   void set_preemption_cancelled(bool b) { _preemption_cancelled = b; }
 496 
 497   bool pending_interrupted_exception()           { return _pending_interrupted_exception; }
 498   void set_pending_interrupted_exception(bool b) { _pending_interrupted_exception = b; }
 499 
       // A preemption is in progress whenever an alternate return address is installed.
 500   bool preempting()           { return _preempt_alternate_return != nullptr; }
 501   void set_preempt_alternate_return(address val) { _preempt_alternate_return = val; }
 502 
 503 private:

























 504 

       // Friends that need access to JavaThread's private state.
 505   friend class VMThread;
 506   friend class ThreadWaitTransition;
 507   friend class VM_Exit;
 508 
 509   // Stack watermark barriers.
 510   StackWatermarks _stack_watermarks;
 511 
 512  public:
 513   inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
 514 
 515  public:
 516   // Constructor
 517   JavaThread(MemTag mem_tag = mtThread);   // delegating constructor
 518   JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MemTag mem_tag = mtThread);
 519   ~JavaThread();
 520 
 521   // Factory method to create a new JavaThread whose attach state is "is attaching"
 522   static JavaThread* create_attaching_thread();
 523 
 524 #ifdef ASSERT

 872   }
 873   static ByteSize shadow_zone_growth_watermark()  {
 874     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
 875   }
 876 
       // Byte offsets of JavaThread fields. NOTE(review): presumably consumed by
       // generated interpreter/compiler code to address fields directly -- confirm.
 877   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
 878 
 879   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
 880   static ByteSize should_post_on_exceptions_flag_offset() {
 881     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
 882   }
 883   static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
 884   NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset()  { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
 885 
 886   static ByteSize monitor_owner_id_offset()   { return byte_offset_of(JavaThread, _monitor_owner_id); }
 887 
       // Offsets for the continuation/preemption support fields.
 888   static ByteSize cont_entry_offset()         { return byte_offset_of(JavaThread, _cont_entry); }
 889   static ByteSize cont_fastpath_offset()      { return byte_offset_of(JavaThread, _cont_fastpath); }
 890   static ByteSize preemption_cancelled_offset()  { return byte_offset_of(JavaThread, _preemption_cancelled); }
 891   static ByteSize preempt_alternate_return_offset() { return byte_offset_of(JavaThread, _preempt_alternate_return); }

 892   static ByteSize unlocked_inflated_monitor_offset() { return byte_offset_of(JavaThread, _unlocked_inflated_monitor); }
 893 
 894 #if INCLUDE_JVMTI
 895   static ByteSize is_in_VTMS_transition_offset()     { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
 896   static ByteSize is_disable_suspend_offset()        { return byte_offset_of(JavaThread, _is_disable_suspend); }
 897 #endif
 898 
 899   // Returns the jni environment for this thread
 900   JNIEnv* jni_environment()                      { return &_jni_environment; }
 901 
 902   // Returns the current thread as indicated by the given JNIEnv.
 903   // We don't assert it is Thread::current here as that is done at the
 904   // external JNI entry points where the JNIEnv is passed into the VM.
 905   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
 906     JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
 907     // We can't normally get here in a thread that has completed its
 908     // execution and so "is_terminated", except when the call is from
 909     // AsyncGetCallTrace, which can be triggered by a signal at any point in
 910     // a thread's lifecycle. A thread is also considered terminated if the VM
 911     // has exited, so we have to check this and block in case this is a daemon

1313   public:
       // Saves the thread's do-not-unlock-if-synchronized flag, clears it for
       // the duration of the scope, and restores the saved value on exit (RAII).
1314     UnlockFlagSaver(JavaThread* t) {
1315       _thread = t;
1316       _do_not_unlock = t->do_not_unlock_if_synchronized();
1317       t->set_do_not_unlock_if_synchronized(false);
1318     }
1319     ~UnlockFlagSaver() {
1320       _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1321     }
1322 };
1323 
1324 class JNIHandleMark : public StackObj {
1325   JavaThread* _thread;
1326  public:
1327   JNIHandleMark(JavaThread* thread) : _thread(thread) {
1328     thread->push_jni_handle_block();
1329   }
1330   ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1331 };
1332 

















1333 class NoPreemptMark {
1334   ContinuationEntry* _ce;
1335   bool _unpin;
1336  public:
1337   NoPreemptMark(JavaThread* thread) : _ce(thread->last_continuation()), _unpin(false) {
1338     if (_ce != nullptr) _unpin = _ce->pin();
1339   }
1340   ~NoPreemptMark() { if (_unpin) _ce->unpin(); }
1341 };
1342 
1343 class ThreadOnMonitorWaitedEvent {
1344   JavaThread* _thread;
1345  public:
1346   ThreadOnMonitorWaitedEvent(JavaThread* thread) : _thread(thread) {
1347     JVMTI_ONLY(_thread->set_on_monitor_waited_event(true);)
1348   }
1349   ~ThreadOnMonitorWaitedEvent() { JVMTI_ONLY(_thread->set_on_monitor_waited_event(false);) }
1350 };
1351 
1352 class ThreadInClassInitializer : public StackObj {
1353   JavaThread* _thread;
1354   InstanceKlass* _previous;
1355  public:
1356   ThreadInClassInitializer(JavaThread* thread, InstanceKlass* ik) : _thread(thread) {
1357     _previous = _thread->class_being_initialized();
1358     _thread->set_class_being_initialized(ik);
1359   }
1360   ~ThreadInClassInitializer() {
1361     _thread->set_class_being_initialized(_previous);
1362   }
1363 };
1364 











1365 #endif // SHARE_RUNTIME_JAVATHREAD_HPP

 472   int _frames_to_pop_failed_realloc;  // NOTE(review): presumably the number of frames to pop after a failed scalar-replacement realloc during deopt -- confirm against deoptimization code
 473 
 474   ContinuationEntry* _cont_entry;  // NOTE(review): presumably this thread's current (innermost) continuation entry -- confirm against continuation code
 475   intptr_t* _cont_fastpath; // the sp of the oldest known interpreted/call_stub/upcall_stub/native_wrapper
 476                             // frame inside the continuation that we know about
 477   int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
 478 
 479   ObjectMonitor* _unlocked_inflated_monitor;  // NOTE(review): semantics not visible in this chunk -- confirm against monitor/unlock code
 480 
 481   // This is the field we poke in the interpreter and native
 482   // wrapper (Object.wait) to check for preemption.
 483   address _preempt_alternate_return;
 484   // When preempting on monitorenter we could have acquired the
 485   // monitor after freezing all vthread frames. In that case we
 486   // set this field so that in the preempt stub we call thaw again
 487   // instead of unmounting.
 488   bool _preemption_cancelled;
 489   // For Object.wait() we set this field to know if we need to
 490   // throw IE (InterruptedException) at the end of thawing before returning to Java.
 491   bool _pending_interrupted_exception;
 492   // We allow preemption on some klass initialization calls.
 493   // We use this boolean to mark such calls.
 494   bool _at_preemptable_init;
 495 
 496  public:
       // Simple accessors for the preemption support fields above.
 497   bool preemption_cancelled()           { return _preemption_cancelled; }
 498   void set_preemption_cancelled(bool b) { _preemption_cancelled = b; }
 499 
 500   bool pending_interrupted_exception()           { return _pending_interrupted_exception; }
 501   void set_pending_interrupted_exception(bool b) { _pending_interrupted_exception = b; }
 502 
       // A preemption is in progress whenever an alternate return address is installed.
 503   bool preempting()                              { return _preempt_alternate_return != nullptr; }
 504   void set_preempt_alternate_return(address val) { _preempt_alternate_return = val; }
 505 
 506   bool at_preemptable_init() { return _at_preemptable_init; }
 507   void set_at_preemptable_init(bool b) { _at_preemptable_init = b; }
 508 
 509 #ifdef ASSERT
 510   // Used for extra logging with -Xlog:continuation+preempt
 511   InstanceKlass* _preempt_init_klass;
 512 
 513   InstanceKlass* preempt_init_klass() { return _preempt_init_klass; }
 514   void set_preempt_init_klass(InstanceKlass* ik) { _preempt_init_klass = ik; }
 515 
       // Nesting depth of preemptable VM calls made from the interpreter
       // (debug-only bookkeeping; see AtRedoVMCall below).
 516   int _interp_at_preemptable_vmcall_cnt;
 517   int interp_at_preemptable_vmcall_cnt() { return _interp_at_preemptable_vmcall_cnt; }
 518 
       // RAII helper: increments the counter above on entry and decrements it
       // on exit, asserting the count stays non-negative and balanced.
 519   class AtRedoVMCall : public StackObj {
 520     JavaThread* _thread;
 521    public:
 522     AtRedoVMCall(JavaThread *t) : _thread(t) {
 523       _thread->_interp_at_preemptable_vmcall_cnt++;
 524       assert(_thread->_interp_at_preemptable_vmcall_cnt > 0, "");
 525     }
 526     ~AtRedoVMCall() {
 527       _thread->_interp_at_preemptable_vmcall_cnt--;
 528       assert(_thread->_interp_at_preemptable_vmcall_cnt >= 0, "");
 529     }
 530   };
 531 #endif
 532 
 533 private:
       // Friends that need access to JavaThread's private state.
 534   friend class VMThread;
 535   friend class ThreadWaitTransition;
 536   friend class VM_Exit;
 537 
 538   // Stack watermark barriers.
 539   StackWatermarks _stack_watermarks;
 540 
 541  public:
 542   inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
 543 
 544  public:
 545   // Constructor
 546   JavaThread(MemTag mem_tag = mtThread);   // delegating constructor
 547   JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MemTag mem_tag = mtThread);
 548   ~JavaThread();
 549 
 550   // Factory method to create a new JavaThread whose attach state is "is attaching"
 551   static JavaThread* create_attaching_thread();
 552 
 553 #ifdef ASSERT

 901   }
 902   static ByteSize shadow_zone_growth_watermark()  {
 903     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
 904   }
 905 
       // Byte offsets of JavaThread fields. NOTE(review): presumably consumed by
       // generated interpreter/compiler code to address fields directly -- confirm.
 906   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
 907 
 908   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
 909   static ByteSize should_post_on_exceptions_flag_offset() {
 910     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
 911   }
 912   static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
 913   NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset()  { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
 914 
 915   static ByteSize monitor_owner_id_offset()   { return byte_offset_of(JavaThread, _monitor_owner_id); }
 916 
       // Offsets for the continuation/preemption support fields.
 917   static ByteSize cont_entry_offset()         { return byte_offset_of(JavaThread, _cont_entry); }
 918   static ByteSize cont_fastpath_offset()      { return byte_offset_of(JavaThread, _cont_fastpath); }
 919   static ByteSize preemption_cancelled_offset()  { return byte_offset_of(JavaThread, _preemption_cancelled); }
 920   static ByteSize preempt_alternate_return_offset() { return byte_offset_of(JavaThread, _preempt_alternate_return); }
 921   DEBUG_ONLY(static ByteSize interp_at_preemptable_vmcall_cnt_offset() { return byte_offset_of(JavaThread, _interp_at_preemptable_vmcall_cnt); })
 922   static ByteSize unlocked_inflated_monitor_offset() { return byte_offset_of(JavaThread, _unlocked_inflated_monitor); }
 923 
 924 #if INCLUDE_JVMTI
 925   static ByteSize is_in_VTMS_transition_offset()     { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
 926   static ByteSize is_disable_suspend_offset()        { return byte_offset_of(JavaThread, _is_disable_suspend); }
 927 #endif
 928 
 929   // Returns the jni environment for this thread
 930   JNIEnv* jni_environment()                      { return &_jni_environment; }
 931 
 932   // Returns the current thread as indicated by the given JNIEnv.
 933   // We don't assert it is Thread::current here as that is done at the
 934   // external JNI entry points where the JNIEnv is passed into the VM.
 935   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
 936     JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
 937     // We can't normally get here in a thread that has completed its
 938     // execution and so "is_terminated", except when the call is from
 939     // AsyncGetCallTrace, which can be triggered by a signal at any point in
 940     // a thread's lifecycle. A thread is also considered terminated if the VM
 941     // has exited, so we have to check this and block in case this is a daemon

1343   public:
       // Saves the thread's do-not-unlock-if-synchronized flag, clears it for
       // the duration of the scope, and restores the saved value on exit (RAII).
1344     UnlockFlagSaver(JavaThread* t) {
1345       _thread = t;
1346       _do_not_unlock = t->do_not_unlock_if_synchronized();
1347       t->set_do_not_unlock_if_synchronized(false);
1348     }
1349     ~UnlockFlagSaver() {
1350       _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1351     }
1352 };
1353 
1354 class JNIHandleMark : public StackObj {
1355   JavaThread* _thread;
1356  public:
1357   JNIHandleMark(JavaThread* thread) : _thread(thread) {
1358     thread->push_jni_handle_block();
1359   }
1360   ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1361 };
1362 
1363 class PreemptableInitCall {
1364   JavaThread* _thread;
1365   bool _previous;
1366   DEBUG_ONLY(InstanceKlass* _previous_klass;)
1367  public:
1368   PreemptableInitCall(JavaThread* thread, InstanceKlass* ik) : _thread(thread) {
1369     _previous = thread->at_preemptable_init();
1370     _thread->set_at_preemptable_init(true);
1371     DEBUG_ONLY(_previous_klass = _thread->preempt_init_klass();)
1372     DEBUG_ONLY(_thread->set_preempt_init_klass(ik));
1373   }
1374   ~PreemptableInitCall() {
1375     _thread->set_at_preemptable_init(_previous);
1376     DEBUG_ONLY(_thread->set_preempt_init_klass(_previous_klass));
1377   }
1378 };
1379 
1380 class NoPreemptMark {
1381   ContinuationEntry* _ce;
1382   bool _unpin;
1383  public:
1384   NoPreemptMark(JavaThread* thread, bool ignore_mark = false) : _ce(thread->last_continuation()), _unpin(false) {
1385     if (_ce != nullptr && !ignore_mark) _unpin = _ce->pin();
1386   }
1387   ~NoPreemptMark() { if (_unpin) _ce->unpin(); }
1388 };
1389 
1390 class ThreadOnMonitorWaitedEvent {
1391   JavaThread* _thread;
1392  public:
1393   ThreadOnMonitorWaitedEvent(JavaThread* thread) : _thread(thread) {
1394     JVMTI_ONLY(_thread->set_on_monitor_waited_event(true);)
1395   }
1396   ~ThreadOnMonitorWaitedEvent() { JVMTI_ONLY(_thread->set_on_monitor_waited_event(false);) }
1397 };
1398 
1399 class ThreadInClassInitializer : public StackObj {
1400   JavaThread* _thread;
1401   InstanceKlass* _previous;
1402  public:
1403   ThreadInClassInitializer(JavaThread* thread, InstanceKlass* ik) : _thread(thread) {
1404     _previous = _thread->class_being_initialized();
1405     _thread->set_class_being_initialized(ik);
1406   }
1407   ~ThreadInClassInitializer() {
1408     _thread->set_class_being_initialized(_previous);
1409   }
1410 };
1411 
1412 class ThreadWaitingForClassInit : public StackObj {
1413   JavaThread* _thread;
1414  public:
1415   ThreadWaitingForClassInit(JavaThread* thread, InstanceKlass* ik) : _thread(thread) {
1416     _thread->set_class_to_be_initialized(ik);
1417   }
1418   ~ThreadWaitingForClassInit() {
1419     _thread->set_class_to_be_initialized(nullptr);
1420   }
1421 };
1422 
1423 #endif // SHARE_RUNTIME_JAVATHREAD_HPP
< prev index next >