< prev index next >

src/hotspot/share/runtime/javaThread.hpp

Print this page

 467   int _frames_to_pop_failed_realloc;
 468 
 469   ContinuationEntry* _cont_entry;
 470   intptr_t* _cont_fastpath; // the sp of the oldest known interpreted/call_stub/upcall_stub/native_wrapper
 471                             // frame inside the continuation that we know about
 472   int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
 473 
 474   ObjectMonitor* _unlocked_inflated_monitor;
 475 
 476   // This is the field we poke in the interpreter and native
 477   // wrapper (Object.wait) to check for preemption.
 478   address _preempt_alternate_return;
 479   // When preempting on monitorenter we could have acquired the
 480   // monitor after freezing all vthread frames. In that case we
 481   // set this field so that in the preempt stub we call thaw again
 482   // instead of unmounting.
 483   bool _preemption_cancelled;
 484   // For Object.wait() we set this field to know if we need to
 485   // throw InterruptedException at the end of thawing before returning to Java.
 486   bool _pending_interrupted_exception;



 487 
 488  public:
 489   bool preemption_cancelled()           { return _preemption_cancelled; }
 490   void set_preemption_cancelled(bool b) { _preemption_cancelled = b; }
 491 
 492   bool pending_interrupted_exception()           { return _pending_interrupted_exception; }
 493   void set_pending_interrupted_exception(bool b) { _pending_interrupted_exception = b; }
 494 
 495   bool preempting()           { return _preempt_alternate_return != nullptr; }
 496   void set_preempt_alternate_return(address val) { _preempt_alternate_return = val; }
 497 
 498 private:

























 499 

 500   friend class VMThread;
 501   friend class ThreadWaitTransition;
 502   friend class VM_Exit;
 503 
 504   // Stack watermark barriers.
 505   StackWatermarks _stack_watermarks;
 506 
 507  public:
 508   inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
 509 
 510  public:
 511   // Constructor
 512   JavaThread(MemTag mem_tag = mtThread);   // delegating constructor
 513   JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MemTag mem_tag = mtThread);
 514   ~JavaThread();
 515 
 516   // Factory method to create a new JavaThread whose attach state is "is attaching"
 517   static JavaThread* create_attaching_thread();
 518 
 519 #ifdef ASSERT

 864   }
 865   static ByteSize shadow_zone_growth_watermark()  {
 866     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
 867   }
 868 
 869   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
 870 
 871   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
 872   static ByteSize should_post_on_exceptions_flag_offset() {
 873     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
 874   }
 875   static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
 876   NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset()  { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
 877 
 878   static ByteSize monitor_owner_id_offset()   { return byte_offset_of(JavaThread, _monitor_owner_id); }
 879 
 880   static ByteSize cont_entry_offset()         { return byte_offset_of(JavaThread, _cont_entry); }
 881   static ByteSize cont_fastpath_offset()      { return byte_offset_of(JavaThread, _cont_fastpath); }
 882   static ByteSize preemption_cancelled_offset()  { return byte_offset_of(JavaThread, _preemption_cancelled); }
 883   static ByteSize preempt_alternate_return_offset() { return byte_offset_of(JavaThread, _preempt_alternate_return); }

 884   static ByteSize unlocked_inflated_monitor_offset() { return byte_offset_of(JavaThread, _unlocked_inflated_monitor); }
 885 
 886 #if INCLUDE_JVMTI
 887   static ByteSize is_in_VTMS_transition_offset()     { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
 888   static ByteSize is_disable_suspend_offset()        { return byte_offset_of(JavaThread, _is_disable_suspend); }
 889 #endif
 890 
 891   // Returns the jni environment for this thread
 892   JNIEnv* jni_environment()                      { return &_jni_environment; }
 893 
 894   // Returns the current thread as indicated by the given JNIEnv.
 895   // We don't assert it is Thread::current here as that is done at the
 896   // external JNI entry points where the JNIEnv is passed into the VM.
 897   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
 898     JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
 899     // We can't normally get here in a thread that has completed its
 900     // execution and so "is_terminated", except when the call is from
 901     // AsyncGetCallTrace, which can be triggered by a signal at any point in
 902     // a thread's lifecycle. A thread is also considered terminated if the VM
 903     // has exited, so we have to check this and block in case this is a daemon

1305   public:
1306     UnlockFlagSaver(JavaThread* t) {
1307       _thread = t;
1308       _do_not_unlock = t->do_not_unlock_if_synchronized();
1309       t->set_do_not_unlock_if_synchronized(false);
1310     }
1311     ~UnlockFlagSaver() {
1312       _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1313     }
1314 };
1315 
1316 class JNIHandleMark : public StackObj {
1317   JavaThread* _thread;
1318  public:
1319   JNIHandleMark(JavaThread* thread) : _thread(thread) {
1320     thread->push_jni_handle_block();
1321   }
1322   ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1323 };
1324 

















1325 class NoPreemptMark {
1326   ContinuationEntry* _ce;
1327   bool _unpin;
1328  public:
1329   NoPreemptMark(JavaThread* thread) : _ce(thread->last_continuation()), _unpin(false) {
1330     if (_ce != nullptr) _unpin = _ce->pin();
1331   }
1332   ~NoPreemptMark() { if (_unpin) _ce->unpin(); }
1333 };
1334 
1335 class ThreadOnMonitorWaitedEvent {
1336   JavaThread* _thread;
1337  public:
1338   ThreadOnMonitorWaitedEvent(JavaThread* thread) : _thread(thread) {
1339     JVMTI_ONLY(_thread->set_on_monitor_waited_event(true);)
1340   }
1341   ~ThreadOnMonitorWaitedEvent() { JVMTI_ONLY(_thread->set_on_monitor_waited_event(false);) }
1342 };
1343 
1344 class ThreadInClassInitializer : public StackObj {
1345   JavaThread* _thread;
1346   InstanceKlass* _previous;
1347  public:
1348   ThreadInClassInitializer(JavaThread* thread, InstanceKlass* ik) : _thread(thread) {
1349     _previous = _thread->class_being_initialized();
1350     _thread->set_class_being_initialized(ik);
1351   }
1352   ~ThreadInClassInitializer() {
1353     _thread->set_class_being_initialized(_previous);
1354   }
1355 };
1356 











1357 #endif // SHARE_RUNTIME_JAVATHREAD_HPP

 467   int _frames_to_pop_failed_realloc;
 468 
 469   ContinuationEntry* _cont_entry;
 470   intptr_t* _cont_fastpath; // the sp of the oldest known interpreted/call_stub/upcall_stub/native_wrapper
 471                             // frame inside the continuation that we know about
 472   int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
 473 
 474   ObjectMonitor* _unlocked_inflated_monitor;
 475 
 476   // This is the field we poke in the interpreter and native
 477   // wrapper (Object.wait) to check for preemption.
 478   address _preempt_alternate_return;
 479   // When preempting on monitorenter we could have acquired the
 480   // monitor after freezing all vthread frames. In that case we
 481   // set this field so that in the preempt stub we call thaw again
 482   // instead of unmounting.
 483   bool _preemption_cancelled;
 484   // For Object.wait() we set this field to know if we need to
 485   // throw InterruptedException at the end of thawing before returning to Java.
 486   bool _pending_interrupted_exception;
 487   // We allow preemption on some klass initialization calls.
 488   // We use this boolean to mark such calls.
 489   bool _at_preemptable_init;
 490 
 491  public:
 492   bool preemption_cancelled()           { return _preemption_cancelled; }
 493   void set_preemption_cancelled(bool b) { _preemption_cancelled = b; }
 494 
 495   bool pending_interrupted_exception()           { return _pending_interrupted_exception; }
 496   void set_pending_interrupted_exception(bool b) { _pending_interrupted_exception = b; }
 497 
 498   bool preempting()                              { return _preempt_alternate_return != nullptr; }
 499   void set_preempt_alternate_return(address val) { _preempt_alternate_return = val; }
 500 
 501   bool at_preemptable_init() { return _at_preemptable_init; }
 502   void set_at_preemptable_init(bool b) { _at_preemptable_init = b; }
 503 
 504 #ifdef ASSERT
 505   // Used for extra logging with -Xlog:continuation+preempt
 506   InstanceKlass* _preempt_init_klass;
 507 
 508   InstanceKlass* preempt_init_klass() { return _preempt_init_klass; }
 509   void set_preempt_init_klass(InstanceKlass* ik) { _preempt_init_klass = ik; }
 510 
 511   int _interp_at_preemptable_vmcall_cnt;
 512   int interp_at_preemptable_vmcall_cnt() { return _interp_at_preemptable_vmcall_cnt; }
 513 
 514   class AtRedoVMCall : public StackObj {
 515     JavaThread* _thread;
 516    public:
 517     AtRedoVMCall(JavaThread *t) : _thread(t) {
 518       _thread->_interp_at_preemptable_vmcall_cnt++;
 519       assert(_thread->_interp_at_preemptable_vmcall_cnt > 0, "");
 520     }
 521     ~AtRedoVMCall() {
 522       _thread->_interp_at_preemptable_vmcall_cnt--;
 523       assert(_thread->_interp_at_preemptable_vmcall_cnt >= 0, "");
 524     }
 525   };
 526 #endif
 527 
 528 private:
 529   friend class VMThread;
 530   friend class ThreadWaitTransition;
 531   friend class VM_Exit;
 532 
 533   // Stack watermark barriers.
 534   StackWatermarks _stack_watermarks;
 535 
 536  public:
 537   inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; }
 538 
 539  public:
 540   // Constructor
 541   JavaThread(MemTag mem_tag = mtThread);   // delegating constructor
 542   JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MemTag mem_tag = mtThread);
 543   ~JavaThread();
 544 
 545   // Factory method to create a new JavaThread whose attach state is "is attaching"
 546   static JavaThread* create_attaching_thread();
 547 
 548 #ifdef ASSERT

 893   }
 894   static ByteSize shadow_zone_growth_watermark()  {
 895     return byte_offset_of(JavaThread, _stack_overflow_state._shadow_zone_growth_watermark);
 896   }
 897 
 898   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
 899 
 900   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
 901   static ByteSize should_post_on_exceptions_flag_offset() {
 902     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
 903   }
 904   static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
 905   NOT_PRODUCT(static ByteSize requires_cross_modify_fence_offset()  { return byte_offset_of(JavaThread, _requires_cross_modify_fence); })
 906 
 907   static ByteSize monitor_owner_id_offset()   { return byte_offset_of(JavaThread, _monitor_owner_id); }
 908 
 909   static ByteSize cont_entry_offset()         { return byte_offset_of(JavaThread, _cont_entry); }
 910   static ByteSize cont_fastpath_offset()      { return byte_offset_of(JavaThread, _cont_fastpath); }
 911   static ByteSize preemption_cancelled_offset()  { return byte_offset_of(JavaThread, _preemption_cancelled); }
 912   static ByteSize preempt_alternate_return_offset() { return byte_offset_of(JavaThread, _preempt_alternate_return); }
 913   DEBUG_ONLY(static ByteSize interp_at_preemptable_vmcall_cnt_offset() { return byte_offset_of(JavaThread, _interp_at_preemptable_vmcall_cnt); })
 914   static ByteSize unlocked_inflated_monitor_offset() { return byte_offset_of(JavaThread, _unlocked_inflated_monitor); }
 915 
 916 #if INCLUDE_JVMTI
 917   static ByteSize is_in_VTMS_transition_offset()     { return byte_offset_of(JavaThread, _is_in_VTMS_transition); }
 918   static ByteSize is_disable_suspend_offset()        { return byte_offset_of(JavaThread, _is_disable_suspend); }
 919 #endif
 920 
 921   // Returns the jni environment for this thread
 922   JNIEnv* jni_environment()                      { return &_jni_environment; }
 923 
 924   // Returns the current thread as indicated by the given JNIEnv.
 925   // We don't assert it is Thread::current here as that is done at the
 926   // external JNI entry points where the JNIEnv is passed into the VM.
 927   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
 928     JavaThread* current = reinterpret_cast<JavaThread*>(((intptr_t)env - in_bytes(jni_environment_offset())));
 929     // We can't normally get here in a thread that has completed its
 930     // execution and so "is_terminated", except when the call is from
 931     // AsyncGetCallTrace, which can be triggered by a signal at any point in
 932     // a thread's lifecycle. A thread is also considered terminated if the VM
 933     // has exited, so we have to check this and block in case this is a daemon

1335   public:
1336     UnlockFlagSaver(JavaThread* t) {
1337       _thread = t;
1338       _do_not_unlock = t->do_not_unlock_if_synchronized();
1339       t->set_do_not_unlock_if_synchronized(false);
1340     }
1341     ~UnlockFlagSaver() {
1342       _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
1343     }
1344 };
1345 
1346 class JNIHandleMark : public StackObj {
1347   JavaThread* _thread;
1348  public:
1349   JNIHandleMark(JavaThread* thread) : _thread(thread) {
1350     thread->push_jni_handle_block();
1351   }
1352   ~JNIHandleMark() { _thread->pop_jni_handle_block(); }
1353 };
1354 
1355 class PreemptableInitCall {
1356   JavaThread* _thread;
1357   bool _previous;
1358   DEBUG_ONLY(InstanceKlass* _previous_klass;)
1359  public:
1360   PreemptableInitCall(JavaThread* thread, InstanceKlass* ik) : _thread(thread) {
1361     _previous = thread->at_preemptable_init();
1362     _thread->set_at_preemptable_init(true);
1363     DEBUG_ONLY(_previous_klass = _thread->preempt_init_klass();)
1364     DEBUG_ONLY(_thread->set_preempt_init_klass(ik));
1365   }
1366   ~PreemptableInitCall() {
1367     _thread->set_at_preemptable_init(_previous);
1368     DEBUG_ONLY(_thread->set_preempt_init_klass(_previous_klass));
1369   }
1370 };
1371 
1372 class NoPreemptMark {
1373   ContinuationEntry* _ce;
1374   bool _unpin;
1375  public:
1376   NoPreemptMark(JavaThread* thread, bool ignore_mark = false) : _ce(thread->last_continuation()), _unpin(false) {
1377     if (_ce != nullptr && !ignore_mark) _unpin = _ce->pin();
1378   }
1379   ~NoPreemptMark() { if (_unpin) _ce->unpin(); }
1380 };
1381 
1382 class ThreadOnMonitorWaitedEvent {
1383   JavaThread* _thread;
1384  public:
1385   ThreadOnMonitorWaitedEvent(JavaThread* thread) : _thread(thread) {
1386     JVMTI_ONLY(_thread->set_on_monitor_waited_event(true);)
1387   }
1388   ~ThreadOnMonitorWaitedEvent() { JVMTI_ONLY(_thread->set_on_monitor_waited_event(false);) }
1389 };
1390 
1391 class ThreadInClassInitializer : public StackObj {
1392   JavaThread* _thread;
1393   InstanceKlass* _previous;
1394  public:
1395   ThreadInClassInitializer(JavaThread* thread, InstanceKlass* ik) : _thread(thread) {
1396     _previous = _thread->class_being_initialized();
1397     _thread->set_class_being_initialized(ik);
1398   }
1399   ~ThreadInClassInitializer() {
1400     _thread->set_class_being_initialized(_previous);
1401   }
1402 };
1403 
1404 class ThreadWaitingForClassInit : public StackObj {
1405   JavaThread* _thread;
1406  public:
1407   ThreadWaitingForClassInit(JavaThread* thread, InstanceKlass* ik) : _thread(thread) {
1408     _thread->set_class_to_be_initialized(ik);
1409   }
1410   ~ThreadWaitingForClassInit() {
1411     _thread->set_class_to_be_initialized(nullptr);
1412   }
1413 };
1414 
1415 #endif // SHARE_RUNTIME_JAVATHREAD_HPP
< prev index next >