src/hotspot/share/runtime/deoptimization.cpp

*** 1626,10 ***
--- 1626,11 ---
  
  // relock objects for which synchronization was eliminated
  bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
                                      JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
    bool relocked_objects = false;
+   LOOM_MONITOR_SUPPORT_ONLY(int compensate_extra_increment = 0;)
    for (int i = 0; i < monitors->length(); i++) {
      MonitorInfo* mon_info = monitors->at(i);
      if (mon_info->eliminated()) {
        assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
        relocked_objects = true;

*** 1641,10 ***
--- 1642,11 ---
              // With exec_mode == Unpack_none obj may be thread local and locked in
              // a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
              markWord dmw = mark.displaced_mark_helper();
              mark.locker()->set_displaced_header(markWord::encode((BasicLock*) nullptr));
              obj->set_mark(dmw);
+             LOOM_MONITOR_SUPPORT_ONLY(compensate_extra_increment++;)
            }
            if (mark.has_monitor()) {
              // defer relocking if the deoptee thread is currently waiting for obj
              ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
              if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) {

*** 1660,19 ***
            // Inflate the locks instead. Enter then inflate to avoid races with
            // deflation.
            ObjectSynchronizer::enter_for(obj, nullptr, deoptee_thread);
            assert(mon_info->owner()->is_locked(), "object must be locked now");
            ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
!           assert(mon->owner() == deoptee_thread, "must be");
          } else {
            BasicLock* lock = mon_info->lock();
            ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
            assert(mon_info->owner()->is_locked(), "object must be locked now");
          }
        }
      }
    }
    return relocked_objects;
  }
  #endif // COMPILER2_OR_JVMCI
  
  vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
--- 1662,20 ---
            // Inflate the locks instead. Enter then inflate to avoid races with
            // deflation.
            ObjectSynchronizer::enter_for(obj, nullptr, deoptee_thread);
            assert(mon_info->owner()->is_locked(), "object must be locked now");
            ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
!           assert(mon->is_owner(deoptee_thread), "must be");
          } else {
            BasicLock* lock = mon_info->lock();
            ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
            assert(mon_info->owner()->is_locked(), "object must be locked now");
          }
        }
      }
    }
+   LOOM_MONITOR_SUPPORT_ONLY(deoptee_thread->dec_held_monitor_count(compensate_extra_increment);)
    return relocked_objects;
  }
  #endif // COMPILER2_OR_JVMCI
  
  vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {