< prev index next >

src/hotspot/share/runtime/deoptimization.cpp

Print this page
*** 54,10 ***
--- 54,11 ---
  #include "prims/jvmtiExport.hpp"
  #include "prims/jvmtiThreadState.hpp"
  #include "prims/vectorSupport.hpp"
  #include "prims/methodHandles.hpp"
  #include "runtime/atomic.hpp"
+ #include "runtime/continuation.hpp"
  #include "runtime/deoptimization.hpp"
  #include "runtime/escapeBarrier.hpp"
  #include "runtime/fieldDescriptor.hpp"
  #include "runtime/fieldDescriptor.inline.hpp"
  #include "runtime/frame.inline.hpp"

*** 164,11 ***
    // fetch_unroll_info() is called at the beginning of the deoptimization
    // handler. Note this fact before we start generating temporary frames
    // that can confuse an asynchronous stack walker. This counter is
    // decremented at the end of unpack_frames().
    if (TraceDeoptimization) {
!     tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(current));
    }
    current->inc_in_deopt_handler();
  
    if (exec_mode == Unpack_exception) {
      // When we get here, a callee has thrown an exception into a deoptimized
--- 165,11 ---
    // fetch_unroll_info() is called at the beginning of the deoptimization
    // handler. Note this fact before we start generating temporary frames
    // that can confuse an asynchronous stack walker. This counter is
    // decremented at the end of unpack_frames().
    if (TraceDeoptimization) {
!     tty->print_cr("Deoptimizing thread " INTPTR_FORMAT " [%ld]", p2i(current), (long) current->osthread()->thread_id());
    }
    current->inc_in_deopt_handler();
  
    if (exec_mode == Unpack_exception) {
      // When we get here, a callee has thrown an exception into a deoptimized

*** 440,10 ***
--- 441,15 ---
    }
  
    vframeArray* array = create_vframeArray(current, deoptee, &map, chunk, realloc_failures);
  #if COMPILER2_OR_JVMCI
    if (realloc_failures) {
+     // FIXME: This very crudely destroys all ScopeLocal bindings. This
+     // is better than a bound value escaping, but far from ideal.
+     oop java_thread = current->threadObj();
+     current->set_scopeLocalCache(NULL);
+     java_lang_Thread::clear_scopeLocalBindings(java_thread);
      pop_frames_failed_reallocs(current, array);
    }
  #endif
  
    assert(current->vframe_array_head() == NULL, "Pending deopt!");

*** 583,24 ***
    // may not even be enough space).
  
    // QQQ I'd rather see this pushed down into last_frame_adjust
    // and have it take the sender (aka caller).
  
!   if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
      caller_adjustment = last_frame_adjust(0, callee_locals);
    } else if (callee_locals > callee_parameters) {
      // The caller frame may need extending to accommodate
      // non-parameter locals of the first unpacked interpreted frame.
      // Compute that adjustment.
      caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
    }
  
  // If the sender is deoptimized then we must retrieve the address of the handler
    // since the frame will "magically" show the original pc before the deopt
    // and we'd undo the deopt.
  
!   frame_pcs[0] = deopt_sender.raw_pc();
  
    assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
  
  #if INCLUDE_JVMCI
    if (exceptionObject() != NULL) {
--- 589,48 ---
    // may not even be enough space).
  
    // QQQ I'd rather see this pushed down into last_frame_adjust
    // and have it take the sender (aka caller).
  
!   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
      caller_adjustment = last_frame_adjust(0, callee_locals);
    } else if (callee_locals > callee_parameters) {
      // The caller frame may need extending to accommodate
      // non-parameter locals of the first unpacked interpreted frame.
      // Compute that adjustment.
      caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
    }
  
+   // We always push the stack to make room for parameters, even if the caller is interpreted and has the parameters on the stack; this makes Loom continuation code simpler.
+   // ... except if we've already done it, which can happen if the deoptimized frame becomes OSR and then deoptimized again.
+   // if (deopt_sender.is_interpreted_frame() && deopt_sender.interpreter_frame_last_sp() > deopt_sender.sp() + 1 && callee_locals > callee_parameters) {
+   //   caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
+   // } else {
+   //   caller_adjustment = last_frame_adjust(0, callee_locals);
+   // }
+   
+   // // If the caller is a continuation entry and the callee has a return barrier
+   // // then we cannot use the parameters in the caller.
+   // bool caller_was_continuation_entry = Continuation::is_cont_post_barrier_entry_frame(deopt_sender);
+   // if (deopt_sender.is_compiled_frame() || caller_was_method_handle || caller_was_continuation_entry) {
+   //   caller_adjustment = last_frame_adjust(0, callee_locals);
+   // } else if (callee_locals > callee_parameters) {
+   //   // The caller frame may need extending to accommodate non-parameter locals of the first unpacked interpreted frame.
+   //   caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
+   // }
+ 
+   // tty->print_cr(">>>>> fetch_unroll_info_helper adjustment: %d locals: %d params: %d", caller_adjustment, callee_locals, callee_parameters);
+ 
    // If the sender is deoptimized then we must retrieve the address of the handler
    // since the frame will "magically" show the original pc before the deopt
    // and we'd undo the deopt.
  
!   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
+   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
+     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
+   }
+   // if (Continuation::is_cont_barrier_frame(deoptee)) tty->print_cr("WOWEE Continuation::is_cont_barrier_frame(deoptee)");
  
    assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
  
  #if INCLUDE_JVMCI
    if (exceptionObject() != NULL) {

*** 721,10 ***
--- 751,12 ---
    // cleaned up in this scope.
    ResetNoHandleMark rnhm;
    HandleMark hm(thread);
  
    frame stub_frame = thread->last_frame();
+   
+   Continuation::notify_deopt(thread, stub_frame.sp());
  
    // Since the frame to unpack is the top frame of this thread, the vframe_array_head
    // must point to the vframeArray for the unpack frame.
    vframeArray* array = thread->vframe_array_head();
  

*** 897,18 ***
  
  void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
    ResourceMark rm;
    DeoptimizationMarker dm;
  
    // Make the dependent methods not entrant
    if (nmethod_only != NULL) {
      nmethod_only->mark_for_deoptimization();
      nmethod_only->make_not_entrant();
    } else {
      MutexLocker mu(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock, Mutex::_no_safepoint_check_flag);
!     CodeCache::make_marked_nmethods_not_entrant();
    }
  
    DeoptimizeMarkedClosure deopt;
    if (SafepointSynchronize::is_at_safepoint()) {
      Threads::java_threads_do(&deopt);
    } else {
--- 929,21 ---
  
  void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
    ResourceMark rm;
    DeoptimizationMarker dm;
  
+   GrowableArray<CompiledMethod*>* marked = new GrowableArray<CompiledMethod*>();
    // Make the dependent methods not entrant
    if (nmethod_only != NULL) {
      nmethod_only->mark_for_deoptimization();
      nmethod_only->make_not_entrant();
+     marked->append(nmethod_only);
    } else {
      MutexLocker mu(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock, Mutex::_no_safepoint_check_flag);
!     CodeCache::make_marked_nmethods_not_entrant(marked);
    }
+   CodeCache::make_marked_nmethods_deoptimized(marked);
  
    DeoptimizeMarkedClosure deopt;
    if (SafepointSynchronize::is_at_safepoint()) {
      Threads::java_threads_do(&deopt);
    } else {

*** 1483,10 ***
--- 1518,11 ---
            }
          }
          BasicLock* lock = mon_info->lock();
          ObjectSynchronizer::enter(obj, lock, deoptee_thread);
          assert(mon_info->owner()->is_locked(), "object must be locked now");
+         deoptee_thread->inc_held_monitor_count();
        }
      }
    }
    return relocked_objects;
  }

*** 1566,10 ***
--- 1602,11 ---
      if (monitors != NULL) {
        for (int j = 0; j < monitors->number_of_monitors(); j++) {
          BasicObjectLock* src = monitors->at(j);
          if (src->obj() != NULL) {
            ObjectSynchronizer::exit(src->obj(), src->lock(), thread);
+           thread->dec_held_monitor_count();
          }
        }
        array->element(i)->free_monitors(thread);
  #ifdef ASSERT
        array->element(i)->set_removed_monitors();

*** 1599,10 ***
--- 1636,12 ---
        if (sd->is_top())  break;
      }
      xtty->tail("deoptimized");
    }
  
+   Continuation::notify_deopt(thread, fr.sp());
+ 
    // Patch the compiled method so that when execution returns to it we will
    // deopt the execution state and return to the interpreter.
    fr.deoptimize(thread);
  }
  

*** 1961,10 ***
--- 2000,11 ---
          xtty->end_head();
        }
        if (TraceDeoptimization) {  // make noise on the tty
          tty->print("Uncommon trap occurred in");
          nm->method()->print_short_name(tty);
+         // nm->method()->print_codes_on(tty);
          tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
  #if INCLUDE_JVMCI
          if (nm->is_nmethod()) {
            const char* installed_code_name = nm->as_nmethod()->jvmci_name();
            if (installed_code_name != NULL) {
< prev index next >