
src/hotspot/share/runtime/deoptimization.cpp


  39 #include "interpreter/interpreter.hpp"
  40 #include "interpreter/oopMapCache.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/oopFactory.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "oops/constantPool.hpp"
  46 #include "oops/method.hpp"
  47 #include "oops/objArrayKlass.hpp"
  48 #include "oops/objArrayOop.inline.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "oops/fieldStreams.inline.hpp"
  51 #include "oops/typeArrayOop.inline.hpp"
  52 #include "oops/verifyOopClosure.hpp"
  53 #include "prims/jvmtiDeferredUpdates.hpp"
  54 #include "prims/jvmtiExport.hpp"
  55 #include "prims/jvmtiThreadState.hpp"
  56 #include "prims/vectorSupport.hpp"
  57 #include "prims/methodHandles.hpp"
  58 #include "runtime/atomic.hpp"

  59 #include "runtime/deoptimization.hpp"
  60 #include "runtime/escapeBarrier.hpp"
  61 #include "runtime/fieldDescriptor.hpp"
  62 #include "runtime/fieldDescriptor.inline.hpp"
  63 #include "runtime/frame.inline.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/interfaceSupport.inline.hpp"
  66 #include "runtime/jniHandles.inline.hpp"
  67 #include "runtime/keepStackGCProcessed.hpp"
  68 #include "runtime/objectMonitor.inline.hpp"
  69 #include "runtime/osThread.hpp"
  70 #include "runtime/safepointVerifiers.hpp"
  71 #include "runtime/sharedRuntime.hpp"
  72 #include "runtime/signature.hpp"
  73 #include "runtime/stackFrameStream.inline.hpp"
  74 #include "runtime/stackWatermarkSet.hpp"
  75 #include "runtime/stubRoutines.hpp"
  76 #include "runtime/thread.hpp"
  77 #include "runtime/threadSMR.hpp"
  78 #include "runtime/threadWXSetters.inline.hpp"

 149   st.print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
 150   st.print(   "  frame_sizes: ");
 151   for (int index = 0; index < number_of_frames(); index++) {
 152     st.print(INTX_FORMAT " ", frame_sizes()[index]);
 153   }
 154   st.cr();
 155   tty->print_raw(st.as_string());
 156 }
 157 
 158 
 159 // In order to make fetch_unroll_info work properly with escape
 160 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY.
 161 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 162 // which is called from the method fetch_unroll_info_helper below.
 163 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 164   // fetch_unroll_info() is called at the beginning of the deoptimization
 165   // handler. Note this fact before we start generating temporary frames
 166   // that can confuse an asynchronous stack walker. This counter is
 167   // decremented at the end of unpack_frames().
 168   if (TraceDeoptimization) {
 169     tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(current));
 170   }
 171   current->inc_in_deopt_handler();
 172 
 173   if (exec_mode == Unpack_exception) {
 174     // When we get here, a callee has thrown an exception into a deoptimized
 175     // frame. That throw might have deferred stack watermark checking until
 176     // after unwinding. So we deal with such deferred requests here.
 177     StackWatermarkSet::after_unwind(current);
 178   }
 179 
 180   return fetch_unroll_info_helper(current, exec_mode);
 181 JRT_END
 182 
 183 #if COMPILER2_OR_JVMCI
 184 #ifndef PRODUCT
 185 // print information about reallocated objects
 186 static void print_objects(JavaThread* deoptee_thread,
 187                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 188   ResourceMark rm;
 189   stringStream st;  // change to logStream with logging

 425     restore_eliminated_locks(current, chunk, realloc_failures, deoptee, exec_mode, unused);
 426   }
 427 #endif // COMPILER2_OR_JVMCI
 428 
 429   ScopeDesc* trap_scope = chunk->at(0)->scope();
 430   Handle exceptionObject;
 431   if (trap_scope->rethrow_exception()) {
 432     if (PrintDeoptimizationDetails) {
 433       tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
 434     }
 435     GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
 436     guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
 437     ScopeValue* topOfStack = expressions->top();
 438     exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
 439     guarantee(exceptionObject() != NULL, "exception oop can not be null");
 440   }
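       // (When rethrow_exception() is set, the debug info at the trap bci models
       // the pending exception as the topmost expression-stack value, which is
       // why it can be recovered via expressions->top() above.)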
 441 
 442   vframeArray* array = create_vframeArray(current, deoptee, &map, chunk, realloc_failures);
 443 #if COMPILER2_OR_JVMCI
 444   if (realloc_failures) {
 445     pop_frames_failed_reallocs(current, array);
 446   }
 447 #endif
 448 
 449   assert(current->vframe_array_head() == NULL, "Pending deopt!");
 450   current->set_vframe_array_head(array);
 451 
 452   // Now that the vframeArray has been created, if we have any deferred local writes
 453   // added by jvmti then we can free up that structure as the data is now in the
 454   // vframeArray
 455 
 456   JvmtiDeferredUpdates::delete_updates_for_frame(current, array->original().id());
 457 
 458   // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
 459   CodeBlob* cb = stub_frame.cb();
 460   // Verify we have the right vframeArray
 461   assert(cb->frame_size() >= 0, "Unexpected frame size");
 462   intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
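       // (Note, assuming the usual HotSpot conventions: frame_size() is in words
       // and sp is an intptr_t*, so the addition above is word arithmetic; because
       // the stack grows toward lower addresses, sp() + frame_size() yields the
       // stub frame's sender sp.)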
 463 
 464   // If the deopt call site is a MethodHandle invoke call site we have

 568     methodHandle method(current, array->element(0)->method());
 569     Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
 570     return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
 571   }
 572 
 573   // Compute information for handling adapters and adjusting the frame size of the caller.
 574   int caller_adjustment = 0;
 575 
 576   // Compute the amount the oldest interpreter frame will have to adjust
 577   // its caller's stack by. If the caller is a compiled frame then
 578   // we pretend that the callee has no parameters so that the
 579   // extension counts for the full amount of locals and not just
 580   // locals-parms. This is because without a c2i adapter the parm
 581   // area as created by the compiled frame will not be usable by
 582   // the interpreter. (Depending on the calling convention there
 583   // may not even be enough space).
 584 
 585   // QQQ I'd rather see this pushed down into last_frame_adjust
 586   // and have it take the sender (aka caller).
 587 
 588   if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
 589     caller_adjustment = last_frame_adjust(0, callee_locals);
 590   } else if (callee_locals > callee_parameters) {
 591     // The caller frame may need extending to accommodate
 592     // non-parameter locals of the first unpacked interpreted frame.
 593     // Compute that adjustment.
 594     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 595   }
 596 
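       // A quick worked example of the adjustment above (a sketch; the numbers
       // are hypothetical): suppose the oldest interpreted frame being unpacked
       // has callee_parameters == 3 and callee_locals == 7.
       //   - compiled (or method handle) caller: last_frame_adjust(0, 7) must
       //     make room for all 7 locals, because the compiled caller's outgoing
       //     parameter area is not usable by the interpreter;
       //   - interpreted caller: last_frame_adjust(3, 7) only needs to make room
       //     for the 4 non-parameter locals, since the 3 parameters are already
       //     on the caller's expression stack.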
 597   // If the sender is deoptimized then we must retrieve the address of the handler
 598   // since the frame will "magically" show the original pc before the deopt
 599   // and we'd undo the deopt.
 600 
 601   frame_pcs[0] = deopt_sender.raw_pc();
 602 
 603   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 604 
 605 #if INCLUDE_JVMCI
 606   if (exceptionObject() != NULL) {
 607     current->set_exception_oop(exceptionObject());
 608     exec_mode = Unpack_exception;
 609   }
 610 #endif
 611 
 612   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 613     assert(current->has_pending_exception(), "should have thrown OOME");
 614     current->set_exception_oop(current->pending_exception());
 615     current->clear_pending_exception();
 616     exec_mode = Unpack_exception;
 617   }
 618 
 619 #if INCLUDE_JVMCI
 620   if (current->frames_to_pop_failed_realloc() > 0) {
 621     current->set_pending_monitorenter(false);

 706 
 707   assert(f->is_interpreted_frame(), "must be interpreted");
 708 }
 709 
 710 // Return BasicType of value being returned
 711 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
 712 
 713   // We are already active in the special DeoptResourceMark; any ResourceObj's we
 714   // allocate will be freed at the end of the routine.
 715 
 716   // JRT_LEAF methods don't normally allocate handles and there is a
 717   // NoHandleMark to enforce that. It is actually safe to use Handles
 718   // in a JRT_LEAF method, and sometimes desirable, but to do so we
 719   // must use ResetNoHandleMark to bypass the NoHandleMark, and
 720   // then use a HandleMark to ensure any Handles we do create are
 721   // cleaned up in this scope.
 722   ResetNoHandleMark rnhm;
 723   HandleMark hm(thread);
 724 
 725   frame stub_frame = thread->last_frame();
 726 
 727   // Since the frame to unpack is the top frame of this thread, the vframe_array_head
 728   // must point to the vframeArray for the unpack frame.
 729   vframeArray* array = thread->vframe_array_head();
 730 
 731 #ifndef PRODUCT
 732   if (TraceDeoptimization) {
 733     tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
 734                   p2i(thread), p2i(array), exec_mode);
 735   }
 736 #endif
 737   Events::log_deopt_message(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
 738               p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);
 739 
 740   UnrollBlock* info = array->unroll_block();
 741 
 742   // We set the last_Java frame. But the stack isn't really parsable here. So we
 743   // clear it to make sure JFR understands not to try and walk stacks from events
 744   // in here.
 745   intptr_t* sp = thread->frame_anchor()->last_Java_sp();

 882     }
 883   }
 884 #endif /* !PRODUCT */
 885 
 886   return bt;
 887 JRT_END
 888 
 889 class DeoptimizeMarkedClosure : public HandshakeClosure {
 890  public:
 891   DeoptimizeMarkedClosure() : HandshakeClosure("Deoptimize") {}
 892   void do_thread(Thread* thread) {
 893     JavaThread* jt = JavaThread::cast(thread);
 894     jt->deoptimize_marked_methods();
 895   }
 896 };
 897 
 898 void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
 899   ResourceMark rm;
 900   DeoptimizationMarker dm;
 901 

 902   // Make the dependent methods not entrant
 903   if (nmethod_only != NULL) {
 904     nmethod_only->mark_for_deoptimization();
 905     nmethod_only->make_not_entrant();

 906   } else {
 907     MutexLocker mu(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock, Mutex::_no_safepoint_check_flag);
 908     CodeCache::make_marked_nmethods_not_entrant();
 909   }

 910 
 911   DeoptimizeMarkedClosure deopt;
 912   if (SafepointSynchronize::is_at_safepoint()) {
 913     Threads::java_threads_do(&deopt);
 914   } else {
 915     Handshake::execute(&deopt);
 916   }
 917 }
 918 
 919 Deoptimization::DeoptAction Deoptimization::_unloaded_action
 920   = Deoptimization::Action_reinterpret;
 921 
 922 #if COMPILER2_OR_JVMCI
 923 template<typename CacheType>
 924 class BoxCacheBase : public CHeapObj<mtCompiler> {
 925 protected:
 926   static InstanceKlass* find_cache_klass(Symbol* klass_name) {
 927     ResourceMark rm;
 928     char* klass_name_str = klass_name->as_C_string();
 929     InstanceKlass* ik = SystemDictionary::find_instance_klass(klass_name, Handle(), Handle());

1468             // With exec_mode == Unpack_none obj may be thread local and locked in
1469             // a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
1470             markWord dmw = mark.displaced_mark_helper();
1471             mark.locker()->set_displaced_header(markWord::encode((BasicLock*) NULL));
1472             obj->set_mark(dmw);
1473           }
1474           if (mark.has_monitor()) {
1475             // defer relocking if the deoptee thread is currently waiting for obj
1476             ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
1477             if (waiting_monitor != NULL && waiting_monitor->object() == obj()) {
1478               assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
1479               mon_info->lock()->set_displaced_header(markWord::unused_mark());
1480               JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
1481               continue;
1482             }
1483           }
1484         }
1485         BasicLock* lock = mon_info->lock();
1486         ObjectSynchronizer::enter(obj, lock, deoptee_thread);
1487         assert(mon_info->owner()->is_locked(), "object must be locked now");

1488       }
1489     }
1490   }
1491   return relocked_objects;
1492 }
1493 #endif // COMPILER2_OR_JVMCI
1494 
1495 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
1496   Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));
1497 
1498 #ifndef PRODUCT
1499   if (PrintDeoptimizationDetails) {
1500     ResourceMark rm;
1501     stringStream st;
1502     st.print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread));
1503     fr.print_on(&st);
1504     st.print_cr("     Virtual frames (innermost first):");
1505     for (int index = 0; index < chunk->length(); index++) {
1506       compiledVFrame* vf = chunk->at(index);
1507       st.print("       %2d - ", index);

1551   return array;
1552 }
1553 
1554 #if COMPILER2_OR_JVMCI
1555 void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
1556   // Reallocation of some scalar replaced objects failed. Record
1557   // that we need to pop all the interpreter frames for the
1558   // deoptimized compiled frame.
1559   assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
1560   thread->set_frames_to_pop_failed_realloc(array->frames());
1561   // Unlock all monitors here otherwise the interpreter will see a
1562   // mix of locked and unlocked monitors (because of failed
1563   // reallocations of synchronized objects) and be confused.
1564   for (int i = 0; i < array->frames(); i++) {
1565     MonitorChunk* monitors = array->element(i)->monitors();
1566     if (monitors != NULL) {
1567       for (int j = 0; j < monitors->number_of_monitors(); j++) {
1568         BasicObjectLock* src = monitors->at(j);
1569         if (src->obj() != NULL) {
1570           ObjectSynchronizer::exit(src->obj(), src->lock(), thread);

1571         }
1572       }
1573       array->element(i)->free_monitors(thread);
1574 #ifdef ASSERT
1575       array->element(i)->set_removed_monitors();
1576 #endif
1577     }
1578   }
1579 }
1580 #endif
1581 
1582 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1583   assert(fr.can_be_deoptimized(), "checking frame type");
1584 
1585   gather_statistics(reason, Action_none, Bytecodes::_illegal);
1586 
1587   if (LogCompilation && xtty != NULL) {
1588     CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
1589     assert(cm != NULL, "only compiled methods can deopt");
1590 
1591     ttyLocker ttyl;
1592     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1593     cm->log_identity(xtty);
1594     xtty->end_head();
1595     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1596       xtty->begin_elem("jvms bci='%d'", sd->bci());
1597       xtty->method(sd->method());
1598       xtty->end_elem();
1599       if (sd->is_top())  break;
1600     }
1601     xtty->tail("deoptimized");
1602   }
1603 
1604   // Patch the compiled method so that when execution returns to it we will
1605   // deopt the execution state and return to the interpreter.
1606   fr.deoptimize(thread);
1607 }
1608 
1609 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1610   // Deoptimize only if the frame comes from compiled code.
1611   // Do not deoptimize the frame which is already patched
1612   // during the execution of the loops below.
1613   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1614     return;
1615   }
1616   ResourceMark rm;
1617   DeoptimizationMarker dm;
1618   deoptimize_single_frame(thread, fr, reason);
1619 }
1620 
1621 #if INCLUDE_JVMCI
1622 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1623   // there is no exception handler for this pc => deoptimize

1946         if (dcnt != 0)
1947           xtty->print(" count='%d'", dcnt);
1948         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1949         int dos = (pdata == NULL)? 0: pdata->trap_state();
1950         if (dos != 0) {
1951           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1952           if (trap_state_is_recompiled(dos)) {
1953             int recnt2 = trap_mdo->overflow_recompile_count();
1954             if (recnt2 != 0)
1955               xtty->print(" recompiles2='%d'", recnt2);
1956           }
1957         }
1958       }
1959       if (xtty != NULL) {
1960         xtty->stamp();
1961         xtty->end_head();
1962       }
1963       if (TraceDeoptimization) {  // make noise on the tty
1964         tty->print("Uncommon trap occurred in");
1965         nm->method()->print_short_name(tty);

1966         tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
1967 #if INCLUDE_JVMCI
1968         if (nm->is_nmethod()) {
1969           const char* installed_code_name = nm->as_nmethod()->jvmci_name();
1970           if (installed_code_name != NULL) {
1971             tty->print(" (JVMCI: installed code name=%s) ", installed_code_name);
1972           }
1973         }
1974 #endif
1975         tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
1976                    p2i(fr.pc()),
1977                    os::current_thread_id(),
1978                    trap_reason_name(reason),
1979                    trap_action_name(action),
1980                    unloaded_class_index
1981 #if INCLUDE_JVMCI
1982                    , debug_id
1983 #endif
1984                    );
1985         if (class_name != NULL) {

src/hotspot/share/runtime/deoptimization.cpp (new version)

  39 #include "interpreter/interpreter.hpp"
  40 #include "interpreter/oopMapCache.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/oopFactory.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "oops/constantPool.hpp"
  46 #include "oops/method.hpp"
  47 #include "oops/objArrayKlass.hpp"
  48 #include "oops/objArrayOop.inline.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "oops/fieldStreams.inline.hpp"
  51 #include "oops/typeArrayOop.inline.hpp"
  52 #include "oops/verifyOopClosure.hpp"
  53 #include "prims/jvmtiDeferredUpdates.hpp"
  54 #include "prims/jvmtiExport.hpp"
  55 #include "prims/jvmtiThreadState.hpp"
  56 #include "prims/vectorSupport.hpp"
  57 #include "prims/methodHandles.hpp"
  58 #include "runtime/atomic.hpp"
  59 #include "runtime/continuation.hpp"
  60 #include "runtime/deoptimization.hpp"
  61 #include "runtime/escapeBarrier.hpp"
  62 #include "runtime/fieldDescriptor.hpp"
  63 #include "runtime/fieldDescriptor.inline.hpp"
  64 #include "runtime/frame.inline.hpp"
  65 #include "runtime/handles.inline.hpp"
  66 #include "runtime/interfaceSupport.inline.hpp"
  67 #include "runtime/jniHandles.inline.hpp"
  68 #include "runtime/keepStackGCProcessed.hpp"
  69 #include "runtime/objectMonitor.inline.hpp"
  70 #include "runtime/osThread.hpp"
  71 #include "runtime/safepointVerifiers.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/signature.hpp"
  74 #include "runtime/stackFrameStream.inline.hpp"
  75 #include "runtime/stackWatermarkSet.hpp"
  76 #include "runtime/stubRoutines.hpp"
  77 #include "runtime/thread.hpp"
  78 #include "runtime/threadSMR.hpp"
  79 #include "runtime/threadWXSetters.inline.hpp"

 150   st.print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
 151   st.print(   "  frame_sizes: ");
 152   for (int index = 0; index < number_of_frames(); index++) {
 153     st.print(INTX_FORMAT " ", frame_sizes()[index]);
 154   }
 155   st.cr();
 156   tty->print_raw(st.as_string());
 157 }
 158 
 159 
 160 // In order to make fetch_unroll_info work properly with escape
 161 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY.
 162 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 163 // which is called from the method fetch_unroll_info_helper below.
 164 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 165   // fetch_unroll_info() is called at the beginning of the deoptimization
 166   // handler. Note this fact before we start generating temporary frames
 167   // that can confuse an asynchronous stack walker. This counter is
 168   // decremented at the end of unpack_frames().
 169   if (TraceDeoptimization) {
 170     tty->print_cr("Deoptimizing thread " INTPTR_FORMAT " [%ld]", p2i(current), (long) current->osthread()->thread_id());
 171   }
 172   current->inc_in_deopt_handler();
 173 
 174   if (exec_mode == Unpack_exception) {
 175     // When we get here, a callee has thrown an exception into a deoptimized
 176     // frame. That throw might have deferred stack watermark checking until
 177     // after unwinding. So we deal with such deferred requests here.
 178     StackWatermarkSet::after_unwind(current);
 179   }
 180 
 181   return fetch_unroll_info_helper(current, exec_mode);
 182 JRT_END
 183 
 184 #if COMPILER2_OR_JVMCI
 185 #ifndef PRODUCT
 186 // print information about reallocated objects
 187 static void print_objects(JavaThread* deoptee_thread,
 188                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 189   ResourceMark rm;
 190   stringStream st;  // change to logStream with logging

 426     restore_eliminated_locks(current, chunk, realloc_failures, deoptee, exec_mode, unused);
 427   }
 428 #endif // COMPILER2_OR_JVMCI
 429 
 430   ScopeDesc* trap_scope = chunk->at(0)->scope();
 431   Handle exceptionObject;
 432   if (trap_scope->rethrow_exception()) {
 433     if (PrintDeoptimizationDetails) {
 434       tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
 435     }
 436     GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
 437     guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
 438     ScopeValue* topOfStack = expressions->top();
 439     exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
 440     guarantee(exceptionObject() != NULL, "exception oop can not be null");
 441   }
 442 
 443   vframeArray* array = create_vframeArray(current, deoptee, &map, chunk, realloc_failures);
 444 #if COMPILER2_OR_JVMCI
 445   if (realloc_failures) {
 446     // FIXME: This very crudely destroys all ScopeLocal bindings. This
 447     // is better than a bound value escaping, but far from ideal.
 448     oop java_thread = current->threadObj();
 449     current->set_scopeLocalCache(NULL);
 450     java_lang_Thread::clear_scopeLocalBindings(java_thread);
 451     pop_frames_failed_reallocs(current, array);
 452   }
 453 #endif
 454 
 455   assert(current->vframe_array_head() == NULL, "Pending deopt!");
 456   current->set_vframe_array_head(array);
 457 
 458   // Now that the vframeArray has been created, if we have any deferred local writes
 459   // added by jvmti then we can free up that structure as the data is now in the
 460   // vframeArray
 461 
 462   JvmtiDeferredUpdates::delete_updates_for_frame(current, array->original().id());
 463 
 464   // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
 465   CodeBlob* cb = stub_frame.cb();
 466   // Verify we have the right vframeArray
 467   assert(cb->frame_size() >= 0, "Unexpected frame size");
 468   intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
 469 
 470   // If the deopt call site is a MethodHandle invoke call site we have

 574     methodHandle method(current, array->element(0)->method());
 575     Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
 576     return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
 577   }
 578 
 579   // Compute information for handling adapters and adjusting the frame size of the caller.
 580   int caller_adjustment = 0;
 581 
 582   // Compute the amount the oldest interpreter frame will have to adjust
 583   // its caller's stack by. If the caller is a compiled frame then
 584   // we pretend that the callee has no parameters so that the
 585   // extension counts for the full amount of locals and not just
 586   // locals-parms. This is because without a c2i adapter the parm
 587   // area as created by the compiled frame will not be usable by
 588   // the interpreter. (Depending on the calling convention there
 589   // may not even be enough space).
 590 
 591   // QQQ I'd rather see this pushed down into last_frame_adjust
 592   // and have it take the sender (aka caller).
 593 
 594   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 595     caller_adjustment = last_frame_adjust(0, callee_locals);
 596   } else if (callee_locals > callee_parameters) {
 597     // The caller frame may need extending to accommodate
 598     // non-parameter locals of the first unpacked interpreted frame.
 599     // Compute that adjustment.
 600     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 601   }
 602 
 603   // We always push the stack to make room for parameters, even if the caller is interpreted and has the parameters on the stack; this makes Loom continuation code simpler.
 604   // ... except if we've already done it, which can happen if the deoptimized frame becomes OSR and then deoptimized again.
 605   // if (deopt_sender.is_interpreted_frame() && deopt_sender.interpreter_frame_last_sp() > deopt_sender.sp() + 1 && callee_locals > callee_parameters) {
 606   //   caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 607   // } else {
 608   //   caller_adjustment = last_frame_adjust(0, callee_locals);
 609   // }
 610   
 611   // // If the caller is a continuation entry and the callee has a return barrier
 612   // // then we cannot use the parameters in the caller.
 613   // bool caller_was_continuation_entry = Continuation::is_cont_post_barrier_entry_frame(deopt_sender);
 614   // if (deopt_sender.is_compiled_frame() || caller_was_method_handle || caller_was_continuation_entry) {
 615   //   caller_adjustment = last_frame_adjust(0, callee_locals);
 616   // } else if (callee_locals > callee_parameters) {
 617   //   // The caller frame may need extending to accommodate non-parameter locals of the first unpacked interpreted frame.
 618   //   caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 619   // }
 620 
 621   // tty->print_cr(">>>>> fetch_unroll_info_helper adjustment: %d locals: %d params: %d", caller_adjustment, callee_locals, callee_parameters);
 622 
 623   // If the sender is deoptimized then we must retrieve the address of the handler
 624   // since the frame will "magically" show the original pc before the deopt
 625   // and we'd undo the deopt.
 626 
 627   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 628   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 629     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 630   }
 631   // if (Continuation::is_cont_barrier_frame(deoptee)) tty->print_cr("WOWEE Continuation::is_cont_barrier_frame(deoptee)");
 632 
 633   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 634 
 635 #if INCLUDE_JVMCI
 636   if (exceptionObject() != NULL) {
 637     current->set_exception_oop(exceptionObject());
 638     exec_mode = Unpack_exception;
 639   }
 640 #endif
 641 
 642   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 643     assert(current->has_pending_exception(), "should have thrown OOME");
 644     current->set_exception_oop(current->pending_exception());
 645     current->clear_pending_exception();
 646     exec_mode = Unpack_exception;
 647   }
 648 
 649 #if INCLUDE_JVMCI
 650   if (current->frames_to_pop_failed_realloc() > 0) {
 651     current->set_pending_monitorenter(false);

 736 
 737   assert(f->is_interpreted_frame(), "must be interpreted");
 738 }
 739 
 740 // Return BasicType of value being returned
 741 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
 742 
 743   // We are already active in the special DeoptResourceMark; any ResourceObj's we
 744   // allocate will be freed at the end of the routine.
 745 
 746   // JRT_LEAF methods don't normally allocate handles and there is a
 747   // NoHandleMark to enforce that. It is actually safe to use Handles
 748   // in a JRT_LEAF method, and sometimes desirable, but to do so we
 749   // must use ResetNoHandleMark to bypass the NoHandleMark, and
 750   // then use a HandleMark to ensure any Handles we do create are
 751   // cleaned up in this scope.
 752   ResetNoHandleMark rnhm;
 753   HandleMark hm(thread);
 754 
 755   frame stub_frame = thread->last_frame();
 756   
 757   Continuation::notify_deopt(thread, stub_frame.sp());
 758 
 759   // Since the frame to unpack is the top frame of this thread, the vframe_array_head
 760   // must point to the vframeArray for the unpack frame.
 761   vframeArray* array = thread->vframe_array_head();
 762 
 763 #ifndef PRODUCT
 764   if (TraceDeoptimization) {
 765     tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
 766                   p2i(thread), p2i(array), exec_mode);
 767   }
 768 #endif
 769   Events::log_deopt_message(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
 770               p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);
 771 
 772   UnrollBlock* info = array->unroll_block();
 773 
 774   // We set the last_Java frame. But the stack isn't really parsable here. So we
 775   // clear it to make sure JFR understands not to try and walk stacks from events
 776   // in here.
 777   intptr_t* sp = thread->frame_anchor()->last_Java_sp();

 914     }
 915   }
 916 #endif /* !PRODUCT */
 917 
 918   return bt;
 919 JRT_END
 920 
 921 class DeoptimizeMarkedClosure : public HandshakeClosure {
 922  public:
 923   DeoptimizeMarkedClosure() : HandshakeClosure("Deoptimize") {}
 924   void do_thread(Thread* thread) {
 925     JavaThread* jt = JavaThread::cast(thread);
 926     jt->deoptimize_marked_methods();
 927   }
 928 };
 929 
 930 void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
 931   ResourceMark rm;
 932   DeoptimizationMarker dm;
 933 
 934   GrowableArray<CompiledMethod*>* marked = new GrowableArray<CompiledMethod*>();
 935   // Make the dependent methods not entrant
 936   if (nmethod_only != NULL) {
 937     nmethod_only->mark_for_deoptimization();
 938     nmethod_only->make_not_entrant();
 939     marked->append(nmethod_only);
 940   } else {
 941     MutexLocker mu(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock, Mutex::_no_safepoint_check_flag);
 942     CodeCache::make_marked_nmethods_not_entrant(marked);
 943   }
 944   CodeCache::make_marked_nmethods_deoptimized(marked);
 945 
 946   DeoptimizeMarkedClosure deopt;
 947   if (SafepointSynchronize::is_at_safepoint()) {
 948     Threads::java_threads_do(&deopt);
 949   } else {
 950     Handshake::execute(&deopt);
 951   }
 952 }
 953 
 954 Deoptimization::DeoptAction Deoptimization::_unloaded_action
 955   = Deoptimization::Action_reinterpret;
 956 
 957 #if COMPILER2_OR_JVMCI
 958 template<typename CacheType>
 959 class BoxCacheBase : public CHeapObj<mtCompiler> {
 960 protected:
 961   static InstanceKlass* find_cache_klass(Symbol* klass_name) {
 962     ResourceMark rm;
 963     char* klass_name_str = klass_name->as_C_string();
 964     InstanceKlass* ik = SystemDictionary::find_instance_klass(klass_name, Handle(), Handle());

1503             // With exec_mode == Unpack_none obj may be thread local and locked in
1504             // a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
1505             markWord dmw = mark.displaced_mark_helper();
1506             mark.locker()->set_displaced_header(markWord::encode((BasicLock*) NULL));
1507             obj->set_mark(dmw);
1508           }
1509           if (mark.has_monitor()) {
1510             // defer relocking if the deoptee thread is currently waiting for obj
1511             ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
1512             if (waiting_monitor != NULL && waiting_monitor->object() == obj()) {
1513               assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
1514               mon_info->lock()->set_displaced_header(markWord::unused_mark());
1515               JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
1516               continue;
1517             }
1518           }
1519         }
1520         BasicLock* lock = mon_info->lock();
1521         ObjectSynchronizer::enter(obj, lock, deoptee_thread);
1522         assert(mon_info->owner()->is_locked(), "object must be locked now");
1523         deoptee_thread->inc_held_monitor_count();
1524       }
1525     }
1526   }
1527   return relocked_objects;
1528 }
1529 #endif // COMPILER2_OR_JVMCI
1530 
1531 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
1532   Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));
1533 
1534 #ifndef PRODUCT
1535   if (PrintDeoptimizationDetails) {
1536     ResourceMark rm;
1537     stringStream st;
1538     st.print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread));
1539     fr.print_on(&st);
1540     st.print_cr("     Virtual frames (innermost first):");
1541     for (int index = 0; index < chunk->length(); index++) {
1542       compiledVFrame* vf = chunk->at(index);
1543       st.print("       %2d - ", index);

1587   return array;
1588 }
1589 
1590 #if COMPILER2_OR_JVMCI
1591 void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
1592   // Reallocation of some scalar replaced objects failed. Record
1593   // that we need to pop all the interpreter frames for the
1594   // deoptimized compiled frame.
1595   assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
1596   thread->set_frames_to_pop_failed_realloc(array->frames());
1597   // Unlock all monitors here otherwise the interpreter will see a
1598   // mix of locked and unlocked monitors (because of failed
1599   // reallocations of synchronized objects) and be confused.
1600   for (int i = 0; i < array->frames(); i++) {
1601     MonitorChunk* monitors = array->element(i)->monitors();
1602     if (monitors != NULL) {
1603       for (int j = 0; j < monitors->number_of_monitors(); j++) {
1604         BasicObjectLock* src = monitors->at(j);
1605         if (src->obj() != NULL) {
1606           ObjectSynchronizer::exit(src->obj(), src->lock(), thread);
1607           thread->dec_held_monitor_count();
1608         }
1609       }
1610       array->element(i)->free_monitors(thread);
1611 #ifdef ASSERT
1612       array->element(i)->set_removed_monitors();
1613 #endif
1614     }
1615   }
1616 }
1617 #endif
1618 
1619 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1620   assert(fr.can_be_deoptimized(), "checking frame type");
1621 
1622   gather_statistics(reason, Action_none, Bytecodes::_illegal);
1623 
1624   if (LogCompilation && xtty != NULL) {
1625     CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
1626     assert(cm != NULL, "only compiled methods can deopt");
1627 
1628     ttyLocker ttyl;
1629     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1630     cm->log_identity(xtty);
1631     xtty->end_head();
1632     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1633       xtty->begin_elem("jvms bci='%d'", sd->bci());
1634       xtty->method(sd->method());
1635       xtty->end_elem();
1636       if (sd->is_top())  break;
1637     }
1638     xtty->tail("deoptimized");
1639   }
1640 
1641   Continuation::notify_deopt(thread, fr.sp());
1642 
1643   // Patch the compiled method so that when execution returns to it we will
1644   // deopt the execution state and return to the interpreter.
1645   fr.deoptimize(thread);
1646 }
1647 
1648 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1649   // Deoptimize only if the frame comes from compiled code.
1650   // Do not deoptimize the frame which is already patched
1651   // during the execution of the loops below.
1652   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1653     return;
1654   }
1655   ResourceMark rm;
1656   DeoptimizationMarker dm;
1657   deoptimize_single_frame(thread, fr, reason);
1658 }
1659 
1660 #if INCLUDE_JVMCI
1661 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1662   // there is no exception handler for this pc => deoptimize

1985         if (dcnt != 0)
1986           xtty->print(" count='%d'", dcnt);
1987         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1988         int dos = (pdata == NULL)? 0: pdata->trap_state();
1989         if (dos != 0) {
1990           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1991           if (trap_state_is_recompiled(dos)) {
1992             int recnt2 = trap_mdo->overflow_recompile_count();
1993             if (recnt2 != 0)
1994               xtty->print(" recompiles2='%d'", recnt2);
1995           }
1996         }
1997       }
1998       if (xtty != NULL) {
1999         xtty->stamp();
2000         xtty->end_head();
2001       }
2002       if (TraceDeoptimization) {  // make noise on the tty
2003         tty->print("Uncommon trap occurred in");
2004         nm->method()->print_short_name(tty);
2005         // nm->method()->print_codes_on(tty);
2006         tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
2007 #if INCLUDE_JVMCI
2008         if (nm->is_nmethod()) {
2009           const char* installed_code_name = nm->as_nmethod()->jvmci_name();
2010           if (installed_code_name != NULL) {
2011             tty->print(" (JVMCI: installed code name=%s) ", installed_code_name);
2012           }
2013         }
2014 #endif
2015         tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
2016                    p2i(fr.pc()),
2017                    os::current_thread_id(),
2018                    trap_reason_name(reason),
2019                    trap_action_name(action),
2020                    unloaded_class_index
2021 #if INCLUDE_JVMCI
2022                    , debug_id
2023 #endif
2024                    );
2025         if (class_name != NULL) {