src/hotspot/share/runtime/deoptimization.cpp

  32 #include "code/pcDesc.hpp"
  33 #include "code/scopeDesc.hpp"
  34 #include "interpreter/bytecode.hpp"
  35 #include "interpreter/interpreter.hpp"
  36 #include "interpreter/oopMapCache.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/oopFactory.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "oops/constantPool.hpp"
  42 #include "oops/method.hpp"
  43 #include "oops/objArrayKlass.hpp"
  44 #include "oops/objArrayOop.inline.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "oops/fieldStreams.hpp"
  47 #include "oops/typeArrayOop.inline.hpp"
  48 #include "oops/verifyOopClosure.hpp"
  49 #include "prims/jvmtiThreadState.hpp"
  50 #include "runtime/biasedLocking.hpp"
  51 #include "runtime/compilationPolicy.hpp"

  52 #include "runtime/deoptimization.hpp"
  53 #include "runtime/fieldDescriptor.hpp"
  54 #include "runtime/fieldDescriptor.inline.hpp"
  55 #include "runtime/frame.inline.hpp"
  56 #include "runtime/jniHandles.inline.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/interfaceSupport.inline.hpp"
  59 #include "runtime/safepointVerifiers.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/signature.hpp"
  62 #include "runtime/stubRoutines.hpp"
  63 #include "runtime/thread.hpp"
  64 #include "runtime/threadSMR.hpp"
  65 #include "runtime/vframe.hpp"
  66 #include "runtime/vframeArray.hpp"
  67 #include "runtime/vframe_hp.hpp"
  68 #include "utilities/events.hpp"
  69 #include "utilities/preserveException.hpp"
  70 #include "utilities/xmlstream.hpp"
  71 


 140 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
 141 // of previously eliminated objects occurs in realloc_objects, which is
 142 // called from the method fetch_unroll_info_helper below.
 143 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode))
 144   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
 145   // but makes the entry a little slower. There is however a little dance we have to
 146   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
 147 
 148   // fetch_unroll_info() is called at the beginning of the deoptimization
 149   // handler. Note this fact before we start generating temporary frames
 150   // that can confuse an asynchronous stack walker. This counter is
 151   // decremented at the end of unpack_frames().
 152   if (TraceDeoptimization) {
 153     tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
 154   }
 155   thread->inc_in_deopt_handler();
 156 
 157   return fetch_unroll_info_helper(thread, exec_mode);
 158 JRT_END
 159 
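As a minimal sketch of the counter pairing described above (assuming dec_in_deopt_handler() is the JavaThread counterpart; the actual decrement sits on the unpack_frames() path, outside these hunks):

    thread->inc_in_deopt_handler();   // entering the deopt handler: temporary frames may now
                                      // confuse an asynchronous stack walker
    // ... fetch_unroll_info_helper() builds the UnrollBlock and the frames are unpacked ...
    thread->dec_in_deopt_handler();   // balanced at the end of unpack_frames()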

 160 
 161 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 162 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {
 163 
 164   // Note: there is a safepoint safety issue here. No matter whether we enter
 165   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
 166   // the vframeArray is created.
 167   //
 168 
 169   // Allocate our special deoptimization ResourceMark
 170   DeoptResourceMark* dmark = new DeoptResourceMark(thread);
 171   assert(thread->deopt_mark() == NULL, "Pending deopt!");
 172   thread->set_deopt_mark(dmark);
 173 
 174   frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
 175   RegisterMap map(thread, true);
 176   RegisterMap dummy_map(thread, false);
 177   // Now get the deoptee with a valid map
 178   frame deoptee = stub_frame.sender(&map);
 179   // Set the deoptee nmethod


 402   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
 403   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
 404 
 405   // It's possible that the number of parameters at the call site is
 406   // different from the number of arguments in the callee when method
 407   // handles are used.  If the caller is interpreted, get the real
 408   // value so that the proper amount of space can be added to its
 409   // frame.
 410   bool caller_was_method_handle = false;
 411   if (deopt_sender.is_interpreted_frame()) {
 412     methodHandle method = deopt_sender.interpreter_frame_method();
 413     Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
 414     if (cur.is_invokedynamic() || cur.is_invokehandle()) {
 415       // Method handle invokes may involve fairly arbitrary chains of
 416       // calls so it's impossible to know how much actual space the
 417       // caller has for locals.
 418       caller_was_method_handle = true;
 419     }
 420   }
 421 




 422   //
 423   // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
 424   // frame_sizes/frame_pcs[1] next oldest frame (int)
 425   // frame_sizes/frame_pcs[n] youngest frame (int)
 426   //
 427   // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
 428   // owns the space for the return address to its caller).  Confusing, ain't it?
 429   //
 430   // The vframe array can address vframes with indices running from
 431   // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
 432   // When we create the skeletal frames we need the oldest frame to be in the zero slot
 433   // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
 434   // So things look a little strange in this loop.
 435   //
 436   int callee_parameters = 0;
 437   int callee_locals = 0;
 438   for (int index = 0; index < array->frames(); index++ ) {
 439     // frame[number_of_frames - 1 ] = on_stack_size(youngest)
 440     // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
 441     // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
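A hedged sketch of the index reversal this loop performs (the real loop body is elided in this hunk; only array->frames() and the slot arithmetic come from the surrounding comments, the rest is illustrative):

    // Illustrative only: vframe index 0 is the youngest frame, but slot 0 of
    // frame_sizes/frame_pcs must hold the oldest frame, so vframe 'index' fills
    // slot number_of_frames - 1 - index (with 3 frames: vframe 0 -> slot 2, vframe 2 -> slot 0).
    int number_of_frames = array->frames();
    for (int index = 0; index < number_of_frames; index++) {
      frame_sizes[number_of_frames - 1 - index] = 0 /* on-stack size of vframe 'index' */;
    }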


 460     methodHandle method(thread, array->element(0)->method());
 461     Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
 462     return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
 463   }
 464 
 465   // Compute information for handling adapters and adjusting the frame size of the caller.
 466   int caller_adjustment = 0;
 467 
 468   // Compute the amount the oldest interpreter frame will have to adjust
 469   // its caller's stack by. If the caller is a compiled frame then
 470   // we pretend that the callee has no parameters so that the
 471   // extension counts for the full amount of locals and not just
 472   // locals-parms. This is because without a c2i adapter the parm
 473   // area as created by the compiled frame will not be usable by
 474   // the interpreter. (Depending on the calling convention there
 475   // may not even be enough space).
 476 
 477   // QQQ I'd rather see this pushed down into last_frame_adjust
 478   // and have it take the sender (aka caller).
 479 
 480   if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {



 481     caller_adjustment = last_frame_adjust(0, callee_locals);
 482   } else if (callee_locals > callee_parameters) {
 483     // The caller frame may need extending to accommodate
 484     // non-parameter locals of the first unpacked interpreted frame.
 485     // Compute that adjustment.
 486     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 487   }
 488 
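A worked illustration of the adjustment above (numbers made up; on most platforms last_frame_adjust(parameters, locals) works out to locals - parameters slots): suppose the oldest interpreted frame has callee_parameters == 2 and callee_locals == 5. An interpreted caller already provides the 2 parameter slots, so the extension is last_frame_adjust(2, 5), i.e. room for the 3 non-parameter locals. A compiled caller (or a method-handle call site) gives the interpreter no reusable parameter area, so we charge last_frame_adjust(0, 5) and extend by all 5 slots.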
 489   // If the sender is deoptimized then we must retrieve the address of the handler
 490   // since the frame will "magically" show the original pc before the deopt
 491   // and we'd undo the deopt.
 492 
 493   frame_pcs[0] = deopt_sender.raw_pc();

 494 
 495   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 496 
 497 #if INCLUDE_JVMCI
 498   if (exceptionObject() != NULL) {
 499     thread->set_exception_oop(exceptionObject());
 500     exec_mode = Unpack_exception;
 501   }
 502 #endif
 503 
 504   if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 505     assert(thread->has_pending_exception(), "should have thrown OOME");
 506     thread->set_exception_oop(thread->pending_exception());
 507     thread->clear_pending_exception();
 508     exec_mode = Unpack_exception;
 509   }
 510 
 511 #if INCLUDE_JVMCI
 512   if (thread->frames_to_pop_failed_realloc() > 0) {
 513     thread->set_pending_monitorenter(false);


 602   // will accomplish.
 603 
 604   // At the moment we have modified c2 to not have any callee save registers
 605   // so this problem does not exist and this routine is just a place holder.
 606 
 607   assert(f->is_interpreted_frame(), "must be interpreted");
 608 }
 609 
 610 // Return BasicType of value being returned
 611 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
 612 
 613   // We are already active in the special DeoptResourceMark; any ResourceObj's we
 614   // allocate will be freed at the end of the routine.
 615 
 616   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
 617   // but makes the entry a little slower. There is however a little dance we have to
 618   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
 619   ResetNoHandleMark rnhm; // No-op in release/product versions
 620   HandleMark hm;
 621 

 622   frame stub_frame = thread->last_frame();
 623 
 624   // Since the frame to unpack is the top frame of this thread, the vframe_array_head
 625   // must point to the vframeArray for the unpack frame.
 626   vframeArray* array = thread->vframe_array_head();
 627 
 628 #ifndef PRODUCT
 629   if (TraceDeoptimization) {
 630     ttyLocker ttyl;
 631     tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
 632                   p2i(thread), p2i(array), exec_mode);
 633   }
 634 #endif
 635   Events::log_deopt_message(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
 636               p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);
 637 
 638   UnrollBlock* info = array->unroll_block();
 639 
 640   // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
 641   array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());


1394     if (!mon_info->eliminated() && mon_info->owner() != NULL) {
1395       objects_to_revoke->append(Handle(thread, mon_info->owner()));
1396     }
1397   }
1398 }
1399 
1400 
1401 void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
1402   if (!UseBiasedLocking) {
1403     return;
1404   }
1405 
1406   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
1407 
1408   // Unfortunately we don't have a RegisterMap available in most of
1409   // the places we want to call this routine so we need to walk the
1410   // stack again to update the register map.
1411   if (map == NULL || !map->update_map()) {
1412     StackFrameStream sfs(thread, true);
1413     bool found = false;
1414     while (!found && !sfs.is_done()) {
1415       frame* cur = sfs.current();
1416       sfs.next();
1417       found = cur->id() == fr.id();

1418     }
1419     assert(found, "frame to be deoptimized not found on target thread's stack");
1420     map = sfs.register_map();
1421   }
1422 
1423   vframe* vf = vframe::new_vframe(&fr, map, thread);
1424   compiledVFrame* cvf = compiledVFrame::cast(vf);
1425   // Revoke monitors' biases in all scopes
1426   while (!cvf->is_top()) {
1427     collect_monitors(cvf, objects_to_revoke);
1428     cvf = compiledVFrame::cast(cvf->sender());
1429   }
1430   collect_monitors(cvf, objects_to_revoke);
1431 
1432   if (SafepointSynchronize::is_at_safepoint()) {
1433     BiasedLocking::revoke_at_safepoint(objects_to_revoke);
1434   } else {
1435     BiasedLocking::revoke(objects_to_revoke, thread);
1436   }
1437 }


1442 
1443   gather_statistics(reason, Action_none, Bytecodes::_illegal);
1444 
1445   if (LogCompilation && xtty != NULL) {
1446     CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
1447     assert(cm != NULL, "only compiled methods can deopt");
1448 
1449     ttyLocker ttyl;
1450     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1451     cm->log_identity(xtty);
1452     xtty->end_head();
1453     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1454       xtty->begin_elem("jvms bci='%d'", sd->bci());
1455       xtty->method(sd->method());
1456       xtty->end_elem();
1457       if (sd->is_top())  break;
1458     }
1459     xtty->tail("deoptimized");
1460   }
1461 



1462   // Patch the compiled method so that when execution returns to it we will
1463   // deopt the execution state and return to the interpreter.
1464   fr.deoptimize(thread);
1465 }
1466 
1467 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
1468   deoptimize(thread, fr, map, Reason_constraint);
1469 }
1470 
1471 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
1472   // Deoptimize only if the frame comes from compiled code.
1473   // Do not deoptimize the frame which is already patched
1474   // during the execution of the loops below.
1475   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1476     return;
1477   }
1478   ResourceMark rm;
1479   DeoptimizationMarker dm;
1480   if (UseBiasedLocking) {
1481     revoke_biases_of_monitors(thread, fr, map);


1754         if (dcnt != 0)
1755           xtty->print(" count='%d'", dcnt);
1756         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1757         int dos = (pdata == NULL)? 0: pdata->trap_state();
1758         if (dos != 0) {
1759           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1760           if (trap_state_is_recompiled(dos)) {
1761             int recnt2 = trap_mdo->overflow_recompile_count();
1762             if (recnt2 != 0)
1763               xtty->print(" recompiles2='%d'", recnt2);
1764           }
1765         }
1766       }
1767       if (xtty != NULL) {
1768         xtty->stamp();
1769         xtty->end_head();
1770       }
1771       if (TraceDeoptimization) {  // make noise on the tty
1772         tty->print("Uncommon trap occurred in");
1773         nm->method()->print_short_name(tty);

1774         tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
1775 #if INCLUDE_JVMCI
1776         if (nm->is_nmethod()) {
1777           const char* installed_code_name = nm->as_nmethod()->jvmci_name();
1778           if (installed_code_name != NULL) {
1779             tty->print(" (JVMCI: installed code name=%s) ", installed_code_name);
1780           }
1781         }
1782 #endif
1783         tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
1784                    p2i(fr.pc()),
1785                    os::current_thread_id(),
1786                    trap_reason_name(reason),
1787                    trap_action_name(action),
1788                    unloaded_class_index
1789 #if INCLUDE_JVMCI
1790                    , debug_id
1791 #endif
1792                    );
1793         if (class_name != NULL) {




  32 #include "code/pcDesc.hpp"
  33 #include "code/scopeDesc.hpp"
  34 #include "interpreter/bytecode.hpp"
  35 #include "interpreter/interpreter.hpp"
  36 #include "interpreter/oopMapCache.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/oopFactory.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "oops/constantPool.hpp"
  42 #include "oops/method.hpp"
  43 #include "oops/objArrayKlass.hpp"
  44 #include "oops/objArrayOop.inline.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "oops/fieldStreams.hpp"
  47 #include "oops/typeArrayOop.inline.hpp"
  48 #include "oops/verifyOopClosure.hpp"
  49 #include "prims/jvmtiThreadState.hpp"
  50 #include "runtime/biasedLocking.hpp"
  51 #include "runtime/compilationPolicy.hpp"
  52 #include "runtime/continuation.hpp"
  53 #include "runtime/deoptimization.hpp"
  54 #include "runtime/fieldDescriptor.hpp"
  55 #include "runtime/fieldDescriptor.inline.hpp"
  56 #include "runtime/frame.inline.hpp"
  57 #include "runtime/jniHandles.inline.hpp"
  58 #include "runtime/handles.inline.hpp"
  59 #include "runtime/interfaceSupport.inline.hpp"
  60 #include "runtime/safepointVerifiers.hpp"
  61 #include "runtime/sharedRuntime.hpp"
  62 #include "runtime/signature.hpp"
  63 #include "runtime/stubRoutines.hpp"
  64 #include "runtime/thread.hpp"
  65 #include "runtime/threadSMR.hpp"
  66 #include "runtime/vframe.hpp"
  67 #include "runtime/vframeArray.hpp"
  68 #include "runtime/vframe_hp.hpp"
  69 #include "utilities/events.hpp"
  70 #include "utilities/preserveException.hpp"
  71 #include "utilities/xmlstream.hpp"
  72 


 141 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
 142 // of previously eliminated objects occurs in realloc_objects, which is
 143 // called from the method fetch_unroll_info_helper below.
 144 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode))
 145   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
 146   // but makes the entry a little slower. There is however a little dance we have to
 147   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
 148 
 149   // fetch_unroll_info() is called at the beginning of the deoptimization
 150   // handler. Note this fact before we start generating temporary frames
 151   // that can confuse an asynchronous stack walker. This counter is
 152   // decremented at the end of unpack_frames().
 153   if (TraceDeoptimization) {
 154     tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
 155   }
 156   thread->inc_in_deopt_handler();
 157 
 158   return fetch_unroll_info_helper(thread, exec_mode);
 159 JRT_END
 160 
 161 extern "C" void pfl();
 162 
 163 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 164 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {
 165 
 166   // Note: there is a safepoint safety issue here. No matter whether we enter
 167   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
 168   // the vframeArray is created.
 169   //
 170 
 171   // Allocate our special deoptimization ResourceMark
 172   DeoptResourceMark* dmark = new DeoptResourceMark(thread);
 173   assert(thread->deopt_mark() == NULL, "Pending deopt!");
 174   thread->set_deopt_mark(dmark);
 175 
 176   frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
 177   RegisterMap map(thread, true);
 178   RegisterMap dummy_map(thread, false);
 179   // Now get the deoptee with a valid map
 180   frame deoptee = stub_frame.sender(&map);
 181   // Set the deoptee nmethod


 404   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
 405   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
 406 
 407   // It's possible that the number of parameters at the call site is
 408   // different from the number of arguments in the callee when method
 409   // handles are used.  If the caller is interpreted, get the real
 410   // value so that the proper amount of space can be added to its
 411   // frame.
 412   bool caller_was_method_handle = false;
 413   if (deopt_sender.is_interpreted_frame()) {
 414     methodHandle method = deopt_sender.interpreter_frame_method();
 415     Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
 416     if (cur.is_invokedynamic() || cur.is_invokehandle()) {
 417       // Method handle invokes may involve fairly arbitrary chains of
 418       // calls so it's impossible to know how much actual space the
 419       // caller has for locals.
 420       caller_was_method_handle = true;
 421     }
 422   }
 423 
 424   // If the caller is a continuation entry and the callee has a return barrier
 425   // then we cannot use the parameters in the caller.
 426   bool caller_was_continuation_entry = Continuation::is_cont_post_barrier_entry_frame(deopt_sender);
 427 
 428   //
 429   // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
 430   // frame_sizes/frame_pcs[1] next oldest frame (int)
 431   // frame_sizes/frame_pcs[n] youngest frame (int)
 432   //
 433   // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
 434   // owns the space for the return address to its caller).  Confusing, ain't it?
 435   //
 436   // The vframe array can address vframes with indices running from
 437   // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
 438   // When we create the skeletal frames we need the oldest frame to be in the zero slot
 439   // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
 440   // So things look a little strange in this loop.
 441   //
 442   int callee_parameters = 0;
 443   int callee_locals = 0;
 444   for (int index = 0; index < array->frames(); index++ ) {
 445     // frame[number_of_frames - 1 ] = on_stack_size(youngest)
 446     // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
 447     // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))


 466     methodHandle method(thread, array->element(0)->method());
 467     Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
 468     return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
 469   }
 470 
 471   // Compute information for handling adapters and adjusting the frame size of the caller.
 472   int caller_adjustment = 0;
 473 
 474   // Compute the amount the oldest interpreter frame will have to adjust
 475   // its caller's stack by. If the caller is a compiled frame then
 476   // we pretend that the callee has no parameters so that the
 477   // extension counts for the full amount of locals and not just
 478   // locals-parms. This is because without a c2i adapter the parm
 479   // area as created by the compiled frame will not be usable by
 480   // the interpreter. (Depending on the calling convention there
 481   // may not even be enough space).
 482 
 483   // QQQ I'd rather see this pushed down into last_frame_adjust
 484   // and have it take the sender (aka caller).
 485 
 486   // TODO LOOM: consider *always* adjusting instead of the conditionals below. 
 487   // That would simplify the alignment code in continuation freeze and particularly thaw, but it makes hotspot/jtreg/vmTestbase/nsk/jvmti/PopFrame/popframe005 fail.
 488   // caller_adjustment = last_frame_adjust(0, callee_locals);
 489   if (deopt_sender.is_compiled_frame() || caller_was_method_handle || caller_was_continuation_entry) {
 490     caller_adjustment = last_frame_adjust(0, callee_locals);
 491   } else if (callee_locals > callee_parameters) {
 492     // The caller frame may need extending to accommodate non-parameter locals of the first unpacked interpreted frame.


 493     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 494   }
 495 
 496   // If the sender is deoptimized then we must retrieve the address of the handler
 497   // since the frame will "magically" show the original pc before the deopt
 498   // and we'd undo the deopt.
 499 
 500   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 501   // if (Continuation::is_cont_barrier_frame(deoptee)) tty->print_cr("WOWEE Continuation::is_cont_barrier_frame(deoptee)");
 502 
 503   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
 504 
 505 #if INCLUDE_JVMCI
 506   if (exceptionObject() != NULL) {
 507     thread->set_exception_oop(exceptionObject());
 508     exec_mode = Unpack_exception;
 509   }
 510 #endif
 511 
 512   if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 513     assert(thread->has_pending_exception(), "should have thrown OOME");
 514     thread->set_exception_oop(thread->pending_exception());
 515     thread->clear_pending_exception();
 516     exec_mode = Unpack_exception;
 517   }
 518 
 519 #if INCLUDE_JVMCI
 520   if (thread->frames_to_pop_failed_realloc() > 0) {
 521     thread->set_pending_monitorenter(false);


 610   // will accomplish.
 611 
 612   // At the moment we have modified c2 to not have any callee save registers
 613   // so this problem does not exist and this routine is just a place holder.
 614 
 615   assert(f->is_interpreted_frame(), "must be interpreted");
 616 }
 617 
 618 // Return BasicType of value being returned
 619 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
 620 
 621   // We are already active in the special DeoptResourceMark; any ResourceObj's we
 622   // allocate will be freed at the end of the routine.
 623 
 624   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
 625   // but makes the entry a little slower. There is however a little dance we have to
 626   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
 627   ResetNoHandleMark rnhm; // No-op in release/product versions
 628   HandleMark hm;
 629 
 630   thread->set_cont_fastpath(false);
 631   frame stub_frame = thread->last_frame();
 632 
 633   // Since the frame to unpack is the top frame of this thread, the vframe_array_head
 634   // must point to the vframeArray for the unpack frame.
 635   vframeArray* array = thread->vframe_array_head();
 636 
 637 #ifndef PRODUCT
 638   if (TraceDeoptimization) {
 639     ttyLocker ttyl;
 640     tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
 641                   p2i(thread), p2i(array), exec_mode);
 642   }
 643 #endif
 644   Events::log_deopt_message(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
 645               p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);
 646 
 647   UnrollBlock* info = array->unroll_block();
 648 
 649   // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
 650   array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());


1403     if (!mon_info->eliminated() && mon_info->owner() != NULL) {
1404       objects_to_revoke->append(Handle(thread, mon_info->owner()));
1405     }
1406   }
1407 }
1408 
1409 
1410 void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
1411   if (!UseBiasedLocking) {
1412     return;
1413   }
1414 
1415   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
1416 
1417   // Unfortunately we don't have a RegisterMap available in most of
1418   // the places we want to call this routine so we need to walk the
1419   // stack again to update the register map.
1420   if (map == NULL || !map->update_map()) {
1421     StackFrameStream sfs(thread, true);
1422     bool found = false;
1423     for (; !found && !sfs.is_done(); sfs.next()) {
1424       frame* cur = sfs.current();

1425       found = cur->id() == fr.id();
1426       if (found) break; // we must not call sfs.next
1427     }
1428     assert(found, "frame to be deoptimized not found on target thread's stack");
1429     map = sfs.register_map();
1430   }
1431 
1432   vframe* vf = vframe::new_vframe(&fr, map, thread);
1433   compiledVFrame* cvf = compiledVFrame::cast(vf);
1434   // Revoke monitors' biases in all scopes
1435   while (!cvf->is_top()) {
1436     collect_monitors(cvf, objects_to_revoke);
1437     cvf = compiledVFrame::cast(cvf->sender());
1438   }
1439   collect_monitors(cvf, objects_to_revoke);
1440 
1441   if (SafepointSynchronize::is_at_safepoint()) {
1442     BiasedLocking::revoke_at_safepoint(objects_to_revoke);
1443   } else {
1444     BiasedLocking::revoke(objects_to_revoke, thread);
1445   }
1446 }


1451 
1452   gather_statistics(reason, Action_none, Bytecodes::_illegal);
1453 
1454   if (LogCompilation && xtty != NULL) {
1455     CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
1456     assert(cm != NULL, "only compiled methods can deopt");
1457 
1458     ttyLocker ttyl;
1459     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1460     cm->log_identity(xtty);
1461     xtty->end_head();
1462     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1463       xtty->begin_elem("jvms bci='%d'", sd->bci());
1464       xtty->method(sd->method());
1465       xtty->end_elem();
1466       if (sd->is_top())  break;
1467     }
1468     xtty->tail("deoptimized");
1469   }
1470 
1471   // For simplicity, we currently clear the fast path if the frame is on _any_ continuation
1472   if (Continuation::is_frame_in_continuation(thread, fr)) thread->set_cont_fastpath(false);
1473 
1474   // Patch the compiled method so that when execution returns to it we will
1475   // deopt the execution state and return to the interpreter.
1476   fr.deoptimize(thread);
1477 }
1478 
1479 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
1480   deoptimize(thread, fr, map, Reason_constraint);
1481 }
1482 
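A hedged usage sketch of this entry point (the caller code below is assumed, not part of this patch; in real code the walk would run at a safepoint or under a handshake with the target thread), reusing the StackFrameStream pattern from revoke_biases_of_monitors above:

    // Illustrative only: request deoptimization of every compiled, not yet
    // deoptimized frame on the target thread's stack.
    StackFrameStream sfs(thread, true /* update the RegisterMap */);
    for (; !sfs.is_done(); sfs.next()) {
      frame* fr = sfs.current();
      if (fr->is_compiled_frame() && !fr->is_deoptimized_frame()) {
        Deoptimization::deoptimize(thread, *fr, sfs.register_map());  // defaults to Reason_constraint
      }
    }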
1483 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
1484   // Deoptimize only if the frame comes from compiled code.
1485   // Do not deoptimize the frame which is already patched
1486   // during the execution of the loops below.
1487   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1488     return;
1489   }
1490   ResourceMark rm;
1491   DeoptimizationMarker dm;
1492   if (UseBiasedLocking) {
1493     revoke_biases_of_monitors(thread, fr, map);


1766         if (dcnt != 0)
1767           xtty->print(" count='%d'", dcnt);
1768         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1769         int dos = (pdata == NULL)? 0: pdata->trap_state();
1770         if (dos != 0) {
1771           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1772           if (trap_state_is_recompiled(dos)) {
1773             int recnt2 = trap_mdo->overflow_recompile_count();
1774             if (recnt2 != 0)
1775               xtty->print(" recompiles2='%d'", recnt2);
1776           }
1777         }
1778       }
1779       if (xtty != NULL) {
1780         xtty->stamp();
1781         xtty->end_head();
1782       }
1783       if (TraceDeoptimization) {  // make noise on the tty
1784         tty->print("Uncommon trap occurred in");
1785         nm->method()->print_short_name(tty);
1786         // nm->method()->print_codes_on(tty);
1787         tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
1788 #if INCLUDE_JVMCI
1789         if (nm->is_nmethod()) {
1790           const char* installed_code_name = nm->as_nmethod()->jvmci_name();
1791           if (installed_code_name != NULL) {
1792             tty->print(" (JVMCI: installed code name=%s) ", installed_code_name);
1793           }
1794         }
1795 #endif
1796         tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
1797                    p2i(fr.pc()),
1798                    os::current_thread_id(),
1799                    trap_reason_name(reason),
1800                    trap_action_name(action),
1801                    unloaded_class_index
1802 #if INCLUDE_JVMCI
1803                    , debug_id
1804 #endif
1805                    );
1806         if (class_name != NULL) {

