/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/vmreg.inline.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

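// A bci of SynchronizationEntryBCI marks a deoptimization that happened while
// still in the prologue of a synchronized method; report it as bci 0, i.e. the
// first bytecode of the method.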
int vframeArrayElement::bci(void) const { return (_bci == SynchronizationEntryBCI ? 0 : _bci); }

void vframeArrayElement::free_monitors(JavaThread* jt) {
  if (_monitors != NULL) {
     MonitorChunk* chunk = _monitors;
     _monitors = NULL;
     jt->remove_monitor_chunk(chunk);
     delete chunk;
  }
}

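// Capture everything the interpreter will need from one compiled vframe
// (monitors, locals, expression stack) into off-stack storage, so the
// replacement interpreter frame can be materialized later in unpack_on_stack().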
void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) {

// Copy the information from the compiled vframe to the
// interpreter frame we will be creating to replace vf

  _method = vf->method();
  _bci    = vf->raw_bci();
  _reexecute = vf->should_reexecute();
#ifdef ASSERT
  _removed_monitors = false;
#endif

  int index;

  // Get the monitors off-stack

  GrowableArray<MonitorInfo*>* list = vf->monitors();
  if (list->is_empty()) {
    _monitors = NULL;
  } else {

    // Allocate monitor chunk
    _monitors = new MonitorChunk(list->length());
    vf->thread()->add_monitor_chunk(_monitors);

    // Migrate the BasicLocks from the stack to the monitor chunk
    for (index = 0; index < list->length(); index++) {
      MonitorInfo* monitor = list->at(index);
      assert(!monitor->owner_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
      BasicObjectLock* dest = _monitors->at(index);
      if (monitor->owner_is_scalar_replaced()) {
        dest->set_obj(NULL);
      } else {
        assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
        dest->set_obj(monitor->owner());
        monitor->lock()->move_to(monitor->owner(), dest->lock());
      }
    }
  }

  // Convert the vframe locals and expressions to off-stack values.
  // Because we will not gc, all oops can be converted to intptr_t
  // (i.e. a stack slot) and we are fine. This is good, since we are
  // inside a HandleMark and the oops in our collection would go away
  // between packing them here and unpacking them in unpack_on_stack.

  // First the locals go off-stack

  // FIXME: this seems silly; it creates a StackValueCollection just to
  // get the size, then copies the values and converts the types to
  // intptr_t-sized slots. It seems like this could be done in place...
  // Still, it uses less memory than the old way.

  StackValueCollection *locs = vf->locals();
  _locals = new StackValueCollection(locs->size());
  for(index = 0; index < locs->size(); index++) {
    StackValue* value = locs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
        // preserve object type
        _locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead local.  Will be initialized to null/zero.
        _locals->add( new StackValue());
        break;
      case T_INT:
        _locals->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Now the expressions off-stack
  // Same silliness as above

  StackValueCollection *exprs = vf->expressions();
  _expressions = new StackValueCollection(exprs->size());
  for(index = 0; index < exprs->size(); index++) {
    StackValue* value = exprs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
        // preserve object type
        _expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead stack element.  Will be initialized to null/zero.
        // This can occur when the compiler emits a state in which stack
        // elements are known to be dead (because of an imminent exception).
        _expressions->add( new StackValue());
        break;
      case T_INT:
        _expressions->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }
}

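// Counts interpreter frames rebuilt by unpack_on_stack(); used only for the
// PrintDeoptimizationDetails output below.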
int unpack_counter = 0;

void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
                                         int callee_parameters,
                                         int callee_locals,
                                         frame* caller,
                                         bool is_top_frame,
                                         bool is_bottom_frame,
                                         int exec_mode) {
  JavaThread* thread = (JavaThread*) Thread::current();

  bool realloc_failure_exception = thread->frames_to_pop_failed_realloc() > 0;

  // Look at bci and decide on bcp and continuation pc
  address bcp;
  // C++ interpreter doesn't need a pc since it will figure out what to do when it
  // begins execution
  address pc;
  bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
                             // rather than the one associated with bcp
  if (raw_bci() == SynchronizationEntryBCI) {
    // We are deoptimizing while hanging in prologue code for synchronized method
    bcp = method()->bcp_from(0); // first byte code
    pc  = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
  } else if (should_reexecute()) { // reexecute this bytecode
    assert(is_top_frame, "reexecute allowed only for the top frame");
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_reexecute_entry(method(), bcp);
  } else {
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
    use_next_mdp = true;
  }
  assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");

  // Monitorenter and pending exceptions:
  //
  // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
  // because there is no safepoint at the null pointer check (it is either handled explicitly
  // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
  // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER).  If an asynchronous
  // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
  // the monitorenter to place it in the proper exception range.
  //
  // For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
  // in which case bcp should point to the monitorenter since it is within the exception's range.
  //
  // For realloc failure exception we just pop frames, skip the guarantee.

  assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
  assert(thread->deopt_compiled_method() != NULL, "compiled method should be known");
  guarantee(realloc_failure_exception || !(thread->deopt_compiled_method()->is_compiled_by_c2() &&
              *bcp == Bytecodes::_monitorenter             &&
              exec_mode == Deoptimization::Unpack_exception),
            "shouldn't get exception during monitorenter");

  int popframe_preserved_args_size_in_bytes = 0;
  int popframe_preserved_args_size_in_words = 0;
  if (is_top_frame) {
    JvmtiThreadState *state = thread->jvmti_thread_state();
    if (JvmtiExport::can_pop_frame() &&
        (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
      if (thread->has_pending_popframe()) {
        // Pop top frame after deoptimization
#ifndef CC_INTERP
        pc = Interpreter::remove_activation_preserving_args_entry();
#else
        // Do an uncommon trap type entry. c++ interpreter will know
        // to pop frame and preserve the args
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
#endif
      } else {
        // Reexecute invoke in top frame
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
        // Note: the PopFrame-related extension of the expression stack size is done in
        // Deoptimization::fetch_unroll_info_helper
        popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
      }
    } else if (!realloc_failure_exception && JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
      // Force early return from top frame after deoptimization
#ifndef CC_INTERP
      pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
#endif
    } else {
      if (realloc_failure_exception && JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
        state->clr_earlyret_pending();
        state->set_earlyret_oop(NULL);
        state->clr_earlyret_value();
      }
      // Possibly override the previous pc computation of the top (youngest) frame
      switch (exec_mode) {
      case Deoptimization::Unpack_deopt:
        // use what we've got
        break;
      case Deoptimization::Unpack_exception:
        // exception is pending
        pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
        // [phh] We're going to end up in some handler or other, so it doesn't
        // matter what mdp we point to.  See exception_handler_for_exception()
        // in interpreterRuntime.cpp.
        break;
      case Deoptimization::Unpack_uncommon_trap:
      case Deoptimization::Unpack_reexecute:
        // redo last byte code
        pc  = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        break;
      default:
        ShouldNotReachHere();
      }
    }
  }

  // Setup the interpreter frame

  assert(method() != NULL, "method must exist");
  int temps = expressions()->size();

  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();

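  // Lay out the interpreter activation in the skeletal frame that was pushed
  // for it, sizing the expression stack to also hold the callee's parameters
  // and any PopFrame-preserved arguments.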
  Interpreter::layout_activation(method(),
                                 temps + callee_parameters,
                                 popframe_preserved_args_size_in_words,
                                 locks,
                                 caller_actual_parameters,
                                 callee_parameters,
                                 callee_locals,
                                 caller,
                                 iframe(),
                                 is_top_frame,
                                 is_bottom_frame);

  // Update the pc in the frame object and overwrite the temporary pc
  // we placed in the skeletal frame now that we finally know the
  // exact interpreter address we should use.

  _frame.patch_pc(thread, pc);

  assert(!method()->is_synchronized() || locks > 0 || _removed_monitors || raw_bci() == SynchronizationEntryBCI, "synchronized methods must have monitors");

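  // Re-install the monitors that fill_in() moved into the off-stack monitor
  // chunk back into the interpreter frame's monitor block.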
  BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
  for (int index = 0; index < locks; index++) {
    top = iframe()->previous_monitor_in_interpreter_frame(top);
    BasicObjectLock* src = _monitors->at(index);
    top->set_obj(src->obj());
    src->lock()->move_to(src->obj(), top->lock());
  }
  if (ProfileInterpreter) {
    iframe()->interpreter_frame_set_mdp(0); // clear out the mdp.
  }
  iframe()->interpreter_frame_set_bcp(bcp);
  if (ProfileInterpreter) {
    MethodData* mdo = method()->method_data();
    if (mdo != NULL) {
      int bci = iframe()->interpreter_frame_bci();
      if (use_next_mdp) ++bci;
      address mdp = mdo->bci_to_dp(bci);
      iframe()->interpreter_frame_set_mdp(mdp);
    }
  }

  if (PrintDeoptimizationDetails) {
    tty->print_cr("Expressions size: %d", expressions()->size());
  }

  // Unpack expression stack
  // If this is an intermediate frame (i.e. not top frame) then this
  // only unpacks the part of the expression stack not used by callee
  // as parameters. The callee parameters are unpacked as part of the
  // callee locals.
  int i;
  for(i = 0; i < expressions()->size(); i++) {
    StackValue *value = expressions()->at(i);
    intptr_t*   addr  = iframe()->interpreter_frame_expression_stack_at(i);
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print_cr("Reconstructed expression %d (INT): %d", i, (int)(*addr));
        }
#endif
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print("Reconstructed expression %d (OBJECT): ", i);
          oop o = (oop)(address)(*addr);
          if (o == NULL) {
            tty->print_cr("NULL");
          } else {
            ResourceMark rm;
            tty->print_raw_cr(o->klass()->name()->as_C_string());
          }
        }
#endif
        break;
      case T_CONFLICT:
        // A dead stack slot.  Initialize to null in case it is an oop.
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }


  // Unpack the locals
  for(i = 0; i < locals()->size(); i++) {
    StackValue *value = locals()->at(i);
    intptr_t* addr  = iframe()->interpreter_frame_local_at(i);
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print_cr("Reconstructed local %d (INT): %d", i, (int)(*addr));
        }
#endif
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print("Reconstructed local %d (OBJECT): ", i);
          oop o = (oop)(address)(*addr);
          if (o == NULL) {
            tty->print_cr("NULL");
          } else {
            ResourceMark rm;
            tty->print_raw_cr(o->klass()->name()->as_C_string());
          }
        }
#endif
        break;
      case T_CONFLICT:
        // A dead location. If it is an oop then we need a NULL to prevent GC from following it
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }

  if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    // An interpreted frame was popped but it returns to a deoptimized
    // frame. The incoming arguments to the interpreted activation
    // were preserved in thread-local storage by the
    // remove_activation_preserving_args_entry in the interpreter; now
    // we put them back into the just-unpacked interpreter frame.
    // Note that this assumes that the locals arena grows toward lower
    // addresses.
    if (popframe_preserved_args_size_in_words != 0) {
      void* saved_args = thread->popframe_preserved_args();
      assert(saved_args != NULL, "must have been saved by interpreter");
#ifdef ASSERT
      assert(popframe_preserved_args_size_in_words <=
             iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords,
             "expression stack size should have been extended");
#endif // ASSERT
      int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
      intptr_t* base;
      if (frame::interpreter_frame_expression_stack_direction() < 0) {
        base = iframe()->interpreter_frame_expression_stack_at(top_element);
      } else {
        base = iframe()->interpreter_frame_expression_stack();
      }
      Copy::conjoint_jbytes(saved_args,
                            base,
                            popframe_preserved_args_size_in_bytes);
      thread->popframe_free_preserved_args();
    }
  }

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    ttyLocker ttyl;
    tty->print_cr("[%d Interpreted Frame]", ++unpack_counter);
    iframe()->print_on(tty);
    RegisterMap map(thread);
    vframe* f = vframe::new_vframe(iframe(), &map, thread);
    f->print();

    tty->print_cr("locals size     %d", locals()->size());
    tty->print_cr("expression size %d", expressions()->size());

    method()->print_value();
    tty->cr();
    // method()->print_codes();
  } else if (TraceDeoptimization) {
    tty->print("     ");
    method()->print_value();
    Bytecodes::Code code = Bytecodes::java_code_at(method(), bcp);
    int bci = method()->bci_from(bcp);
    tty->print(" - %s", Bytecodes::name(code));
    tty->print(" @ bci %d ", bci);
    tty->print_cr("sp = " PTR_FORMAT, p2i(iframe()->sp()));
  }
#endif // PRODUCT

  // The expression stack and locals are in the resource area; don't leave
  // a dangling pointer in the vframeArray we keep around for debugging
  // purposes.

  _locals = _expressions = NULL;

}

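// Size this interpreter activation will need on the stack, computed from the
// same inputs that unpack_on_stack() later uses to lay the frame out.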
int vframeArrayElement::on_stack_size(int callee_parameters,
                                      int callee_locals,
                                      bool is_top_frame,
                                      int popframe_extra_stack_expression_els) const {
  assert(method()->max_locals() == locals()->size(), "just checking");
  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
  int temps = expressions()->size();
  return Interpreter::size_activation(method()->max_stack(),
                                      temps + callee_parameters,
                                      popframe_extra_stack_expression_els,
                                      locks,
                                      callee_parameters,
                                      callee_locals,
                                      is_top_frame);
}


intptr_t* vframeArray::unextended_sp() const {
  assert(owner_thread()->is_in_usable_stack((address) _original.unextended_sp()), INTPTR_FORMAT, p2i(_original.unextended_sp()));
  return _original.unextended_sp();
}

vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
                                   RegisterMap *reg_map, frame sender, frame caller, frame self,
                                   bool realloc_failures) {

  // Allocate the vframeArray
  vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
                                                     sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
                                                     mtCompiler);
  result->_frames = chunk->length();
  result->_owner_thread = thread;
  result->_sender = sender;
  result->_caller = caller;
  result->_original = self;
  result->set_unroll_block(NULL); // initialize it
  result->fill_in(thread, frame_size, chunk, reg_map, realloc_failures);
  return result;
}

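// Record one vframeArrayElement per compiled vframe in the chunk and snapshot
// the callee-saved register values from the register map.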
void vframeArray::fill_in(JavaThread* thread,
                          int frame_size,
                          GrowableArray<compiledVFrame*>* chunk,
                          const RegisterMap *reg_map,
                          bool realloc_failures) {
  // Set owner first; it is used when adding monitor chunks

  _frame_size = frame_size;
  for(int i = 0; i < chunk->length(); i++) {
    element(i)->fill_in(chunk->at(i), realloc_failures);
  }

  // Copy registers for callee-saved registers
  if (reg_map != NULL) {
    for(int i = 0; i < RegisterMap::reg_count; i++) {
#ifdef AMD64
      // The register map has one entry for every int (32-bit value), so
      // 64-bit physical registers have two entries in the map, one for
      // each half.  Ignore the high halves of 64-bit registers, just like
      // frame::oopmapreg_to_location does.
      //
      // [phh] FIXME: this is a temporary hack!  This code *should* work
      // correctly w/o this hack, possibly by changing RegisterMap::pd_location
      // in frame_amd64.cpp and the values of the phantom high half registers
      // in amd64.ad.
      //      if (VMReg::Name(i) < SharedInfo::stack0 && is_even(i)) {
        intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i));
        _callee_registers[i] = src != NULL ? *src : NULL_WORD;
        //      } else {
        //      jint* src = (jint*) reg_map->location(VMReg::Name(i));
        //      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
        //      }
#else
      jint* src = (jint*) reg_map->location(VMRegImpl::as_VMReg(i));
      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
#endif
      if (src == NULL) {
        set_location_valid(i, false);
      } else {
        set_location_valid(i, true);
        jint* dst = (jint*) register_location(i);
        *dst = *src;
      }
    }
  }
}

void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters) {
  // stack picture
  //   unpack_frame
  //   [new interpreter frames ] (frames are skeletal but walkable)
  //   caller_frame
  //
  //  This routine fills in the missing data for the skeletal interpreter frames
  //  in the above picture.

  // Find the skeletal interpreter frames to unpack into
  JavaThread* THREAD = JavaThread::current();
  RegisterMap map(THREAD, false);
  // Get the youngest frame we will unpack (last to be unpacked)
  frame me = unpack_frame.sender(&map);
  int index;
  for (index = 0; index < frames(); index++ ) {
    *element(index)->iframe() = me;
    // Get the caller frame (possibly skeletal)
    me = me.sender(&map);
  }

  // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee
  // Unpack the frames from the oldest (frames() - 1) to the youngest (0)
  frame* caller_frame = &me;
  for (index = frames() - 1; index >= 0 ; index--) {
    vframeArrayElement* elem = element(index);  // caller
    int callee_parameters, callee_locals;
    if (index == 0) {
      callee_parameters = callee_locals = 0;
    } else {
      methodHandle caller = elem->method();
      methodHandle callee = element(index - 1)->method();
      Bytecode_invoke inv(caller, elem->bci());
      // invokedynamic instructions don't have a class but obviously don't have a MemberName appendix.
      // NOTE:  Use machinery here that avoids resolving of any kind.
      const bool has_member_arg =
          !inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name());
      callee_parameters = callee->size_of_parameters() + (has_member_arg ? 1 : 0);
      callee_locals     = callee->max_locals();
    }
    elem->unpack_on_stack(caller_actual_parameters,
                          callee_parameters,
                          callee_locals,
                          caller_frame,
                          index == 0,
                          index == frames() - 1,
                          exec_mode);
    if (index == frames() - 1) {
      Deoptimization::unwind_callee_save_values(elem->iframe(), this);
    }
    caller_frame = elem->iframe();
    caller_actual_parameters = callee_parameters;
  }
  deallocate_monitor_chunks();
}

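// Free the monitor chunks that fill_in() migrated off the compiled frame; the
// interpreter frames now own the monitors, so the chunks are no longer needed.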
void vframeArray::deallocate_monitor_chunks() {
  JavaThread* jt = JavaThread::current();
  for (int index = 0; index < frames(); index++ ) {
     element(index)->free_monitors(jt);
  }
}

#ifndef PRODUCT

bool vframeArray::structural_compare(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk) {
  if (owner_thread() != thread) return false;
  int index = 0;
#if 0 // FIXME can't do this comparison

  // Compare only within vframe array.
  for (deoptimizedVFrame* vf = deoptimizedVFrame::cast(vframe_at(first_index())); vf; vf = vf->deoptimized_sender_or_null()) {
    if (index >= chunk->length() || !vf->structural_compare(chunk->at(index))) return false;
    index++;
  }
  if (index != chunk->length()) return false;
#endif

  return true;
}

#endif

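// Address of the slot holding the saved callee-saved register value for the
// given register-map index, as captured by fill_in().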
address vframeArray::register_location(int i) const {
  assert(0 <= i && i < RegisterMap::reg_count, "index out of bounds");
  return (address) & _callee_registers[i];
}


#ifndef PRODUCT

// Printing

// Note: we cannot have print_on as const, as we allocate inside the method
void vframeArray::print_on_2(outputStream* st) {
  st->print_cr(" - sp: " INTPTR_FORMAT, p2i(sp()));
  st->print(" - thread: ");
  Thread::current()->print();
  st->print_cr(" - frame size: %d", frame_size());
  for (int index = 0; index < frames() ; index++ ) {
    element(index)->print(st);
  }
}

void vframeArrayElement::print(outputStream* st) {
  st->print_cr(" - interpreter_frame -> sp: " INTPTR_FORMAT, p2i(iframe()->sp()));
}

void vframeArray::print_value_on(outputStream* st) const {
  st->print_cr("vframeArray [%d] ", frames());
}


#endif