/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/vmreg.inline.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

int vframeArrayElement::bci(void) const { return (_bci == SynchronizationEntryBCI ? 0 : _bci); }

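// Release the off-stack monitor chunk (if any) allocated by fill_in():
// detach it from the thread's list of monitor chunks and free it.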
void vframeArrayElement::free_monitors(JavaThread* jt) {
  if (_monitors != NULL) {
     MonitorChunk* chunk = _monitors;
     _monitors = NULL;
     jt->remove_monitor_chunk(chunk);
     delete chunk;
  }
}

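// Capture the state of one compiled vframe (method, bci, reexecute bit,
// monitors, locals and expression stack) into this element so that an
// equivalent interpreter frame can be rebuilt later by unpack_on_stack().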
void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) {

// Copy the information from the compiled vframe to the
// interpreter frame we will be creating to replace vf

  _method = vf->method();
  _bci    = vf->raw_bci();
  _reexecute = vf->should_reexecute();
#ifdef ASSERT
  _removed_monitors = false;
#endif

  int index;

  {
    Thread* current_thread = Thread::current();
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);

    // Get the monitors off-stack

    GrowableArray<MonitorInfo*>* list = vf->monitors();
    if (list->is_empty()) {
      _monitors = NULL;
    } else {

      // Allocate monitor chunk
      _monitors = new MonitorChunk(list->length());
      vf->thread()->add_monitor_chunk(_monitors);

      // Migrate the BasicObjectLocks from the stack to the monitor chunk
      for (index = 0; index < list->length(); index++) {
        MonitorInfo* monitor = list->at(index);
        assert(!monitor->owner_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
        BasicObjectLock* dest = _monitors->at(index);
        if (monitor->owner_is_scalar_replaced()) {
          dest->set_obj(NULL);
        } else {
          assert(monitor->owner() == NULL || !monitor->owner()->is_unlocked(), "object must be null or locked");
          dest->set_obj(monitor->owner());
        }
      }
    }
  }

  // Convert the vframe locals and expressions to off-stack
  // values. Because we will not GC, all oops can be converted to
  // intptr_t (i.e. a stack slot) safely. This matters because we
  // are inside a HandleMark, so the oops in our collection would
  // otherwise go away between packing them here and unpacking them
  // in unpack_on_stack.

  // First the locals go off-stack

  // FIXME: this seems silly; it creates a StackValueCollection just
  // to get the size, then copies the values and converts the types
  // to intptr_t-sized slots. It seems like this could be done in
  // place. Still, it uses less memory than the old way.

  StackValueCollection *locs = vf->locals();
  _locals = new StackValueCollection(locs->size());
  for(index = 0; index < locs->size(); index++) {
    StackValue* value = locs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
        // preserve object type
        _locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead local.  Will be initialized to null/zero.
        _locals->add( new StackValue());
        break;
      case T_INT:
        _locals->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Now the expressions off-stack
  // Same silliness as above

  StackValueCollection *exprs = vf->expressions();
  _expressions = new StackValueCollection(exprs->size());
  for(index = 0; index < exprs->size(); index++) {
    StackValue* value = exprs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
        // preserve object type
        _expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead stack element.  Will be initialized to null/zero.
        // This can occur when the compiler emits a state in which stack
        // elements are known to be dead (because of an imminent exception).
        _expressions->add( new StackValue());
        break;
      case T_INT:
        _expressions->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }
}

int unpack_counter = 0;

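// Build a real interpreter frame for this element in the skeletal frame that
// was laid out during deoptimization: compute the bcp and continuation pc,
// install the monitors, and unpack the expression stack and locals that
// fill_in() captured.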
void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
                                         int callee_parameters,
                                         int callee_locals,
                                         frame* caller,
                                         bool is_top_frame,
                                         bool is_bottom_frame,
                                         int exec_mode) {
  JavaThread* thread = JavaThread::current();

  bool realloc_failure_exception = thread->frames_to_pop_failed_realloc() > 0;

  // Look at bci and decide on bcp and continuation pc
  address bcp;
  // C++ interpreter doesn't need a pc since it will figure out what to do when it
  // begins execution
  address pc;
  bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
                             // rather than the one associated with bcp
  if (raw_bci() == SynchronizationEntryBCI) {
    // We are deoptimizing while hanging in prologue code for synchronized method
    bcp = method()->bcp_from(0); // first byte code
    pc  = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
  } else if (should_reexecute()) { //reexecute this bytecode
    assert(is_top_frame, "reexecute allowed only for the top frame");
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_reexecute_entry(method(), bcp);
  } else {
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
    use_next_mdp = true;
  }
  assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");

  // Monitorenter and pending exceptions:
  //
  // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
  // because there is no safepoint at the null pointer check (it is either handled explicitly
  // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
  // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER).  If an asynchronous
  // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
  // the monitorenter to place it in the proper exception range.
  //
  // For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
  // in which case bcp should point to the monitorenter since it is within the exception's range.
  //
  // For a realloc failure exception we just pop frames, so skip the guarantee.

  assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
  assert(thread->deopt_compiled_method() != NULL, "compiled method should be known");
  guarantee(realloc_failure_exception || !(thread->deopt_compiled_method()->is_compiled_by_c2() &&
              *bcp == Bytecodes::_monitorenter             &&
              exec_mode == Deoptimization::Unpack_exception),
            "shouldn't get exception during monitorenter");

  int popframe_preserved_args_size_in_bytes = 0;
  int popframe_preserved_args_size_in_words = 0;
  if (is_top_frame) {
    JvmtiThreadState *state = thread->jvmti_thread_state();
    if (JvmtiExport::can_pop_frame() &&
        (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
      if (thread->has_pending_popframe()) {
        // Pop top frame after deoptimization
        pc = Interpreter::remove_activation_preserving_args_entry();
      } else {
        // Reexecute invoke in top frame
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
        // Note: the PopFrame-related extension of the expression stack size is done in
        // Deoptimization::fetch_unroll_info_helper
        popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
      }
    } else if (!realloc_failure_exception && JvmtiExport::can_force_early_return() && state != NULL &&
               state->is_earlyret_pending()) {
      // Force early return from top frame after deoptimization
      pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
    } else {
      if (realloc_failure_exception && JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
        state->clr_earlyret_pending();
        state->set_earlyret_oop(NULL);
        state->clr_earlyret_value();
      }
      // Possibly override the previous pc computation of the top (youngest) frame
      switch (exec_mode) {
      case Deoptimization::Unpack_deopt:
        // use what we've got
        break;
      case Deoptimization::Unpack_exception:
        // exception is pending
        pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
        // [phh] We're going to end up in some handler or other, so it doesn't
        // matter what mdp we point to.  See exception_handler_for_exception()
        // in interpreterRuntime.cpp.
        break;
      case Deoptimization::Unpack_uncommon_trap:
      case Deoptimization::Unpack_reexecute:
        // redo last byte code
        pc  = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        break;
      default:
        ShouldNotReachHere();
      }
    }
  }

  // Setup the interpreter frame

  assert(method() != NULL, "method must exist");
  int temps = expressions()->size();

  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();

  Interpreter::layout_activation(method(),
                                 temps + callee_parameters,
                                 popframe_preserved_args_size_in_words,
                                 locks,
                                 caller_actual_parameters,
                                 callee_parameters,
                                 callee_locals,
                                 caller,
                                 iframe(),
                                 is_top_frame,
                                 is_bottom_frame);

  // Update the pc in the frame object and overwrite the temporary pc
  // we placed in the skeletal frame now that we finally know the
  // exact interpreter address we should use.

  _frame.patch_pc(thread, pc);

  assert(!method()->is_synchronized() || locks > 0 || _removed_monitors || raw_bci() == SynchronizationEntryBCI, "synchronized methods must have monitors");

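  // Install the monitors saved by fill_in() into the monitor area of the
  // new interpreter frame.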
  BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
  for (int index = 0; index < locks; index++) {
    top = iframe()->previous_monitor_in_interpreter_frame(top);
    BasicObjectLock* src = _monitors->at(index);
    top->set_obj(src->obj());
  }
  if (ProfileInterpreter) {
    iframe()->interpreter_frame_set_mdp(0); // clear out the mdp.
  }
  iframe()->interpreter_frame_set_bcp(bcp);
  if (ProfileInterpreter) {
    MethodData* mdo = method()->method_data();
    if (mdo != NULL) {
      int bci = iframe()->interpreter_frame_bci();
      if (use_next_mdp) ++bci;
      address mdp = mdo->bci_to_dp(bci);
      iframe()->interpreter_frame_set_mdp(mdp);
    }
  }

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    tty->print_cr("Expressions size: %d", expressions()->size());
  }
#endif // !PRODUCT

  // Unpack the expression stack.
  // If this is an intermediate frame (i.e. not the top frame) then this
  // only unpacks the part of the expression stack not used by the callee
  // as parameters. The callee parameters are unpacked as part of the
  // callee locals.
  int i;
  for(i = 0; i < expressions()->size(); i++) {
    StackValue *value = expressions()->at(i);
    intptr_t*   addr  = iframe()->interpreter_frame_expression_stack_at(i);
    assert(!is_bottom_frame || !(caller->is_compiled_caller() && addr >= caller->unextended_sp()), "overwriting caller frame!");
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print_cr(" - Reconstructed expression %d (INT): %d", i, (int)(*addr));
        }
#endif // !PRODUCT
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print(" - Reconstructed expression %d (OBJECT): ", i);
          oop o = cast_to_oop((address)(*addr));
          if (o == NULL) {
            tty->print_cr("NULL");
          } else {
            ResourceMark rm;
            tty->print_raw_cr(o->klass()->name()->as_C_string());
          }
        }
#endif // !PRODUCT
        break;
      case T_CONFLICT:
        // A dead stack slot.  Initialize to null in case it is an oop.
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    tty->print_cr("Locals size: %d", locals()->size());
  }
#endif // !PRODUCT

  // Unpack the locals
  for(i = 0; i < locals()->size(); i++) {
    StackValue *value = locals()->at(i);
    intptr_t* addr  = iframe()->interpreter_frame_local_at(i);
    assert(!is_bottom_frame || !(caller->is_compiled_caller() && addr >= caller->unextended_sp()), "overwriting caller frame!");
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print_cr(" - Reconstructed local %d (INT): %d", i, (int)(*addr));
        }
#endif // !PRODUCT
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print(" - Reconstructed local %d (OBJECT): ", i);
          oop o = cast_to_oop((address)(*addr));
          if (o == NULL) {
            tty->print_cr("NULL");
          } else {
            ResourceMark rm;
            tty->print_raw_cr(o->klass()->name()->as_C_string());
          }
        }
#endif // !PRODUCT
        break;
      case T_CONFLICT:
        // A dead location. If it is an oop then we need a NULL to prevent GC from following it
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }

  if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    // An interpreted frame was popped but it returns to a deoptimized
    // frame. The incoming arguments to the interpreted activation
    // were preserved in thread-local storage by the
    // remove_activation_preserving_args_entry in the interpreter; now
    // we put them back into the just-unpacked interpreter frame.
    // Note that this assumes that the locals arena grows toward lower
    // addresses.
    if (popframe_preserved_args_size_in_words != 0) {
      void* saved_args = thread->popframe_preserved_args();
      assert(saved_args != NULL, "must have been saved by interpreter");
#ifdef ASSERT
      assert(popframe_preserved_args_size_in_words <=
             iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords,
             "expression stack size should have been extended");
#endif // ASSERT
      int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
      intptr_t* base;
      if (frame::interpreter_frame_expression_stack_direction() < 0) {
        base = iframe()->interpreter_frame_expression_stack_at(top_element);
      } else {
        base = iframe()->interpreter_frame_expression_stack();
      }
      Copy::conjoint_jbytes(saved_args,
                            base,
                            popframe_preserved_args_size_in_bytes);
      thread->popframe_free_preserved_args();
    }
  }

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    ttyLocker ttyl;
    tty->print_cr("[%d. Interpreted Frame]", ++unpack_counter);
    iframe()->print_on(tty);
    RegisterMap map(thread,
                    RegisterMap::UpdateMap::include,
                    RegisterMap::ProcessFrames::include,
                    RegisterMap::WalkContinuation::skip);
    vframe* f = vframe::new_vframe(iframe(), &map, thread);
    f->print();
    if (WizardMode && Verbose) method()->print_codes();
    tty->cr();
  }
#endif // !PRODUCT

  // The expression stack and locals are in the resource area, so don't
  // leave dangling pointers in the vframeArray that we leave around for
  // debugging purposes.

  _locals = _expressions = NULL;

}

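// Compute the number of words the interpreter frame built for this element
// will occupy on the stack. The result must agree with the frame laid out
// later by Interpreter::layout_activation() in unpack_on_stack().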
int vframeArrayElement::on_stack_size(int callee_parameters,
                                      int callee_locals,
                                      bool is_top_frame,
                                      int popframe_extra_stack_expression_els) const {
  assert(method()->max_locals() == locals()->size(), "just checking");
  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
  int temps = expressions()->size();
  return Interpreter::size_activation(method()->max_stack(),
                                      temps + callee_parameters,
                                      popframe_extra_stack_expression_els,
                                      locks,
                                      callee_parameters,
                                      callee_locals,
                                      is_top_frame);
}


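// Return the unextended sp of the original (deoptimized) frame, asserting
// that it still lies within the owner thread's usable stack.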
intptr_t* vframeArray::unextended_sp() const {
  assert(owner_thread()->is_in_usable_stack((address) _original.unextended_sp()), INTPTR_FORMAT, p2i(_original.unextended_sp()));
  return _original.unextended_sp();
}

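// Allocate a vframeArray in the C heap, with one vframeArrayElement per
// compiled vframe in the chunk, and record the original, caller and sender
// frames before filling in the elements.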
vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
                                   RegisterMap *reg_map, frame sender, frame caller, frame self,
                                   bool realloc_failures) {

  // Allocate the vframeArray
  vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
                                                     sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
                                                     mtCompiler);
  result->_frames = chunk->length();
  result->_owner_thread = thread;
  result->_sender = sender;
  result->_caller = caller;
  result->_original = self;
  result->set_unroll_block(NULL); // initialize it
  result->fill_in(thread, frame_size, chunk, reg_map, realloc_failures);
  return result;
}

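// Fill in the elements from the chunk of compiled vframes and record the
// callee-saved register values from the register map (later consumed via
// Deoptimization::unwind_callee_save_values(), see unpack_to_stack()).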
void vframeArray::fill_in(JavaThread* thread,
                          int frame_size,
                          GrowableArray<compiledVFrame*>* chunk,
                          const RegisterMap *reg_map,
                          bool realloc_failures) {
  // Set owner first, it is used when adding monitor chunks

  _frame_size = frame_size;
  for(int i = 0; i < chunk->length(); i++) {
    element(i)->fill_in(chunk->at(i), realloc_failures);
  }

  // Copy registers for callee-saved registers
  if (reg_map != NULL) {
    for(int i = 0; i < RegisterMap::reg_count; i++) {
#ifdef AMD64
      // The register map has one entry for every int (32-bit value), so
      // 64-bit physical registers have two entries in the map, one for
      // each half.  Ignore the high halves of 64-bit registers, just like
      // frame::oopmapreg_to_location does.
      //
      // [phh] FIXME: this is a temporary hack!  This code *should* work
      // correctly w/o this hack, possibly by changing RegisterMap::pd_location
      // in frame_amd64.cpp and the values of the phantom high half registers
      // in amd64.ad.
      //      if (VMReg::Name(i) < SharedInfo::stack0 && is_even(i)) {
        intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i), _caller.sp());
        _callee_registers[i] = src != NULL ? *src : NULL_WORD;
        //      } else {
        //      jint* src = (jint*) reg_map->location(VMReg::Name(i));
        //      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
        //      }
#else
      jint* src = (jint*) reg_map->location(VMRegImpl::as_VMReg(i), _caller.sp());
      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
#endif
      if (src == NULL) {
        set_location_valid(i, false);
      } else {
        set_location_valid(i, true);
        jint* dst = (jint*) register_location(i);
        *dst = *src;
      }
    }
  }
}

void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters) {
  // stack picture
  //   unpack_frame
  //   [new interpreter frames ] (frames are skeletal but walkable)
  //   caller_frame
  //
  //  This routine fills in the missing data for the skeletal interpreter frames
  //  in the above picture.

  // Find the skeletal interpreter frames to unpack into
  JavaThread* current = JavaThread::current();

  RegisterMap map(current,
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  // Get the youngest frame we will unpack (last to be unpacked)
  frame me = unpack_frame.sender(&map);
  int index;
  for (index = 0; index < frames(); index++ ) {
    *element(index)->iframe() = me;
    // Get the caller frame (possibly skeletal)
    me = me.sender(&map);
  }

  Events::log_deopt_message(current, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
                            p2i(unpack_frame.pc()), p2i(unpack_frame.sp()), exec_mode);

  if (TraceDeoptimization) {
    ResourceMark rm;
    stringStream st;
    st.print_cr("DEOPT UNPACKING thread=" INTPTR_FORMAT " vframeArray=" INTPTR_FORMAT " mode=%d",
                p2i(current), p2i(this), exec_mode);
    st.print_cr("   Virtual frames (outermost/oldest first):");
    tty->print_raw(st.freeze());
  }

  // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee
  // Unpack the frames from the oldest (frames() - 1) to the youngest (0)
  frame* caller_frame = &me;
  for (index = frames() - 1; index >= 0 ; index--) {
    vframeArrayElement* elem = element(index);  // caller
    int callee_parameters, callee_locals;
    if (index == 0) {
      callee_parameters = callee_locals = 0;
    } else {
      methodHandle caller(current, elem->method());
      methodHandle callee(current, element(index - 1)->method());
      Bytecode_invoke inv(caller, elem->bci());
      // invokedynamic instructions don't have a class and obviously don't have a MemberName appendix.
      // NOTE:  Use machinery here that avoids resolving of any kind.
      const bool has_member_arg =
          !inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name());
      callee_parameters = callee->size_of_parameters() + (has_member_arg ? 1 : 0);
      callee_locals     = callee->max_locals();
    }
    if (TraceDeoptimization) {
      ResourceMark rm;
      stringStream st;
      st.print("      VFrame %d (" INTPTR_FORMAT ")", index, p2i(elem));
      st.print(" - %s", elem->method()->name_and_sig_as_C_string());
      int bci = elem->raw_bci();
      const char* code_name;
      if (bci == SynchronizationEntryBCI) {
        code_name = "sync entry";
      } else {
        Bytecodes::Code code = elem->method()->code_at(bci);
        code_name = Bytecodes::name(code);
      }
      st.print(" - %s", code_name);
      st.print(" @ bci=%d ", bci);
      st.print_cr("sp=" PTR_FORMAT, p2i(elem->iframe()->sp()));
      tty->print_raw(st.freeze());
    }
    elem->unpack_on_stack(caller_actual_parameters,
                          callee_parameters,
                          callee_locals,
                          caller_frame,
                          index == 0,
                          index == frames() - 1,
                          exec_mode);
    if (index == frames() - 1) {
      Deoptimization::unwind_callee_save_values(elem->iframe(), this);
    }
    caller_frame = elem->iframe();
    caller_actual_parameters = callee_parameters;
  }
  deallocate_monitor_chunks();
  if (TraceDeoptimization) {
    tty->cr();
  }
}

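// Free the off-stack monitor chunks of every element now that their
// contents have been installed into the new interpreter frames.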
void vframeArray::deallocate_monitor_chunks() {
  JavaThread* jt = JavaThread::current();
  for (int index = 0; index < frames(); index++ ) {
     element(index)->free_monitors(jt);
  }
}

#ifndef PRODUCT

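// Debug-only sanity check that this vframeArray corresponds to the given
// chunk of compiled vframes. The element-wise comparison is currently
// disabled (see the FIXME below); only the owner thread is checked.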
bool vframeArray::structural_compare(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk) {
  if (owner_thread() != thread) return false;
  int index = 0;
#if 0 // FIXME can't do this comparison

  // Compare only within vframe array.
  for (deoptimizedVFrame* vf = deoptimizedVFrame::cast(vframe_at(first_index())); vf; vf = vf->deoptimized_sender_or_null()) {
    if (index >= chunk->length() || !vf->structural_compare(chunk->at(index))) return false;
    index++;
  }
  if (index != chunk->length()) return false;
#endif

  return true;
}

#endif // !PRODUCT

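// Address of the slot holding the saved value of callee-saved register i,
// filled in by vframeArray::fill_in().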
address vframeArray::register_location(int i) const {
  assert(0 <= i && i < RegisterMap::reg_count, "index out of bounds");
  return (address) & _callee_registers[i];
}


#ifndef PRODUCT

// Printing

// Note: we cannot have print_on as const, as we allocate inside the method
void vframeArray::print_on_2(outputStream* st)  {
  st->print_cr(" - sp: " INTPTR_FORMAT, p2i(sp()));
  st->print(" - thread: ");
  Thread::current()->print();
  st->print_cr(" - frame size: %d", frame_size());
  for (int index = 0; index < frames() ; index++ ) {
    element(index)->print(st);
  }
}

void vframeArrayElement::print(outputStream* st) {
  st->print_cr(" - interpreter_frame -> sp: " INTPTR_FORMAT, p2i(iframe()->sp()));
}

void vframeArray::print_value_on(outputStream* st) const {
  st->print_cr("vframeArray [%d] ", frames());
}


#endif // !PRODUCT