/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#include "runtime/vframeArray.hpp"
#endif

#ifdef ASSERT
void RegisterMap::check_location_valid() {
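  // Nothing to check on x86: there are no extra constraints on where
  // recorded register locations may point.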
}
#endif

// Profiling/safepoint support

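// safe_for_sender() is a best-effort validity check used when a stack may be
// walked asynchronously (profiling, safepoints, error reporting), so it must
// tolerate half-built or garbage frames rather than crash. As a rough sketch
// of the x86 frame linkage it relies on (offsets in words relative to fp;
// the authoritative definitions are in frame_x86.hpp):
//
//   fp[ 2]   sender's sp                 (sender_sp_offset)
//   fp[ 1]   return address              (return_addr_offset)
//   fp[ 0]   saved rbp, the "link"       (link_offset)
//   fp[-1]   sender's unextended sp      (interpreter_frame_sender_sp_offset,
//                                         interpreted frames only)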
bool frame::safe_for_sender(JavaThread *thread) {
  address   sp = (address)_sp;
  address   fp = (address)_fp;
  address   unextended_sp = (address)_unextended_sp;

  // consider stack guards when trying to determine "safe" stack pointers
  // sp must be within the usable part of the stack (not in guards)
  if (!thread->is_in_usable_stack(sp)) {
    return false;
  }

  // unextended sp must be within the stack and above or equal to sp
  if (!thread->is_in_stack_range_incl(unextended_sp, sp)) {
    return false;
  }

  // an fp must be within the stack and above (but not equal to) sp
  // the second check on fp + return_addr_offset handles the situation
  // where fp is -1 and the addition wraps around
  bool fp_safe = thread->is_in_stack_range_excl(fp, sp) &&
                 thread->is_in_full_stack_checked(fp + (return_addr_offset * sizeof(void*)));

  // We know sp/unextended_sp are safe; only fp is questionable here.

  // If the current frame is known to the code cache then we can attempt to
  // construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get into frame construction code.

  if (_cb != NULL) {

    // First check if the frame is complete and the tester is reliable.
    // Unfortunately we can only check frame completeness for runtime stubs
    // and nmethods; other generic buffer blobs are more problematic so we
    // just assume they are ok. Adapter blobs never have a complete frame
    // and are never ok.

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.
      return fp_safe && is_entry_frame_valid(thread);
    } else if (is_optimized_entry_frame()) {
      return fp_safe;
    }

    intptr_t* sender_sp = NULL;
    intptr_t* sender_unextended_sp = NULL;
    address   sender_pc = NULL;
    intptr_t* saved_fp = NULL;

    if (is_interpreted_frame()) {
      // fp must be safe
      if (!fp_safe) {
        return false;
      }

      sender_pc = (address) this->fp()[return_addr_offset];
      // for interpreted frames, the value below is the sender "raw" sp,
      // which can be different from the sender unextended sp (the sp seen
      // by the sender) because of current frame local variables
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);
      sender_unextended_sp = (intptr_t*) this->fp()[interpreter_frame_sender_sp_offset];
      saved_fp = (intptr_t*) this->fp()[link_offset];

    } else {
      // must be some sort of compiled/runtime frame
      // fp does not have to be safe (although it could be checked for c1?)

      // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
      if (_cb->frame_size() <= 0) {
        return false;
      }

      sender_sp = _unextended_sp + _cb->frame_size();
      // Is sender_sp safe?
      if (!thread->is_in_full_stack_checked((address)sender_sp)) {
        return false;
      }
      // On Intel the return address is always the word on the stack
      // just below the sender's sp
      sender_pc = (address) *(sender_sp-1);
      // Note: frame::sender_sp_offset is only valid for compiled frames
      intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);
      saved_fp = *saved_fp_addr;

      // Repair the sender sp if this is a method with scalarized inline type args
      sender_sp = repair_sender_sp(sender_sp, saved_fp_addr);
      sender_unextended_sp = sender_sp;
    }

    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // ebp is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved ebp
      // is really a frame pointer.

      if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);

    }

    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_pc == NULL || sender_blob == NULL) {
      return false;
    }

    // Could be a zombie method
    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }

    // Could just be some random pointer within the codeBlob
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from the code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      return thread->is_in_stack_range_excl(jcw, (address)sender.fp());
    } else if (sender_blob->is_optimized_entry_blob()) {
      return false;
    }

    CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
    if (nm != NULL) {
      if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
          nm->method()->is_method_handle_intrinsic()) {
        return false;
      }
    }

    // If the frame size is 0 (or less) something is bad because every nmethod
    // has a non-zero frame size: the return address counts against the
    // callee's frame.

    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_compiled(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache, that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.

    if (!sender_blob->is_compiled()) {
      return false;
    }

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be a native-compiled frame. Since the sender will try to use fp to find
  // linkages, it must be safe.

  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)?

  if ((address) this->fp()[return_addr_offset] == NULL) return false;

  // could try and do some more potential verification of native frame if we could think of some...

  return true;
}


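// patch_pc() is used, e.g., by deoptimization to redirect where this frame
// will return to. On x86 the saved return address is the word just below
// this frame's sp, hence the sp()[-1] slot patched below.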
void frame::patch_pc(Thread* thread, address pc) {
  assert(_cb == CodeCache::find_blob(pc), "unexpected pc");
  address* pc_addr = &(((address*) sp())[-1]);
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
                  p2i(pc_addr), p2i(*pc_addr), p2i(pc));
  }
  // Either the return address is the original one or we are going to
  // patch in the same address that's already there.
  assert(_pc == *pc_addr || pc == *pc_addr, "must be");
  *pc_addr = pc;
  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    assert(original_pc == _pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}

bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

int frame::frame_size(RegisterMap* map) const {
  frame sender = this->sender(map);
  return sender.sp() - sp();
}

intptr_t* frame::entry_frame_argument_at(int offset) const {
  // convert offset to index to deal with tsi
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  // Entry frame's arguments are always in relation to unextended_sp()
  return &unextended_sp()[index];
}

// sender_sp

intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return (intptr_t*) at(interpreter_frame_sender_sp_offset);
}

void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}


// monitor elements

BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}

BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
  // make sure the pointer points inside the frame
  assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
  assert((intptr_t*) result < fp(),  "monitor end should be strictly below the frame pointer");
  return result;
}

void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
}
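
// A note on layout (a sketch; the authoritative offsets are in frame_x86.hpp):
// the monitor block sits between the fixed interpreter frame area and the
// expression stack, growing toward lower addresses as monitors are added.
// monitor_begin() is its fixed high end, while monitor_end() follows the
// stored block-top pointer, which the asserts above constrain to [sp, fp).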

// Used by template based interpreter deoptimization
void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
  *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
}

frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non-zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  if (!jfa->walkable()) {
    // Capture _last_Java_pc (if needed) and mark anchor walkable.
    jfa->capture_last_Java_pc();
  }
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  vmassert(jfa->last_Java_pc() != NULL, "not walkable");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());

  return fr;
}

OptimizedEntryBlob::FrameData* OptimizedEntryBlob::frame_data_for_frame(const frame& frame) const {
  assert(frame.is_optimized_entry_frame(), "wrong frame");
  // need unextended_sp here, since normal sp is wrong for interpreter callees
  return reinterpret_cast<OptimizedEntryBlob::FrameData*>(
    reinterpret_cast<char*>(frame.unextended_sp()) + in_bytes(_frame_data_offset));
}

bool frame::optimized_entry_frame_is_first() const {
  assert(is_optimized_entry_frame(), "must be optimized entry frame");
  OptimizedEntryBlob* blob = _cb->as_optimized_entry_blob();
  JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
  return jfa->last_Java_sp() == NULL;
}

frame frame::sender_for_optimized_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  OptimizedEntryBlob* blob = _cb->as_optimized_entry_blob();
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
  assert(!optimized_entry_frame_is_first(), "must have a frame anchor to go back to");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  if (!jfa->walkable()) {
    // Capture _last_Java_pc (if needed) and mark anchor walkable.
    jfa->capture_last_Java_pc();
  }
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  vmassert(jfa->last_Java_pc() != NULL, "not walkable");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());

  return fr;
}

//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
  frame fr;

  // This is ugly but it's better than to change {get,set}_original_pc
  // to take an SP value as argument.  And it's only a debugging
  // method anyway.
  fr._unextended_sp = unextended_sp;

  address original_pc = nm->get_original_pc(&fr);
  assert(nm->insts_contains_inclusive(original_pc),
         "original PC must be in the main code section of the compiled method (or must be immediately following it)");
}
#endif

//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
#ifdef ASSERT
void frame::adjust_unextended_sp() {
  // On x86, sites calling method handle intrinsics and lambda forms are treated
  // as any other call site. Therefore, no special action is needed when we are
  // returning to any of these call sites.

  if (_cb != NULL) {
    CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
    if (sender_cm != NULL) {
      // If the sender PC is a deoptimization point, get the original PC.
      if (sender_cm->is_deopt_entry(_pc) ||
          sender_cm->is_deopt_mh_entry(_pc)) {
        verify_deopt_original_pc(sender_cm, _unextended_sp);
      }
    }
  }
}
#endif

//------------------------------------------------------------------------------
// frame::update_map_with_saved_link
void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save EBP/RBP in a known
  // location on entry. We must record where that location is so that
  // if EBP/RBP was live on callout from c2 we can find the saved copy
  // no matter what it called.

  // Since the interpreter always saves EBP/RBP, if we record where it is then
  // we don't have to always save EBP/RBP on entry and exit to c2 compiled
  // code; saving it on entry will be enough.
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  // this is weird: "H" ought to be at a higher address, however the
  // oopMaps seem to have the "H" regs at the same address as the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}
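
// To illustrate the effect: after update_map_with_saved_link(map, link_addr),
// a lookup such as map->location(rbp->as_VMReg()) yields link_addr, so stack
// walkers and the GC can find (and, if needed, update) the caller's saved
// RBP even though no explicit oopmap entry exists for it.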

//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  // SP is the raw SP from the sender after adapter or interpreter
  // extension.
  intptr_t* sender_sp = this->sender_sp();

  // This is the sp before any possible extension (adapter/locals).
  intptr_t* unextended_sp = interpreter_frame_sender_sp();

#if COMPILER2_OR_JVMCI
  if (map->update_map()) {
    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
  }
#endif // COMPILER2_OR_JVMCI

  return frame(sender_sp, unextended_sp, link(), sender_pc());
}


//------------------------------------------------------------------------------
// frame::sender_for_compiled_frame
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");

  // frame owned by optimizing compiler
  assert(_cb->frame_size() >= 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();

#ifdef ASSERT
  address sender_pc_copy = (address) *(sender_sp-1);
#endif

  // This is the saved value of EBP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);

  // Repair the sender sp if the frame has been extended
  sender_sp = repair_sender_sp(sender_sp, saved_fp_addr);

  // On Intel the return address is always the word on the stack
  // just below the sender's sp
  address sender_pc = (address) *(sender_sp-1);

#ifdef ASSERT
  if (sender_pc != sender_pc_copy) {
    // When extending the stack in the callee method entry to make room for unpacking of value
    // type args, we keep a copy of the sender pc at the expected location in the callee frame.
    // If the sender pc is patched due to deoptimization, the copy is not consistent anymore.
    nmethod* nm = CodeCache::find_blob(sender_pc)->as_nmethod();
    assert(sender_pc == nm->deopt_mh_handler_begin() || sender_pc == nm->deopt_handler_begin(), "unexpected sender pc");
  }
#endif

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    bool caller_args = _cb->caller_must_gc_arguments(map->thread());
#ifdef COMPILER1
    if (!caller_args) {
      nmethod* nm = _cb->as_nmethod_or_null();
      if (nm != NULL && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
          pc() < nm->verified_inline_entry_point()) {
        // The VEP and VIEP(RO) of C1-compiled methods call buffer_inline_args_xxx
        // before doing any argument shuffling, so we need to scan the oops
        // as the caller passes them.
        caller_args = true;
#ifdef ASSERT
        NativeCall* call = nativeCall_before(pc());
        address dest = call->destination();
        assert(dest == Runtime1::entry_for(Runtime1::buffer_inline_args_no_receiver_id) ||
               dest == Runtime1::entry_for(Runtime1::buffer_inline_args_id), "unexpected safepoint in entry point");
#endif
      }
    }
#endif
    map->set_include_argument_oops(caller_args);
    if (_cb->oop_maps() != NULL) {
      OopMapSet::update_register_map(this, map);
    }

    // Since the prolog does the save and restore of EBP there is no oopmap
    // for it so we must fill in its location as if there were an oopmap entry
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");
  return frame(sender_sp, sender_sp, *saved_fp_addr, sender_pc);
}


//------------------------------------------------------------------------------
// frame::sender_raw
frame frame::sender_raw(RegisterMap* map) const {
  // By default we don't have to follow argument oops; the sender_for_xxx
  // methods will update the map accordingly.
  map->set_include_argument_oops(false);

  if (is_entry_frame())           return sender_for_entry_frame(map);
  if (is_optimized_entry_frame()) return sender_for_optimized_entry_frame(map);
  if (is_interpreted_frame())     return sender_for_interpreter_frame(map);
  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");

  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }
  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}

frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}
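
// StackWatermarkSet::on_iteration() above supports lazy stack processing by
// concurrent GCs: a frame below the thread's watermark may still need to be
// processed (e.g. have its oops fixed up) before it is exposed to an iterator.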

bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements
  // first the method

  Method* m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!Method::is_valid_method(m)) return false;

  // stack frames shouldn't be much larger than max_stack elements
  // this test requires the use of the unextended_sp, which is the sp as seen
  // by the current frame, and not the "raw" sp, which could point further
  // down because of local variables of the callee method inserted after the
  // method arguments
  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcp

  address bcp = interpreter_frame_bcp();
  if (m->validate_bci_from_bcp(bcp) < 0) {
    return false;
  }

  // validate ConstantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (!MetaspaceObj::is_valid(cp)) return false;

  // validate locals

  address locals = (address) *interpreter_frame_locals_addr();
  return thread->is_in_stack_range_incl(locals, (address)fp());
}

BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr;
  if (method->is_native()) {
    // Prior to calling into the runtime to report the method_exit the possible
    // return value is pushed to the native stack. If the result is a jfloat/jdouble
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // QQQ seems like this code is equivalent on the two platforms
#ifdef AMD64
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
#else
      tos_addr += 2;
#endif // AMD64
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_INLINE_TYPE:
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(Universe::is_in_heap_or_null(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
#ifdef AMD64
      value_result->f = *(jfloat*)tos_addr;
#else
      if (method->is_native()) {
        jdouble d = *(jdouble*)tos_addr;  // Result was in ST0 so need to convert to jfloat
        value_result->f = (jfloat)d;
      } else {
        value_result->f = *(jfloat*)tos_addr;
      }
#endif // AMD64
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}


intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  return &interpreter_frame_tos_address()[index];
}

#ifndef PRODUCT

#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
#ifdef AMD64
  } else if (is_entry_frame()) {
    // This could be more descriptive if we use the enum in
    // stubGenerator to map to real names but it's most important to
    // claim these frame slots so the error checking works.
    for (int i = 0; i < entry_frame_after_call_words; i++) {
      values.describe(frame_no, fp() - i, err_msg("call_stub word fp - %d", i));
    }
#endif // AMD64
  }
}
#endif // !PRODUCT

intptr_t* frame::initial_deoptimization_info() {
  // used to reset the saved FP
  return fp();
}

intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(!is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

#ifndef PRODUCT
// This is a generic constructor which is only used by pns() in debug.cpp.
frame::frame(void* sp, void* fp, void* pc) {
  init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}

#endif

// Check for a method with scalarized inline type arguments that needs
// a stack repair and return the repaired sender stack pointer.
intptr_t* frame::repair_sender_sp(intptr_t* sender_sp, intptr_t** saved_fp_addr) const {
  CompiledMethod* cm = _cb->as_compiled_method_or_null();
  if (cm != NULL && cm->needs_stack_repair()) {
    // The stack increment resides just below the saved rbp on the stack
    // and does not account for the return address.
    intptr_t* real_frame_size_addr = (intptr_t*) (saved_fp_addr - 1);
    int real_frame_size = ((*real_frame_size_addr) + wordSize) / wordSize;
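    // For example (hypothetical numbers; wordSize is 8 on x86_64): a stored
    // increment of 88 bytes gives (88 + 8) / 8 = 12 words once the return
    // address is accounted for.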
    assert(real_frame_size >= _cb->frame_size() && real_frame_size <= 1000000, "invalid frame size");
    sender_sp = unextended_sp() + real_frame_size;
  }
  return sender_sp;
}

void JavaFrameAnchor::make_walkable(JavaThread* thread) {
  // last frame set?
  if (last_Java_sp() == NULL) return;
  // already walkable?
  if (walkable()) return;
  vmassert(Thread::current() == (Thread*)thread, "not current thread");
  vmassert(last_Java_sp() != NULL, "not called from Java code?");
  vmassert(last_Java_pc() == NULL, "already walkable");
  capture_last_Java_pc();
  vmassert(walkable(), "something went wrong");
}

void JavaFrameAnchor::capture_last_Java_pc() {
  vmassert(_last_Java_sp != NULL, "no last frame set");
  vmassert(_last_Java_pc == NULL, "already walkable");
  _last_Java_pc = (address)_last_Java_sp[-1];
}
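
// Note: the word at _last_Java_sp[-1] is the return address pushed by the
// call that left Java code, which is presumably why capturing the last Java
// pc needs no extra saved state beyond the sp itself.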