
src/hotspot/cpu/aarch64/frame_aarch64.cpp

@@ -113,10 +113,12 @@
  
      // Entry frame checks
      if (is_entry_frame()) {
        // an entry frame must have a valid fp.
        return fp_safe && is_entry_frame_valid(thread);
+     } else if (is_optimized_entry_frame()) {
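+       // like an entry frame, an optimized entry frame only needs a valid fp here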
+       return fp_safe;
      }
  
      intptr_t* sender_sp = NULL;
      intptr_t* sender_unextended_sp = NULL;
      address   sender_pc = NULL;

@@ -209,10 +211,12 @@
  
        // Validate the JavaCallWrapper an entry frame must have
        address jcw = (address)sender.entry_frame_call_wrapper();
  
        return thread->is_in_stack_range_excl(jcw, (address)sender.fp());
+     } else if (sender_blob->is_optimized_entry_blob()) {
+       return false;
      }
  
      CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
      if (nm != NULL) {
        if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||

@@ -361,22 +365,43 @@
  
    return fr;
  }
  
  OptimizedEntryBlob::FrameData* OptimizedEntryBlob::frame_data_for_frame(const frame& frame) const {
-   ShouldNotCallThis();
-   return nullptr;
+   assert(frame.is_optimized_entry_frame(), "wrong frame");
+   // need unextended_sp here, since normal sp is wrong for interpreter callees
+   return reinterpret_cast<OptimizedEntryBlob::FrameData*>(
+     reinterpret_cast<char*>(frame.unextended_sp()) + in_bytes(_frame_data_offset));
  }
  
  bool frame::optimized_entry_frame_is_first() const {
-   ShouldNotCallThis();
-   return false;
+   assert(is_optimized_entry_frame(), "must be optimzed entry frame");
+   OptimizedEntryBlob* blob = _cb->as_optimized_entry_blob();
+   JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
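+   // a cleared anchor (no last Java sp) means there is no previous Java frame to go back to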
+   return jfa->last_Java_sp() == NULL;
  }
  
  frame frame::sender_for_optimized_entry_frame(RegisterMap* map) const {
-   ShouldNotCallThis();
-   return {};
+   assert(map != NULL, "map must be set");
+   OptimizedEntryBlob* blob = _cb->as_optimized_entry_blob();
+   // Java frame called from C; skip all C frames and return top C
+   // frame of that chunk as the sender
+   JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
+   assert(!optimized_entry_frame_is_first(), "must have a frame anchor to go back to");
+   assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
+   // Since we are walking the stack now this nested anchor is obviously walkable
+   // even if it wasn't when it was stacked.
+   if (!jfa->walkable()) {
+     // Capture _last_Java_pc (if needed) and mark anchor walkable.
+     jfa->capture_last_Java_pc();
+   }
+   map->clear();
+   assert(map->include_argument_oops(), "should be set by clear");
+   vmassert(jfa->last_Java_pc() != NULL, "not walkable");
+   frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
+ 
+   return fr;
  }
  
  //------------------------------------------------------------------------------
  // frame::verify_deopt_original_pc
  //

@@ -505,10 +530,12 @@
    // update it accordingly
     map->set_include_argument_oops(false);
  
    if (is_entry_frame())
      return sender_for_entry_frame(map);
+   if (is_optimized_entry_frame())
+     return sender_for_optimized_entry_frame(map);
    if (is_interpreted_frame())
      return sender_for_interpreter_frame(map);
    assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
  
    // This test looks odd: why is it not is_compiled_frame() ?  That's