src/hotspot/cpu/x86/frame_x86.inline.hpp

@@ -30,10 +30,13 @@
  #include "code/vmreg.inline.hpp"
  #include "compiler/oopMap.inline.hpp"
  #include "interpreter/interpreter.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/registerMap.hpp"
+ #ifdef COMPILER1
+ #include "c1/c1_Runtime1.hpp"
+ #endif
  
  // Inline functions for Intel frames:
  
  // Constructors:
  

@@ -383,24 +386,59 @@
    // frame owned by optimizing compiler
    assert(_cb->frame_size() > 0, "must have non-zero frame size");
    intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
    assert(sender_sp == real_fp(), "");
  
-   // On Intel the return_address is always the word on the stack
-   address sender_pc = (address) *(sender_sp-1);
+ #ifdef ASSERT
+   address sender_pc_copy = (address) *(sender_sp-1);
+ #endif
  
    // This is the saved value of EBP which may or may not really be an FP.
    // It is only an FP if the sender is an interpreter frame (or C1?).
    // saved_fp_addr should be correct even for a bottom thawed frame (with a return barrier)
    intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);
  
+   // Repair the sender sp if the frame has been extended
+   sender_sp = repair_sender_sp(sender_sp, saved_fp_addr);
+ 
+   // On Intel the return_address is always the word on the stack
+   address sender_pc = (address) *(sender_sp-1);
+ 
+ #ifdef ASSERT
+   if (sender_pc != sender_pc_copy) {
+     // When extending the stack in the callee method entry to make room for unpacking of value
+     // type args, we keep a copy of the sender pc at the expected location in the callee frame.
+     // If the sender pc is patched due to deoptimization, the copy is not consistent anymore.
+     nmethod* nm = CodeCache::find_blob(sender_pc)->as_nmethod();
+     assert(sender_pc == nm->deopt_mh_handler_begin() || sender_pc == nm->deopt_handler_begin(), "unexpected sender pc");
+   }
+ #endif
+ 
    if (map->update_map()) {
      // Tell GC to use argument oopmaps for some runtime stubs that need it.
      // For C1, the runtime stub might not have oop maps, so set this flag
      // outside of update_register_map.
-     if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers
-       map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
+     bool c1_buffering = false;
+ #ifdef COMPILER1
+     nmethod* nm = _cb->as_nmethod_or_null();
+     if (nm != nullptr && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
+         pc() < nm->verified_inline_entry_point()) {
+       // The VEP and VIEP(RO) of C1-compiled methods call buffer_inline_args_xxx
+       // before doing any argument shuffling, so we need to scan the oops
+       // as the caller passes them.
+       c1_buffering = true;
+ #ifdef ASSERT
+       NativeCall* call = nativeCall_before(pc());
+       address dest = call->destination();
+       assert(dest == Runtime1::entry_for(C1StubId::buffer_inline_args_no_receiver_id) ||
+              dest == Runtime1::entry_for(C1StubId::buffer_inline_args_id), "unexpected safepoint in entry point");
+ #endif
+     }
+ #endif
+     if (!_cb->is_nmethod() || c1_buffering) { // compiled frames do not use callee-saved registers
+       bool caller_args = _cb->caller_must_gc_arguments(map->thread()) || c1_buffering;
+       map->set_include_argument_oops(caller_args);
        if (oop_map() != nullptr) {
          _oop_map->update_register_map(this, map);
        }
      } else {
        assert(!_cb->caller_must_gc_arguments(map->thread()), "");