
src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

*** 26,10 ***
--- 26,11 ---
  
  #include "precompiled.hpp"
  #include "asm/macroAssembler.hpp"
  #include "asm/macroAssembler.inline.hpp"
  #include "code/codeCache.hpp"
+ #include "code/compiledIC.hpp"
  #include "code/debugInfoRec.hpp"
  #include "code/icBuffer.hpp"
  #include "code/vtableStubs.hpp"
  #include "compiler/oopMap.hpp"
  #include "gc/shared/barrierSetAssembler.hpp"

*** 230,11 ***
  #if !INCLUDE_JVMCI
    assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
  #endif
    __ pop_CPU_state(_save_vectors);
  #endif
!   __ leave();
  
  }
  
  // Is vector's size (in bytes) bigger than a size saved by default?
  // 8 bytes vector registers are saved by default on AArch64.
--- 231,11 ---
  #if !INCLUDE_JVMCI
    assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
  #endif
    __ pop_CPU_state(_save_vectors);
  #endif
!   __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
  
  }
  
  // Is vector's size (in bytes) bigger than a size saved by default?
  // 8 bytes vector registers are saved by default on AArch64.

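Note on the changed epilogue in the hunk above: on AArch64, MacroAssembler::leave() first reloads sp from rfp and then pops the saved fp/lr pair, while the replacement pops the pair from wherever pop_CPU_state left sp, without going through rfp. A minimal sketch of the difference, assuming the usual macroAssembler_aarch64.hpp definitions:

    // What "__ leave()" expands to:
    __ mov(sp, rfp);                                      // rewind sp to the frame base
    __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));  // pop saved fp/lr

    // The replacement: pop only, keeping sp where pop_CPU_state left it.
    __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));

Presumably this matters once frames can be copied to and from continuation stacks, where rfp need not point back into the frame being torn down.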
*** 680,10 ***
--- 681,14 ---
          __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
        }
      }
    }
  
+   __ mov(rscratch2, rscratch1); // rscratch1 is live here; preserve it across the helper
+   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
+   __ mov(rscratch1, rscratch2);
+ 
    // 6243940 We might end up in handle_wrong_method if
    // the callee is deoptimized as we race thru here. If that
    // happens we don't want to take a safepoint because the
    // caller frame will look interpreted and arguments are now
    // "compiled" so it is much better to make this transition

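The rscratch1 shuffle added above is needed because push_cont_fastpath clobbers rscratch1 (as its comment notes) while the adapter still has a live value in it. For orientation, a sketch of the helper's likely shape; this is an assumption inferred from its documented effect, with JavaThread::cont_fastpath_offset() as an assumed accessor (the real definition lives in macroAssembler_aarch64.cpp):

    // Sketch only: record sp in JavaThread::_cont_fastpath if it is older
    // (higher, since stacks grow down) than what is currently recorded.
    void MacroAssembler::push_cont_fastpath(Register java_thread) {
      Label done;
      ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
      cmp(sp, rscratch1);
      br(Assembler::LS, done);
      mov(rscratch1, sp);  // str cannot encode sp as its source register
      str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
      bind(done);
    }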
*** 1205,10 ***
--- 1210,99 ---
        }
      }
    }
  }
  
+ // defined in stubGenerator_aarch64.cpp
+ OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots);
+ void fill_continuation_entry(MacroAssembler* masm);
+ void continuation_enter_cleanup(MacroAssembler* masm);
+ 
+ // enterSpecial(Continuation c, boolean isContinue)
+ // On entry: c_rarg1 -- the continuation object
+ //           c_rarg2 -- isContinue
+ static void gen_continuation_enter(MacroAssembler* masm,
+                                    const methodHandle& method,
+                                    const BasicType* sig_bt,
+                                    const VMRegPair* regs,
+                                    int& exception_offset,
+                                    OopMapSet* oop_maps,
+                                    int& frame_complete,
+                                    int& stack_slots) {
+   //verify_oop_args(masm, method, sig_bt, regs);
+   Address resolve(SharedRuntime::get_resolve_static_call_stub(),
+                   relocInfo::static_call_type);
+ 
+   stack_slots = 2; // will be overwritten
+   address start = __ pc();
+ 
+   Label call_thaw, exit;
+ 
+   __ enter(); // push rfp, lr and set rfp = sp
+ 
+   // Note: the nmethod entry barrier is not emitted here yet:
+   //   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+   //   bs->nmethod_entry_barrier(masm);
+   OopMap* map = continuation_enter_setup(masm, stack_slots);
+ 
+   // Frame is now completed as far as size and linkage.
+   frame_complete = __ pc() - start;
+ 
+   fill_continuation_entry(masm);
+ 
+   __ cmp(c_rarg2, (u1)0);
+   __ br(Assembler::NE, call_thaw);
+ 
+   address mark = __ pc();
+   // Resolvable static call; the matching to-interpreter stub is emitted
+   // below at 'mark' (see CompiledStaticCall::emit_to_interp_stub).
+   __ trampoline_call1(resolve, NULL, false);
+ 
+   oop_maps->add_gc_map(__ pc() - start, map);
+   __ post_call_nop();
+ 
+   __ b(exit);
+ 
+   __ bind(call_thaw);
+ 
+   // isContinue != 0: thaw the continuation's frames on top of this entry frame.
+   rt_call(masm, CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
+   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
+   ContinuationEntry::return_pc_offset = __ pc() - start;
+   __ post_call_nop();
+ 
+   __ bind(exit);
+   continuation_enter_cleanup(masm);
+   __ leave();
+   __ ret(lr);
+ 
+   // Exception handling
+ 
+   exception_offset = __ pc() - start;
+   {
+     __ mov(r19, r0); // save the return value containing the exception oop in callee-saved r19
+ 
+     continuation_enter_cleanup(masm);
+ 
+     __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
+     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
+ 
+     // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
+ 
+     __ mov(r1, r0);  // the exception handler
+     __ mov(r0, r19); // restore the return value containing the exception oop
+     __ verify_oop(r0);
+ 
+     __ leave();
+     __ mov(r3, lr);
+     __ br(r1); // jump to the exception handler
+   }
+ 
+   // Emit the to-interpreter stub for the static call made at 'mark' above.
+   CodeBuffer* cbuf = masm->code_section()->outer();
+   address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, mark);
+ }
+ 
  static void gen_special_dispatch(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs) {
    verify_oop_args(masm, method, sig_bt, regs);

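Taken together, gen_continuation_enter emits two entry paths that share a single ContinuationEntry frame. A rough control-flow summary of the code emitted above (comments only, paraphrasing the instructions in the hunk):

    // enterSpecial(c_rarg1 = continuation, c_rarg2 = isContinue):
    //   enter                            // link a standard rfp/lr frame
    //   continuation_enter_setup         // reserve the ContinuationEntry; yields its OopMap
    //   fill_continuation_entry          // populate the entry
    //   if (isContinue != 0) goto call_thaw
    //   resolvable static call           // fresh continuation; GC map recorded at the return pc
    //   goto exit
    // call_thaw:
    //   call StubRoutines::cont_thaw()   // remount: thaw the frozen frames
    // exit:
    //   continuation_enter_cleanup       // unlink the ContinuationEntry
    //   leave; ret lr
    //
    // On the exception path, r0 (the exception oop) is parked in r19, the entry
    // is cleaned up, and control branches to the handler returned by
    // SharedRuntime::exception_handler_for_return_address.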
*** 1287,10 ***
--- 1381,41 ---
                                                  int compile_id,
                                                  BasicType* in_sig_bt,
                                                  VMRegPair* in_regs,
                                                  BasicType ret_type,
                                                  address critical_entry) {
+   if (method->is_continuation_enter_intrinsic()) {
+     intptr_t start = (intptr_t)__ pc();
+     int vep_offset = ((intptr_t)__ pc()) - start;
+     int exception_offset = 0;
+     int frame_complete = 0;
+     int stack_slots = 0;
+     OopMapSet* oop_maps = new OopMapSet();
+     gen_continuation_enter(masm,
+                            method,
+                            in_sig_bt,
+                            in_regs,
+                            exception_offset,
+                            oop_maps,
+                            frame_complete,
+                            stack_slots);
+     __ flush();
+     nmethod* nm = nmethod::new_native_nmethod(method,
+                                               compile_id,
+                                               masm->code(),
+                                               vep_offset,
+                                               frame_complete,
+                                               stack_slots,
+                                               in_ByteSize(-1),
+                                               in_ByteSize(-1),
+                                               oop_maps,
+                                               exception_offset);
+     ContinuationEntry::set_enter_nmethod(nm);
+     return nm;
+   }
+ 
    if (method->is_method_handle_intrinsic()) {
      vmIntrinsics::ID iid = method->intrinsic_id();
      intptr_t start = (intptr_t)__ pc();
      int vep_offset = ((intptr_t)__ pc()) - start;
  
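In the hunk above, the continuation-enter branch runs before the existing method-handle intrinsic check, and the two in_ByteSize(-1) arguments are the lock-owner and BasicLock stack offsets used only by synchronized native wrappers, absent here. Caching the result via ContinuationEntry::set_enter_nmethod lets the runtime recognize the entry frame later; a hedged sketch of such a check, where enter_nmethod() is a hypothetical accessor matching the setter used above:

    // Sketch only: identify the enterSpecial frame during a stack walk.
    static bool is_continuation_enter_frame(const frame& f) {
      CodeBlob* cb = f.cb();
      return cb != NULL && cb->as_nmethod_or_null() == ContinuationEntry::enter_nmethod();
    }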