src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

@@ -1177,10 +1177,11 @@
  
    __ b(exit);
  
    __ bind(call_thaw);
  
+   ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
    __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
    oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
    ContinuationEntry::_return_pc_offset = __ pc() - start;
    __ post_call_nop();
  

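The added line captures where the call into StubRoutines::cont_thaw() sits inside the generated enter stub, as an offset from the stub's start, next to the existing _return_pc_offset that marks the instruction after the call. A minimal sketch of that bookkeeping pattern, with hypothetical names standing in for the HotSpot types:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical model: a generated stub records interesting code
    // positions as offsets from its own start, so they stay valid
    // wherever the blob is placed in the code cache.
    struct ThawStubOffsets {
      std::ptrdiff_t thaw_call_pc_offset = -1;  // position of the call into the thaw stub
      std::ptrdiff_t return_pc_offset    = -1;  // position right after that call
    };

    inline void record_thaw_offsets(ThawStubOffsets& offs,
                                    const std::uint8_t* start,
                                    const std::uint8_t* call_pc,
                                    const std::uint8_t* return_pc) {
      offs.thaw_call_pc_offset = call_pc - start;    // mirrors "__ pc() - start" before the call
      offs.return_pc_offset    = return_pc - start;  // mirrors "__ pc() - start" after it
    }
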
@@ -1279,10 +1280,14 @@
  
      OopMap* map = new OopMap(framesize, 1);
      oop_maps->add_gc_map(the_pc - start, map);
  }
  
+ void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
+   ::continuation_enter_cleanup(masm);
+ }
+ 
  static void gen_special_dispatch(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs) {
    verify_oop_args(masm, method, sig_bt, regs);

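The new SharedRuntime::continuation_enter_cleanup(MacroAssembler*) is a thin platform entry point that forwards to the file-local helper of the same name; the leading :: picks the free function at namespace scope instead of recursing into the member. A small self-contained illustration of that delegation pattern, with made-up names:

    // Free helper at namespace scope (the real one is a static function
    // local to sharedRuntime_aarch64.cpp).
    static void enter_cleanup(int* state) { *state = 0; }

    struct RuntimeFacade {
      // The "::" qualifier forces a call to the free function above
      // rather than resolving back to this member.
      static void enter_cleanup(int* state) { ::enter_cleanup(state); }
    };
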
@@ -1747,13 +1752,12 @@
    }
  
    // Change state to native (we save the return address in the thread, since it might not
    // be pushed on the stack when we do a stack traversal).
    // We use the same pc/oopMap repeatedly when we call out
- 
-   Label native_return;
-   __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
+   Label resume_pc;
+   __ set_last_Java_frame(sp, noreg, resume_pc, rscratch1);
  
    Label dtrace_method_entry, dtrace_method_entry_done;
    if (DTraceMethodProbes) {
      __ b(dtrace_method_entry);
      __ bind(dtrace_method_entry_done);

@@ -1827,16 +1831,18 @@
        __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
  
        // Save the test result, for recursive case, the result is zero
        __ str(swap_reg, Address(lock_reg, mark_word_offset));
        __ br(Assembler::NE, slow_path_lock);
+       __ b(lock_done);
      } else {
        assert(LockingMode == LM_LIGHTWEIGHT, "must be");
        __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
+       __ b(lock_done);
      }
      __ bind(count);
-     __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
+     __ inc_held_monitor_count();
  
      // Slow path will re-enter here
      __ bind(lock_done);
    }
  

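Two things change on the fast locking path: the recursive stack-lock case and the lightweight-lock fast path now branch straight to lock_done, so the block at count only runs on the path that is actually meant to bump the counter, and the raw increment of JavaThread::held_monitor_count is replaced by the inc_held_monitor_count() helper. A rough standalone model of the reshaped control flow (hypothetical names, plain C++ in place of generated assembly):

    // Hypothetical model of the fast-lock outcomes after the change.
    enum class FastLock { Locked, Counted, Slow };

    void lock_path(FastLock outcome, long& held_monitor_count) {
      switch (outcome) {
        case FastLock::Slow:
          // slow_path_lock: runtime call, re-enters at lock_done below
          break;
        case FastLock::Locked:
          // __ b(lock_done): skip the counting block entirely
          break;
        case FastLock::Counted:
          // count: the only path that still updates the per-thread
          // counter, now through a helper (modeled as a plain increment)
          ++held_monitor_count;
          break;
      }
      // lock_done: all paths, including the slow one, converge here
    }
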
@@ -1851,15 +1857,10 @@
    __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
    __ stlrw(rscratch1, rscratch2);
  
    __ rt_call(native_func);
  
-   __ bind(native_return);
- 
-   intptr_t return_pc = (intptr_t) __ pc();
-   oop_maps->add_gc_map(return_pc - start, map);
- 
    // Verify or restore cpu control state after JNI call
    __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
  
    // Unpack native results.
    switch (ret_type) {

@@ -1922,10 +1923,24 @@
    __ mov(rscratch1, _thread_in_Java);
    __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
    __ stlrw(rscratch1, rscratch2);
    __ bind(after_transition);
  
+   // Check preemption for Object.wait()
+   if (method->is_object_wait0()) {
+     Label not_preempted;
+     __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+     __ cbz(rscratch1, not_preempted);
+     __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+     __ br(rscratch1);
+     __ bind(not_preempted);
+   }
+   __ bind(resume_pc);
+ 
+   intptr_t the_pc = (intptr_t) __ pc();
+   oop_maps->add_gc_map(the_pc - start, map);
+ 
    Label reguard;
    Label reguard_done;
    __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
    __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
    __ br(Assembler::EQ, reguard);

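For a preempted Object.wait(), the runtime stores an alternate return pc in the new JavaThread::preempt_alternate_return field; when the native wrapper gets here it finds the field non-zero, clears it, and jumps there instead of falling through. The oop map that used to be registered right after the native call is now attached to resume_pc, the point where a resumed frame continues. A small self-contained sketch of that hand-off, using an ordinary function pointer in place of the real thread field:

    #include <cstdio>

    // Hypothetical stand-in for the per-thread field: a one-shot
    // "alternate return" slot that redirects execution once and is
    // cleared as it is consumed.
    using resume_fn = void (*)();

    struct ThreadModel {
      resume_fn preempt_alternate_return = nullptr;
    };

    void after_wait0_returns(ThreadModel& t) {
      if (resume_fn target = t.preempt_alternate_return) {
        t.preempt_alternate_return = nullptr;  // like "str zr, [rthread, #offset]"
        target();                              // like "br rscratch1"
        return;
      }
      // not_preempted falls through to resume_pc: the normal epilogue
      std::puts("normal return from Object.wait()");
    }
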
@@ -1945,11 +1960,10 @@
  
      if (LockingMode == LM_LEGACY) {
        // Simple recursive lock?
        __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
        __ cbnz(rscratch1, not_recursive);
-       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
        __ b(done);
      }
  
      __ bind(not_recursive);
  

@@ -1968,15 +1982,14 @@
  
        // Atomic swap old header if oop still contains the stack lock
        Label count;
        __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
        __ bind(count);
-       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
+       __ dec_held_monitor_count();
      } else {
        assert(LockingMode == LM_LIGHTWEIGHT, "");
        __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
-       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
      }
  
      // slow path re-enters here
      __ bind(unlock_done);
      if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {

@@ -2040,11 +2053,13 @@
      __ mov(c_rarg0, obj_reg);
      __ mov(c_rarg1, lock_reg);
      __ mov(c_rarg2, rthread);
  
      // Not a leaf but we have last_Java_frame setup as we want
+     __ push_cont_fastpath();
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
+     __ pop_cont_fastpath();
      restore_args(masm, total_c_args, c_arg, out_regs);
  
  #ifdef ASSERT
      { Label L;
        __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));

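The slow-path monitor enter is now bracketed by push_cont_fastpath()/pop_cont_fastpath(), which, as the names suggest, mark the region around the runtime call so that a continuation freeze happening while the thread is blocked in complete_monitor_locking_C is steered off the freeze fast path. A conceptual scoped-guard rendering of the same bracket (hypothetical names; the real helpers update a field on the JavaThread):

    // Hypothetical model: a scope marker set before calling into the
    // runtime and cleared afterwards, mirroring the push/pop pair
    // around complete_monitor_locking_C.
    struct ContFastpathMark {
      bool& barrier;
      explicit ContFastpathMark(bool& b) : barrier(b) { barrier = true; }  // push_cont_fastpath()
      ~ContFastpathMark() { barrier = false; }                             // pop_cont_fastpath()
    };

    void slow_path_lock(bool& freeze_fastpath_barrier /*, locking args... */) {
      ContFastpathMark mark(freeze_fastpath_barrier);
      // the call_VM_leaf into complete_monitor_locking_C would go here
    }
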
@@ -2579,10 +2594,14 @@
  
  uint SharedRuntime::out_preserve_stack_slots() {
    return 0;
  }
  
+ VMReg SharedRuntime::thread_register() {
+   return rthread->as_VMReg();
+ }
+ 
  #ifdef COMPILER2
  //------------------------------generate_uncommon_trap_blob--------------------
  void SharedRuntime::generate_uncommon_trap_blob() {
    // Allocate space for the code
    ResourceMark rm;
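The new SharedRuntime::thread_register() lets platform-independent code ask which register carries the current JavaThread*; on aarch64 that is rthread, handed back as a VMReg. A tiny illustration of how shared code might consume such a query, with an invented handle type and caller:

    // Hypothetical consumer of a per-platform "thread register" query:
    // shared code only needs an opaque handle, not the name rthread.
    struct VMRegHandle { int encoding; };

    // Per-CPU answer; the placeholder value stands in for whatever
    // rthread->as_VMReg() encodes on aarch64.
    VMRegHandle platform_thread_register() { return VMRegHandle{0}; }

    bool is_thread_register(VMRegHandle candidate) {
      return candidate.encoding == platform_thread_register().encoding;
    }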