
src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

@@ -1138,10 +1138,11 @@
  
    __ b(exit);
  
    __ bind(call_thaw);
  
+   ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
    __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
    oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
    ContinuationEntry::_return_pc_offset = __ pc() - start;
    __ post_call_nop();
  

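Note: the added line records the offset of the call into the thaw stub, measured from the start of the generated blob, in ContinuationEntry::_thaw_call_pc_offset, in the same way the existing _return_pc_offset is captured two lines below. A minimal sketch of how such an offset can be turned back into an absolute pc once the blob's code start is known (blob_code_start is an illustrative name, not part of this change):

    address thaw_call_pc = blob_code_start + ContinuationEntry::_thaw_call_pc_offset;
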
@@ -1241,10 +1242,14 @@
  
      OopMap* map = new OopMap(framesize, 1);
      oop_maps->add_gc_map(the_pc - start, map);
  }
  
+ void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
+   ::continuation_enter_cleanup(masm);
+ }
+ 
  static void gen_special_dispatch(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs) {
    verify_oop_args(masm, method, sig_bt, regs);

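Note: the new SharedRuntime::continuation_enter_cleanup() simply forwards to the file-local static helper of the same name; the leading :: selects the file-scope function instead of recursing into the member being defined. Shared code can then emit the cleanup without reaching into the AArch64-specific helper, e.g. (illustrative call site only):

    SharedRuntime::continuation_enter_cleanup(masm);  // masm is the MacroAssembler in use
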
@@ -1792,16 +1797,18 @@
        __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
  
        // Save the test result, for recursive case, the result is zero
        __ str(swap_reg, Address(lock_reg, mark_word_offset));
        __ br(Assembler::NE, slow_path_lock);
+       __ b(lock_done);
      } else {
        assert(LockingMode == LM_LIGHTWEIGHT, "must be");
        __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
+       __ b(lock_done);
      }
      __ bind(count);
-     __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
+     __ inc_held_monitor_count();
  
      // Slow path will re-enter here
      __ bind(lock_done);
    }
  

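Note: in the locking sequence, both the recursive LM_LEGACY case and the LM_LIGHTWEIGHT case now branch straight to lock_done, bypassing the counter update bound at count, and the raw field increment is replaced by the MacroAssembler helper inc_held_monitor_count(). A hedged sketch of what that helper amounts to (the real helper may add debug checks or differ per locking mode):

    void MacroAssembler::inc_held_monitor_count() {
      // Same effect as the removed line: bump the thread-local held monitor counter.
      increment(Address(rthread, JavaThread::held_monitor_count_offset()));
    }
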
@@ -1910,11 +1917,10 @@
  
      if (LockingMode == LM_LEGACY) {
        // Simple recursive lock?
        __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
        __ cbnz(rscratch1, not_recursive);
-       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
        __ b(done);
      }
  
      __ bind(not_recursive);
  

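Note: on the unlock side, the matching decrement for the recursive LM_LEGACY case is removed, so a recursive stack lock no longer adjusts the held monitor count on either entry (see the branch to lock_done above) or exit.
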
@@ -1933,15 +1939,14 @@
  
        // Atomic swap old header if oop still contains the stack lock
        Label count;
        __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
        __ bind(count);
-       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
+       __ dec_held_monitor_count();
      } else {
        assert(LockingMode == LM_LIGHTWEIGHT, "");
        __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
-       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
      }
  
      // slow path re-enters here
      __ bind(unlock_done);
      if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {

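Note: the non-recursive LM_LEGACY unlock keeps its counter update but now goes through dec_held_monitor_count(), the counterpart of inc_held_monitor_count() sketched above, while the LM_LIGHTWEIGHT path drops its explicit decrement altogether.
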
@@ -2546,10 +2551,14 @@
  
  uint SharedRuntime::out_preserve_stack_slots() {
    return 0;
  }
  
+ VMReg SharedRuntime::thread_register() {
+   return rthread->as_VMReg();
+ }
+ 
  #ifdef COMPILER2
  //------------------------------generate_uncommon_trap_blob--------------------
  void SharedRuntime::generate_uncommon_trap_blob() {
    // Allocate space for the code
    ResourceMark rm;
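
Note: SharedRuntime::thread_register() exposes the dedicated AArch64 thread register rthread as a VMReg, so platform-independent code can name the current-thread register without #ifdefs. Illustrative use, not part of this change:

    VMReg thr = SharedRuntime::thread_register();  // rthread->as_VMReg() on AArch64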