
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

*** 443,11 ***
  
    int offset = code_offset();
  
    // Fetch the exception from TLS and clear out exception related thread state
    Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
!   NOT_LP64(__ get_thread(rsi));
    __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
  
    __ bind(_unwind_handler_entry);
--- 443,11 ---
  
    int offset = code_offset();
  
    // Fetch the exception from TLS and clear out exception related thread state
    Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
!   NOT_LP64(__ get_thread(thread));
    __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
  
    __ bind(_unwind_handler_entry);

*** 461,10 ***
--- 461,12 ---
    if (method()->is_synchronized()) {
      monitor_address(0, FrameMap::rax_opr);
      stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
      __ unlock_object(rdi, rsi, rax, *stub->entry());
      __ bind(*stub->continuation());
+     NOT_LP64(__ get_thread(thread);)
+     __ dec_held_monitor_count(thread);
    }
  
    if (compilation()->env()->dtrace_method_probes()) {
  #ifdef _LP64
      __ mov(rdi, r15_thread);

*** 2880,18 ***
--- 2882,22 ---
  void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
    assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
           "must be aligned");
    __ call(AddressLiteral(op->addr(), rtype));
    add_call_info(code_offset(), op->info());
+   __ oopmap_metadata(op->info());
+   __ post_call_nop();
  }
  
  
  void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
    __ ic_call(op->addr());
    add_call_info(code_offset(), op->info());
+   __ oopmap_metadata(op->info());
    assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
           "must be aligned");
+   __ post_call_nop();
  }
  
  
  void LIR_Assembler::emit_static_call_stub() {
    address call_pc = __ pc();

*** 3523,11 ***
--- 3529,42 ---
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      Unimplemented();
    }
+   if (op->code() == lir_lock) {
+     // If deoptimization happens in Runtime1::monitorenter, the inc_held_monitor_count
+     // after returning from the slowpath will be skipped. The solution is to either
+     // 1. increase the count only in the fastpath, or
+     // 2. have Runtime1::monitorenter increase the count after locking.
+ #ifndef _LP64
+     Register thread = rsi;
+     __ push(thread);
+     __ get_thread(thread);
+ #else
+     Register thread = r15_thread;
+ #endif
+     __ inc_held_monitor_count(thread);
+ #ifndef _LP64
+     __ pop(thread);
+ #endif
+   }
    __ bind(*op->stub()->continuation());
+   if (op->code() == lir_unlock) {
+     // The slowpath unlock is a JRT_Leaf stub, so no deoptimization can happen there
+ #ifndef _LP64
+     Register thread = rsi;
+     __ push(thread);
+     __ get_thread(thread);
+ #else
+     Register thread = r15_thread;
+ #endif
+     __ dec_held_monitor_count(thread);
+ #ifndef _LP64
+     __ pop(thread);
+ #endif
+   }
  }
  
  
  void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
    ciMethod* method = op->profiled_method();

*** 3866,11 ***
--- 3903,13 ---
  void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
    assert(!tmp->is_valid(), "don't need temporary");
    __ call(RuntimeAddress(dest));
    if (info != NULL) {
      add_call_info_here(info);
+     __ oopmap_metadata(info);
    }
+   __ post_call_nop();
  }
  
  
  void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
    assert(type == T_LONG, "only for volatile long fields");
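
Note: the hunks above repeatedly use the NOT_LP64/LP64_ONLY macros to pick the thread register (a scratch register such as rsi on 32-bit, the dedicated r15_thread on 64-bit). A minimal standalone sketch of that conditional-compilation pattern is given below; the macro bodies and the Register enum are simplified stand-ins for illustration, not HotSpot's actual definitions or register types.

  // Sketch of the NOT_LP64/LP64_ONLY selection pattern seen in the diff.
  // Simplified stand-in macros; HotSpot defines its own in globalDefinitions.hpp.
  #include <cstdio>

  #ifdef _LP64
  #define LP64_ONLY(code) code
  #define NOT_LP64(code)
  #else
  #define LP64_ONLY(code)
  #define NOT_LP64(code) code
  #endif

  // Hypothetical names standing in for HotSpot Register values.
  enum Register { rsi, r15_thread };

  int main() {
    // On 64-bit, r15 permanently holds the current JavaThread*, so no load is
    // needed; on 32-bit, a scratch register must be loaded via get_thread().
    Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
    NOT_LP64(std::printf("32-bit: get_thread() loads the JavaThread* into rsi\n");)
    LP64_ONLY(std::printf("64-bit: JavaThread* is already in r15_thread\n");)
    (void)thread;
    return 0;
  }

This is why the 32-bit branches in the lock/unlock hunks push a scratch register, call get_thread(), and pop it afterwards, while the 64-bit branches simply use r15_thread directly.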