
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

*** 30,15 ***
--- 30,17 ---
  #include "c1/c1_LIRAssembler.hpp"
  #include "c1/c1_MacroAssembler.hpp"
  #include "c1/c1_Runtime1.hpp"
  #include "c1/c1_ValueStack.hpp"
  #include "ci/ciArrayKlass.hpp"
+ #include "ci/ciInlineKlass.hpp"
  #include "ci/ciInstance.hpp"
  #include "compiler/oopMap.hpp"
  #include "gc/shared/collectedHeap.hpp"
  #include "gc/shared/gc_globals.hpp"
  #include "nativeInst_x86.hpp"
+ #include "oops/oop.inline.hpp"
  #include "oops/objArrayKlass.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/safepointMechanism.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/stubRoutines.hpp"

*** 477,11 ***
    if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
      __ mov(rax, rbx);  // Restore the exception
    }
  
    // remove the activation and dispatch to the unwind handler
!   __ remove_frame(initial_frame_size_in_bytes());
    __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
  
    // Emit the slow path assembly
    if (stub != nullptr) {
      stub->emit_code(this);
--- 479,11 ---
    if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
      __ mov(rax, rbx);  // Restore the exception
    }
  
    // remove the activation and dispatch to the unwind handler
!   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
    __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
  
    // Emit the slow path assembly
    if (stub != nullptr) {
      stub->emit_code(this);

*** 514,13 ***
  void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
    assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
    if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
      assert(result->fpu() == 0, "result must already be on TOS");
    }
  
    // Pop the stack before the safepoint code
!   __ remove_frame(initial_frame_size_in_bytes());
  
    if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
      __ reserved_stack_check();
    }
  
--- 516,44 ---
  void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
    assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
    if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
      assert(result->fpu() == 0, "result must already be on TOS");
    }
+   if (InlineTypeReturnedAsFields) {
+ #ifndef _LP64
+     Unimplemented();
+ #endif
+     // Check if we are returning a non-null inline type and load its fields into registers
+     ciType* return_type = compilation()->method()->return_type();
+     if (return_type->is_inlinetype()) {
+       ciInlineKlass* vk = return_type->as_inline_klass();
+       if (vk->can_be_returned_as_fields()) {
+         address unpack_handler = vk->unpack_handler();
+         assert(unpack_handler != nullptr, "must be");
+         __ call(RuntimeAddress(unpack_handler));
+       }
+     } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
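+       // The return type is not known to be an inline type at compile time, so check the returned oop dynamically.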
+       Label skip;
+       __ test_oop_is_not_inline_type(rax, rscratch1, skip);
+ 
+       // Load fields from a buffered value with an inline-class-specific handler
+       __ load_klass(rdi, rax, rscratch1);
+       __ movptr(rdi, Address(rdi, InstanceKlass::adr_inlineklass_fixed_block_offset()));
+       __ movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
+       // The unpack handler can be null if the inline type is not scalarizable in returns
+       __ testptr(rdi, rdi);
+       __ jcc(Assembler::zero, skip);
+       __ call(rdi);
+ 
+       __ bind(skip);
+     }
+     // At this point, rax points to the value object (for interpreter or C1 caller).
+     // The fields of the object are copied into registers (for C2 caller).
+   }
  
    // Pop the stack before the safepoint code
!   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
  
    if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
      __ reserved_stack_check();
    }
  

*** 538,10 ***
--- 571,14 ---
    __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
    __ ret(0);
  }
  
  
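+ // Pack inline-type field values returned in registers into a heap-allocated
+ // buffer; delegates to the MacroAssembler implementation.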
+ int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
+   return (__ store_inline_type_fields_to_buf(vk, false));
+ }
+ 
  int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
    guarantee(info != nullptr, "Shouldn't be null");
    int offset = __ offset();
  #ifdef _LP64
    const Register poll_addr = rscratch1;

*** 1612,11 ***
  
  void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
    Register len =  op->len()->as_register();
    LP64_ONLY( __ movslq(len, len); )
  
!   if (UseSlowPath ||
        (!UseFastNewObjectArray && is_reference_type(op->type())) ||
        (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
      __ jmp(*op->stub()->entry());
    } else {
      Register tmp1 = op->tmp1()->as_register();
--- 1649,11 ---
  
  void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
    Register len =  op->len()->as_register();
    LP64_ONLY( __ movslq(len, len); )
  
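+   // Arrays of null-free elements must be initialized with the default inline-type value, so they always take the slow path.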
!   if (UseSlowPath || op->is_null_free() ||
        (!UseFastNewObjectArray && is_reference_type(op->type())) ||
        (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
      __ jmp(*op->stub()->entry());
    } else {
      Register tmp1 = op->tmp1()->as_register();

*** 1710,34 ***
      select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
    }
  
    assert_different_registers(obj, k_RInfo, klass_RInfo);
  
!   __ testptr(obj, obj);
!   if (op->should_profile()) {
!     Label not_null;
!     Register mdo  = klass_RInfo;
!     __ mov_metadata(mdo, md->constant_encoding());
!     __ jccb(Assembler::notEqual, not_null);
!     // Object is null; update MDO and exit
!     Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
!     int header_bits = BitData::null_seen_byte_constant();
!     __ orb(data_addr, header_bits);
!     __ jmp(*obj_is_null);
!     __ bind(not_null);
! 
!     Label update_done;
!     Register recv = k_RInfo;
!     __ load_klass(recv, obj, tmp_load_klass);
!     type_profile_helper(mdo, md, data, recv, &update_done);
! 
!     Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
!     __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);
! 
!     __ bind(update_done);
!   } else {
!     __ jcc(Assembler::equal, *obj_is_null);
    }
  
    if (!k->is_loaded()) {
      klass2reg_with_patching(k_RInfo, op->info_for_patch());
    } else {
--- 1747,36 ---
      select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
    }
  
    assert_different_registers(obj, k_RInfo, klass_RInfo);
  
!   if (op->need_null_check()) {
!     __ testptr(obj, obj);
!     if (op->should_profile()) {
!       Label not_null;
!       Register mdo  = klass_RInfo;
!       __ mov_metadata(mdo, md->constant_encoding());
!       __ jccb(Assembler::notEqual, not_null);
!       // Object is null; update MDO and exit
!       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
!       int header_bits = BitData::null_seen_byte_constant();
!       __ orb(data_addr, header_bits);
!       __ jmp(*obj_is_null);
!       __ bind(not_null);
! 
!       Label update_done;
!       Register recv = k_RInfo;
!       __ load_klass(recv, obj, tmp_load_klass);
!       type_profile_helper(mdo, md, data, recv, &update_done);
! 
!       Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
!       __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);
! 
!       __ bind(update_done);
!     } else {
+       __ jcc(Assembler::equal, *obj_is_null);
+     }
    }
  
    if (!k->is_loaded()) {
      klass2reg_with_patching(k_RInfo, op->info_for_patch());
    } else {

*** 1925,10 ***
--- 1964,134 ---
          ShouldNotReachHere();
        }
  
  }
  
+ void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
+   // We are loading/storing from/to an array that *may* be a flat array (the
+   // declared type is Object[], abstract[], interface[] or VT.ref[]).
+   // If this array is a flat array, take the slow path.
+   Register klass = op->tmp()->as_register();
+   if (UseArrayMarkWordCheck) {
+     __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
+   } else {
+     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+     __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
+     __ movl(klass, Address(klass, Klass::layout_helper_offset()));
+     __ testl(klass, Klass::_lh_array_tag_flat_value_bit_inplace);
+     __ jcc(Assembler::notZero, *op->stub()->entry());
+   }
+   if (!op->value()->is_illegal()) {
+     // The array is not a flat array, but it might be null-free. If we are storing
+     // a null into a null-free array, take the slow path (which will throw NPE).
+     Label skip;
+     __ cmpptr(op->value()->as_register(), NULL_WORD);
+     __ jcc(Assembler::notEqual, skip);
+     if (UseArrayMarkWordCheck) {
+       __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
+     } else {
+       __ testl(klass, Klass::_lh_null_free_array_bit_inplace);
+       __ jcc(Assembler::notZero, *op->stub()->entry());
+     }
+     __ bind(skip);
+   }
+ }
+ 
+ void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
+   // We are storing into an array that *may* be null-free (the declared type is
+   // Object[], abstract[], interface[] or VT.ref[]).
+   if (UseArrayMarkWordCheck) {
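+     // The mark word may be displaced while the object is locked; in that case,
+     // read the null-free bit from the klass' prototype header instead.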
+     Label test_mark_word;
+     Register tmp = op->tmp()->as_register();
+     __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
+     __ testl(tmp, markWord::unlocked_value);
+     __ jccb(Assembler::notZero, test_mark_word);
+     __ load_prototype_header(tmp, op->array()->as_register(), rscratch1);
+     __ bind(test_mark_word);
+     __ testl(tmp, markWord::null_free_array_bit_in_place);
+   } else {
+     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+     Register klass = op->tmp()->as_register();
+     __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
+     __ movl(klass, Address(klass, Klass::layout_helper_offset()));
+     __ testl(klass, Klass::_lh_null_free_array_bit_inplace);
+   }
+ }
+ 
+ void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
+   Label L_oops_equal;
+   Label L_oops_not_equal;
+   Label L_end;
+ 
+   Register left  = op->left()->as_register();
+   Register right = op->right()->as_register();
+ 
+   __ cmpptr(left, right);
+   __ jcc(Assembler::equal, L_oops_equal);
+ 
+   // (1) Null check -- if one of the operands is null, the other must not be null (because
+   //     the two references are not equal), so they are not substitutable.
+   //     FIXME: do null check only if the operand is nullable
+   __ testptr(left, right);
+   __ jcc(Assembler::zero, L_oops_not_equal);
+ 
+   ciKlass* left_klass = op->left_klass();
+   ciKlass* right_klass = op->right_klass();
+ 
+   // (2) Inline type check -- if either of the operands is not an inline type,
+   //     they are not substitutable. We do this only if we are not sure that the
+   //     operands are inline types.
+   if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
+       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
+     Register tmp1  = op->tmp1()->as_register();
+     __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern);
+     __ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
+     __ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
+     __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern);
+     __ jcc(Assembler::notEqual, L_oops_not_equal);
+   }
+ 
+   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
+   if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
+     // No need to load klass -- the operands are statically known to be the same inline klass.
+     __ jmp(*op->stub()->entry());
+   } else {
+     Register left_klass_op = op->left_klass_op()->as_register();
+     Register right_klass_op = op->right_klass_op()->as_register();
+ 
+     if (UseCompressedClassPointers) {
+       __ movl(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
+       __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
+       __ cmpl(left_klass_op, right_klass_op);
+     } else {
+       __ movptr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
+       __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
+       __ cmpptr(left_klass_op, right_klass_op);
+     }
+ 
+     __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check
+     // fall through to L_oops_not_equal
+   }
+ 
+   __ bind(L_oops_not_equal);
+   move(op->not_equal_result(), op->result_opr());
+   __ jmp(L_end);
+ 
+   __ bind(L_oops_equal);
+   move(op->equal_result(), op->result_opr());
+   __ jmp(L_end);
+ 
+   // We've returned from the stub. RAX contains 0x0 IFF the two
+   // operands are not substitutable. (Don't compare against 0x1 in case the
+   // C compiler is naughty)
+   __ bind(*op->stub()->continuation());
+   __ cmpl(rax, 0);
+   __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
+   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
+   // fall-through
+   __ bind(L_end);
+ }
  
  void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
    if (LP64_ONLY(false &&) op->code() == lir_cas_long) {
      assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
      assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");

*** 1985,10 ***
--- 2148,25 ---
    } else {
      Unimplemented();
    }
  }
  
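+ // Move src (a register, stack slot, or constant) into dst, which must be a CPU register of the same type.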
+ void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
+   assert(dst->is_cpu_register(), "must be");
+   assert(dst->type() == src->type(), "must be");
+ 
+   if (src->is_cpu_register()) {
+     reg2reg(src, dst);
+   } else if (src->is_stack()) {
+     stack2reg(src, dst, dst->type());
+   } else if (src->is_constant()) {
+     const2reg(src, dst, lir_patch_none, nullptr);
+   } else {
+     ShouldNotReachHere();
+   }
+ }
+ 
  void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
                            LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
    assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");
  
    Assembler::Condition acond, ncond;

*** 2862,18 ***
  
  void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
    assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
           "must be aligned");
    __ call(AddressLiteral(op->addr(), rtype));
!   add_call_info(code_offset(), op->info());
    __ post_call_nop();
  }
  
  
  void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
    __ ic_call(op->addr());
!   add_call_info(code_offset(), op->info());
    assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
           "must be aligned");
    __ post_call_nop();
  }
  
--- 3040,18 ---
  
  void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
    assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
           "must be aligned");
    __ call(AddressLiteral(op->addr(), rtype));
!   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
    __ post_call_nop();
  }
  
  
  void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
    __ ic_call(op->addr());
!   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
    assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
           "must be aligned");
    __ post_call_nop();
  }
  

*** 3050,10 ***
--- 3228,36 ---
    assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
    __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
  }
  
  
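+ // Used by arraycopy: branch to slow_path if obj is a flat array (source) or a
+ // null-free array (destination), optionally null-checking obj first.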
+ void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
+   if (null_check) {
+     __ testptr(obj, obj);
+     __ jcc(Assembler::zero, *slow_path->entry());
+   }
+   if (UseArrayMarkWordCheck) {
+     if (is_dest) {
+       __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
+     } else {
+       __ test_flat_array_oop(obj, tmp, *slow_path->entry());
+     }
+   } else {
+     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+     __ load_klass(tmp, obj, tmp_load_klass);
+     __ movl(tmp, Address(tmp, Klass::layout_helper_offset()));
+     if (is_dest) {
+       // Take the slow path if the destination array is null-free, in case the source array contains null elements.
+       __ testl(tmp, Klass::_lh_null_free_array_bit_inplace);
+     } else {
+       __ testl(tmp, Klass::_lh_array_tag_flat_value_bit_inplace);
+     }
+     __ jcc(Assembler::notZero, *slow_path->entry());
+   }
+ }
+ 
+ 
  // This code replaces a call to arraycopy; no exception may
  // be thrown in this code, they must be thrown in the System.arraycopy
  // activation frame; we could save some checks if this would not be the case
  void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
    ciArrayKlass* default_type = op->expected_type();

*** 3068,10 ***
--- 3272,16 ---
    CodeStub* stub = op->stub();
    int flags = op->flags();
    BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
    if (is_reference_type(basic_type)) basic_type = T_OBJECT;
  
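+   // Some copies involving inline-type arrays cannot use the fast paths below and always call into the runtime.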
+   if (flags & LIR_OpArrayCopy::always_slow_path) {
+     __ jmp(*stub->entry());
+     __ bind(*stub->continuation());
+     return;
+   }
+ 
    // if we don't know anything, just go through the generic arraycopy
    if (default_type == nullptr) {
      // save outgoing arguments on stack in case call to System.arraycopy is needed
      // HACK ALERT. This code used to push the parameters in a hardwired fashion
      // for interpreter calling conventions. Now we have to do it in new style conventions.

*** 3161,10 ***
--- 3371,18 ---
  
      __ bind(*stub->continuation());
      return;
    }
  
+   // Handle inline type arrays
+   if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
+     arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
+   }
+   if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
+     arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
+   }
+ 
    assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
  
    int elem_size = type2aelembytes(basic_type);
    Address::ScaleFactor scale;
  

*** 3788,10 ***
--- 4006,30 ---
      }
    }
    __ bind(next);
  }
  
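+ // Update the method data: if obj is a non-null inline type, set 'flag' in the MDO slot at mdo_addr.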
+ void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
+   Register obj = op->obj()->as_register();
+   Register tmp = op->tmp()->as_pointer_register();
+   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
+   bool not_null = op->not_null();
+   int flag = op->flag();
+ 
+   Label not_inline_type;
+   if (!not_null) {
+     __ testptr(obj, obj);
+     __ jccb(Assembler::zero, not_inline_type);
+   }
+ 
+   __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
+ 
+   __ orb(mdo_addr, flag);
+ 
+   __ bind(not_inline_type);
+ }
+ 
  void LIR_Assembler::emit_delay(LIR_OpDelay*) {
    Unimplemented();
  }
  
  

*** 4051,10 ***
--- 4289,13 ---
  #else
    __ get_thread(result_reg->as_register());
  #endif // _LP64
  }
  
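+ // Compare the frame's saved original-PC slot against null; it is set (non-null) once the frame has been deoptimized.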
+ void LIR_Assembler::check_orig_pc() {
+   __ cmpptr(frame_map()->address_for_orig_pc_addr(), NULL_WORD);
+ }
  
  void LIR_Assembler::peephole(LIR_List*) {
    // do nothing for now
  }
  