src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp

*** 45,14 ***
  
  #ifndef PRODUCT
  extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
  #endif
  
! VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
    const int stub_code_length = code_size_limit(true);
!   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
    // Can be null if there is no free space in the code cache.
    if (s == nullptr) {
      return nullptr;
    }
  
--- 45,14 ---
  
  #ifndef PRODUCT
  extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
  #endif
  
! VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
    // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
    const int stub_code_length = code_size_limit(true);
!   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
    // Can be null if there is no free space in the code cache.
    if (s == nullptr) {
      return nullptr;
    }
  

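Note on the hunk above: `caller_is_c1` is now part of the stub's identity (it is passed through to the `VtableStub` constructor), so the same vtable index can presumably resolve to two distinct stubs, one per caller tier. A minimal standalone sketch of that idea, with an invented `StubKey` struct that is not HotSpot's actual lookup code:

    #include <cstdio>

    // Illustrative only: models a stub identity that now includes the
    // caller tier, so one vtable index can map to two distinct stubs.
    struct StubKey {
      bool is_vtable;     // vtable stub vs. itable stub
      int  index;         // vtable/itable index
      bool caller_is_c1;  // C1 callers dispatch through a different Method entry
    };

    bool same_stub(const StubKey& a, const StubKey& b) {
      return a.is_vtable == b.is_vtable && a.index == b.index &&
             a.caller_is_c1 == b.caller_is_c1;
    }

    int main() {
      StubKey from_c1{true, 5, true};
      StubKey from_c2{true, 5, false};
      std::printf("same stub? %s\n", same_stub(from_c1, from_c2) ? "yes" : "no"); // no
    }
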
*** 61,10 ***
--- 61,14 ---
    // overflow in subsequently generated stubs.
    address   start_pc;
    int       slop_bytes = 0;
    int       slop_delta = 0;
  
+   // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will unveil any deviation from this observation.
+   const int index_dependent_slop     = 0;
+   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();
+ 
    ResourceMark    rm;
    CodeBuffer      cb(s->entry_point(), stub_code_length);
    MacroAssembler* masm = new MacroAssembler(&cb);
  
  #if (!defined(PRODUCT) && defined(COMPILER2))

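The `entry_offset` selected in the hunk above is the one place where `caller_is_c1` changes what the stub emits: every later load of the compiled entry uses this offset. A standalone sketch of the selection, assuming an invented `FakeMethod` layout (only the ternary mirrors the patch; real HotSpot field offsets differ):

    #include <cstddef>
    #include <cstdio>

    // Invented stand-in for Method; not the real layout.
    struct FakeMethod {
      void* from_compiled_inline;     // selected when caller_is_c1
      void* from_compiled_inline_ro;  // selected otherwise
    };

    // Mirrors: caller_is_c1 ? Method::from_compiled_inline_offset()
    //                       : Method::from_compiled_inline_ro_offset();
    std::size_t entry_offset(bool caller_is_c1) {
      return caller_is_c1 ? offsetof(FakeMethod, from_compiled_inline)
                          : offsetof(FakeMethod, from_compiled_inline_ro);
    }

    int main() {
      std::printf("C1 caller offset %zu, other callers offset %zu\n",
                  entry_offset(true), entry_offset(false));
    }
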
*** 114,35 ***
  
  #ifndef PRODUCT
    if (DebugVtables) {
      Label L;
      __ cbz(rmethod, L);
!     __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
      __ cbnz(rscratch1, L);
      __ stop("Vtable entry is null");
      __ bind(L);
    }
  #endif // PRODUCT
  
    // r0: receiver klass
    // rmethod: Method*
    // r2: receiver
    address ame_addr = __ pc();
!   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
    __ br(rscratch1);
  
    masm->flush();
!   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
  
    return s;
  }
  
  
! VtableStub* VtableStubs::create_itable_stub(int itable_index) {
    // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
    const int stub_code_length = code_size_limit(false);
!   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
    // Can be null if there is no free space in the code cache.
    if (s == nullptr) {
      return nullptr;
    }
  
--- 118,36 ---
  
  #ifndef PRODUCT
    if (DebugVtables) {
      Label L;
      __ cbz(rmethod, L);
!     __ ldr(rscratch1, Address(rmethod, entry_offset));
      __ cbnz(rscratch1, L);
      __ stop("Vtable entry is null");
      __ bind(L);
    }
  #endif // PRODUCT
  
    // r0: receiver klass
    // rmethod: Method*
    // r2: receiver
    address ame_addr = __ pc();
!   __ ldr(rscratch1, Address(rmethod, entry_offset));
    __ br(rscratch1);
  
    masm->flush();
!   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large vtable offsets
+   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);
  
    return s;
  }
  
  
! VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) {
    // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
    const int stub_code_length = code_size_limit(false);
!   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
    // Can be null if there is no free space in the code cache.
    if (s == nullptr) {
      return nullptr;
    }
  

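Both rewritten `ldr` lines above change only the byte offset of the load; the dispatch is still load-entry-then-branch, i.e. `ldr rscratch1, [rmethod, #entry_offset]` followed by `br rscratch1`. A standalone C++ model of that indirect jump through a pointer-sized slot (buffer layout and offset value are arbitrary, for illustration only):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    using Entry = void (*)();

    static void fake_entry() { std::puts("dispatched"); }

    int main() {
      // Pretend this buffer is a Method with a code-entry slot at byte
      // offset 16 (the value is arbitrary for this sketch).
      alignas(Entry) unsigned char method[64] = {};
      const std::size_t entry_offset = 16;
      Entry e = fake_entry;
      std::memcpy(method + entry_offset, &e, sizeof e);

      // What "ldr rscratch1, [rmethod, #entry_offset]; br rscratch1" does:
      Entry target;
      std::memcpy(&target, method + entry_offset, sizeof target);
      target();  // prints "dispatched"
    }
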
*** 151,10 ***
--- 156,14 ---
    // overflow in subsequently generated stubs.
    address   start_pc;
    int       slop_bytes = 0;
    int       slop_delta = 0;
  
+   const int index_dependent_slop = (itable_index == 0) ? 4 :     // code size change with transition from 8-bit to 32-bit constant (@index == 16).
+                                    (itable_index < 16) ? 3 : 0;  // index == 0 generates even shorter code.
+   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();
+ 
    ResourceMark    rm;
    CodeBuffer      cb(s->entry_point(), stub_code_length);
    MacroAssembler* masm = new MacroAssembler(&cb);
  
  #if (!defined(PRODUCT) && defined(COMPILER2))

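Unlike the vtable case, the itable stub's size varies with the index: per the comments in the patch, the compare immediate grows from an 8-bit to a 32-bit encoding at index 16, and index 0 generates even shorter code, so small indices need extra slop to pad the stub to the worst-case size. The formula can be checked in isolation (values copied from the patch):

    // Same expression as in create_itable_stub above.
    constexpr int index_dependent_slop(int itable_index) {
      return (itable_index == 0) ? 4 :
             (itable_index < 16) ? 3 : 0;
    }

    static_assert(index_dependent_slop(0)  == 4, "index 0: shortest code, most slop");
    static_assert(index_dependent_slop(7)  == 3, "small index: 8-bit immediate form");
    static_assert(index_dependent_slop(16) == 0, "large index: worst case, no slop");

    int main() { return 0; }
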
*** 205,21 ***
  
  #ifdef ASSERT
    if (DebugVtables) {
      Label L2;
      __ cbz(rmethod, L2);
!     __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
      __ cbnz(rscratch1, L2);
      __ stop("compiler entrypoint is null");
      __ bind(L2);
    }
  #endif // ASSERT
  
    // rmethod: Method*
    // j_rarg0: receiver
    address ame_addr = __ pc();
!   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
    __ br(rscratch1);
  
    __ bind(L_no_such_interface);
    // Handle IncompatibleClassChangeError in itable stubs.
    // More detailed error message.
--- 214,21 ---
  
  #ifdef ASSERT
    if (DebugVtables) {
      Label L2;
      __ cbz(rmethod, L2);
!     __ ldr(rscratch1, Address(rmethod, entry_offset));
      __ cbnz(rscratch1, L2);
      __ stop("compiler entrypoint is null");
      __ bind(L2);
    }
  #endif // ASSERT
  
    // rmethod: Method*
    // j_rarg0: receiver
    address ame_addr = __ pc();
!   __ ldr(rscratch1, Address(rmethod, entry_offset));
    __ br(rscratch1);
  
    __ bind(L_no_such_interface);
    // Handle IncompatibleClassChangeError in itable stubs.
    // More detailed error message.

*** 228,11 ***
    // dirty work.
    assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  
    masm->flush();
!   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);
  
    return s;
  }
  
  int VtableStub::pd_code_alignment() {
--- 237,12 ---
    // dirty work.
    assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  
    masm->flush();
!   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
+   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);
  
    return s;
  }
  
  int VtableStub::pd_code_alignment() {