
src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp

*** 26,10 ***
--- 26,11 ---
  #include "precompiled.hpp"
  #include "asm/macroAssembler.hpp"
  #include "asm/macroAssembler.inline.hpp"
  #include "asm/register.hpp"
  #include "atomic_aarch64.hpp"
+ #include "code/SCCache.hpp"
  #include "compiler/oopMap.hpp"
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/barrierSetAssembler.hpp"
  #include "gc/shared/gc_globals.hpp"
  #include "gc/shared/tlab_globals.hpp"

*** 4649,10 ***
--- 4650,14 ---
    address generate_multiplyToLen() {
      __ align(CodeEntryAlignment);
      StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
  
      address start = __ pc();
+ 
+     if (SCCache::load_stub(this, vmIntrinsics::_multiplyToLen, "multiplyToLen", start)) {
+       return start;
+     }
      const Register x     = r0;
      const Register xlen  = r1;
      const Register y     = r2;
      const Register ylen  = r3;
      const Register z     = r4;

*** 4670,10 ***
--- 4675,11 ---
      __ enter(); // required for proper stackwalking of RuntimeStub frame
      __ multiply_to_len(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(lr);
  
+     SCCache::store_stub(this, vmIntrinsics::_multiplyToLen, "multiplyToLen", start);
      return start;
    }
  
    address generate_squareToLen() {
      // squareToLen algorithm for sizes 1..127 described in java code works

*** 4681,10 ***
--- 4687,13 ---
      // multiply_to_len shows a bit better overall results
      __ align(CodeEntryAlignment);
      StubCodeMark mark(this, "StubRoutines", "squareToLen");
      address start = __ pc();
  
+     if (SCCache::load_stub(this, vmIntrinsics::_squareToLen, "squareToLen", start)) {
+       return start;
+     }
      const Register x     = r0;
      const Register xlen  = r1;
      const Register z     = r2;
      const Register y     = r4; // == x
      const Register ylen  = r5; // == xlen

*** 4706,19 ***
--- 4715,24 ---
      __ mov(ylen, xlen);
      __ multiply_to_len(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
      __ pop(spilled_regs, sp);
      __ leave();
      __ ret(lr);
+ 
+     SCCache::store_stub(this, vmIntrinsics::_squareToLen, "squareToLen", start);
      return start;
    }
  
    address generate_mulAdd() {
      __ align(CodeEntryAlignment);
      StubCodeMark mark(this, "StubRoutines", "mulAdd");
  
      address start = __ pc();
  
+     if (SCCache::load_stub(this, vmIntrinsics::_mulAdd, "mulAdd", start)) {
+       return start;
+     }
      const Register out     = r0;
      const Register in      = r1;
      const Register offset  = r2;
      const Register len     = r3;
      const Register k       = r4;

*** 4727,10 ***
--- 4741,11 ---
      __ enter();
      __ mul_add(out, in, offset, len, k);
      __ leave();
      __ ret(lr);
  
+     SCCache::store_stub(this, vmIntrinsics::_mulAdd, "mulAdd", start);
      return start;
    }
  
    // Arguments:
    //