1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.hpp"
  27 #include "assembler_arm.inline.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetAssembler.hpp"
  30 #include "interpreter/interpreter.hpp"
  31 #include "memory/universe.hpp"
  32 #include "nativeInst_arm.hpp"
  33 #include "oops/instanceOop.hpp"
  34 #include "oops/method.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/methodHandles.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/stubCodeGenerator.hpp"
  42 #include "runtime/stubRoutines.hpp"
  43 #include "utilities/align.hpp"
  44 #ifdef COMPILER2
  45 #include "opto/runtime.hpp"
  46 #endif
  47 
  48 // Declaration and definition of StubGenerator (no .hpp file).
  49 // For a more detailed description of the stub routine structure
  50 // see the comment in stubRoutines.hpp
  51 
  52 #define __ _masm->
  53 
  54 #ifdef PRODUCT
  55 #define BLOCK_COMMENT(str) /* nothing */
  56 #else
  57 #define BLOCK_COMMENT(str) __ block_comment(str)
  58 #endif
  59 
  60 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  61 
  62 // -------------------------------------------------------------------------------------------------------------------------
  63 // Stub Code definitions
  64 
  65 // Platform dependent parameters for array copy stubs
  66 
  67 // Note: we have noticed huge changes in behavior on microbenchmarks
  68 // from platform to platform depending on the configuration.
  69 
  70 // Instead of adding a series of command line options (which
  71 // unfortunately have to be done in the shared file and cannot appear
  72 // only in the ARM port), the tested results are hard-coded here in a set
  73 // of options, selected by specifying 'ArmCopyPlatform'.
  74 
  75 // Currently, this 'platform' is hardcoded to a value that is a good
  76 // enough trade-off.  However, one can easily modify this file to test
  77 // the hard-coded configurations or create new ones. If the gain is
  78 // significant, we could decide to either add command line options or
  79 // add code to automatically choose a configuration.
  80 
  81 // see comments below for the various configurations created
  82 #define DEFAULT_ARRAYCOPY_CONFIG 0
  83 #define TEGRA2_ARRAYCOPY_CONFIG 1
  84 #define IMX515_ARRAYCOPY_CONFIG 2
  85 
  86 // Hard coded choices (XXX: could be changed to a command line option)
  87 #define ArmCopyPlatform DEFAULT_ARRAYCOPY_CONFIG
  88 
  89 #define ArmCopyCacheLineSize 32 // not worth optimizing to 64 according to measured gains
  90 
  91 // configuration for each kind of loop
  92 typedef struct {
  93   int pld_distance;       // prefetch distance (0 => no prefetch, < 0 => prefetch before, > 0 => prefetch after)
  94   bool split_ldm;         // if true, split each LDM into LDMs with fewer registers
  95   bool split_stm;         // if true, split each STM into STMs with fewer registers
  96 } arraycopy_loop_config;
  97 
  98 // configuration for all loops
  99 typedef struct {
 100   // const char *description;
 101   arraycopy_loop_config forward_aligned;
 102   arraycopy_loop_config backward_aligned;
 103   arraycopy_loop_config forward_shifted;
 104   arraycopy_loop_config backward_shifted;
 105 } arraycopy_platform_config;
 106 
 107 // configured platforms
 108 static arraycopy_platform_config arraycopy_configurations[] = {
 109   // configuration parameters for arraycopy loops
 110 
 111   // Configurations were chosen based on manual analysis of benchmark
 112   // results, minimizing overhead with respect to best results on the
 113   // different test cases.
 114 
 115   // Prefetch before is always favored since it avoids dirtying the
 116   // cache uselessly for small copies. Code for prefetch after has
 117   // been kept in case the difference is significant for some
 118   // platforms but we might consider dropping it.
 119 
 120   // distance, ldm, stm
 121   {
 122     // default: tradeoff tegra2/imx515/nv-tegra2,
 123     // Notes on benchmarking:
 124     // - not far from optimal configuration on nv-tegra2
 125     // - within 5% of optimal configuration except for backward aligned on IMX
 126     // - up to 40% from optimal configuration for backward shifted and backward aligned for tegra2
 127     //   but still on par with the operating system copy
 128     {-256, true,  true  }, // forward aligned
 129     {-256, true,  true  }, // backward aligned
 130     {-256, false, false }, // forward shifted
 131     {-256, true,  true  } // backward shifted
 132   },
 133   {
 134     // configuration tuned on tegra2-4.
 135     // Warning: should not be used on nv-tegra2!
 136     // Notes:
 137     // - prefetch after gives 40% gain on backward copies on tegra2-4,
 138     //   resulting in better numbers than the operating system
 139     //   copy. However, this can lead to a 300% loss on nv-tegra and has
 140     //   more impact on the cache (fetches further than what is
 141     //   copied). Use this configuration with care, in case it improves
 142     //   reference benchmarks.
 143     {-256, true,  true  }, // forward aligned
 144     {96,   false, false }, // backward aligned
 145     {-256, false, false }, // forward shifted
 146     {96,   false, false } // backward shifted
 147   },
 148   {
 149     // configuration tuned on imx515
 150     // Notes:
 151     // - a smaller prefetch distance is sufficient to get good results and might be more stable
 152     // - refined backward aligned options within 5% of optimal configuration except for
 153     //   tests where the arrays fit in the cache
 154     {-160, false, false }, // forward aligned
 155     {-160, false, false }, // backward aligned
 156     {-160, false, false }, // forward shifted
 157     {-160, true,  true  } // backward shifted
 158   }
 159 };
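
     // Illustrative sketch (not compiled): how the copy-loop generators below consult
     // the selected configuration, e.g. for the forward aligned loop:
     //
     //   arraycopy_loop_config *config = &arraycopy_configurations[ArmCopyPlatform].forward_aligned;
     //   int  pld_distance = config->pld_distance; // < 0 => prefetch before, > 0 => prefetch after, 0 => none
     //   bool split_ldm    = config->split_ldm;    // split each 8-register LDM into two smaller LDMs
     //   bool split_stm    = config->split_stm;    // split each 8-register STM into two smaller STMs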
 160 
 161 class StubGenerator: public StubCodeGenerator {
 162 
 163 #ifdef PRODUCT
 164 #define inc_counter_np(a,b,c) ((void)0)
 165 #else
 166 #define inc_counter_np(counter, t1, t2) \
 167   BLOCK_COMMENT("inc_counter " #counter); \
 168   __ inc_counter(&counter, t1, t2);
 169 #endif
 170 
 171  private:
 172 
 173   address generate_call_stub(address& return_address) {
 174     StubCodeMark mark(this, "StubRoutines", "call_stub");
 175     address start = __ pc();
 176 
 177 
 178     assert(frame::entry_frame_call_wrapper_offset == 0, "adjust this code");
 179 
 180     __ mov(Rtemp, SP);
 181     __ push(RegisterSet(FP) | RegisterSet(LR));
 182 #ifndef __SOFTFP__
 183     __ fstmdbd(SP, FloatRegisterSet(D8, 8), writeback);
 184 #endif
 185     __ stmdb(SP, RegisterSet(R0, R2) | RegisterSet(R4, R6) | RegisterSet(R8, R10) | altFP_7_11, writeback);
 186     __ mov(Rmethod, R3);
 187     __ ldmia(Rtemp, RegisterSet(R1, R3) | Rthread); // stacked arguments
 188 
 189     // XXX: TODO
 190     // Would be better with respect to native tools if the following
 191     // setting of FP was changed to conform to the native ABI, with FP
 192     // pointing to the saved FP slot (and the corresponding modifications
 193     // for entry_frame_call_wrapper_offset and frame::real_fp).
 194     __ mov(FP, SP);
 195 
 196     {
 197       Label no_parameters, pass_parameters;
 198       __ cmp(R3, 0);
 199       __ b(no_parameters, eq);
 200 
 201       __ bind(pass_parameters);
 202       __ ldr(Rtemp, Address(R2, wordSize, post_indexed)); // Rtemp OK, unused and scratchable
 203       __ subs(R3, R3, 1);
 204       __ push(Rtemp);
 205       __ b(pass_parameters, ne);
 206       __ bind(no_parameters);
 207     }
 208 
 209     __ mov(Rsender_sp, SP);
 210     __ blx(R1);
 211     return_address = __ pc();
 212 
 213     __ add(SP, FP, wordSize); // Skip link to JavaCallWrapper
 214     __ pop(RegisterSet(R2, R3));
 215 #ifndef __ABI_HARD__
 216     __ cmp(R3, T_LONG);
 217     __ cmp(R3, T_DOUBLE, ne);
 218     __ str(R0, Address(R2));
 219     __ str(R1, Address(R2, wordSize), eq);
 220 #else
 221     Label cont, l_float, l_double;
 222 
 223     __ cmp(R3, T_DOUBLE);
 224     __ b(l_double, eq);
 225 
 226     __ cmp(R3, T_FLOAT);
 227     __ b(l_float, eq);
 228 
 229     __ cmp(R3, T_LONG);
 230     __ str(R0, Address(R2));
 231     __ str(R1, Address(R2, wordSize), eq);
 232     __ b(cont);
 233 
 234 
 235     __ bind(l_double);
 236     __ fstd(D0, Address(R2));
 237     __ b(cont);
 238 
 239     __ bind(l_float);
 240     __ fsts(S0, Address(R2));
 241 
 242     __ bind(cont);
 243 #endif
 244 
 245     __ pop(RegisterSet(R4, R6) | RegisterSet(R8, R10) | altFP_7_11);
 246 #ifndef __SOFTFP__
 247     __ fldmiad(SP, FloatRegisterSet(D8, 8), writeback);
 248 #endif
 249     __ pop(RegisterSet(FP) | RegisterSet(PC));
 250 
 251     return start;
 252   }
 253 
 254 
 255   // (in) Rexception_obj: exception oop
 256   address generate_catch_exception() {
 257     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 258     address start = __ pc();
 259 
 260     __ str(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
 261     __ b(StubRoutines::_call_stub_return_address);
 262 
 263     return start;
 264   }
 265 
 266 
 267   // (in) Rexception_pc: return address
 268   address generate_forward_exception() {
 269     StubCodeMark mark(this, "StubRoutines", "forward exception");
 270     address start = __ pc();
 271 
 272     __ mov(c_rarg0, Rthread);
 273     __ mov(c_rarg1, Rexception_pc);
 274     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
 275                          SharedRuntime::exception_handler_for_return_address),
 276                          c_rarg0, c_rarg1);
 277     __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
 278     const Register Rzero = __ zero_register(Rtemp); // Rtemp OK (cleared by above call)
 279     __ str(Rzero, Address(Rthread, Thread::pending_exception_offset()));
 280 
 281 #ifdef ASSERT
 282     // make sure exception is set
 283     { Label L;
 284       __ cbnz(Rexception_obj, L);
 285       __ stop("StubRoutines::forward exception: no pending exception (2)");
 286       __ bind(L);
 287     }
 288 #endif
 289 
 290     // Verify that there is really a valid exception in Rexception_obj.
 291     __ verify_oop(Rexception_obj);
 292 
 293     __ jump(R0); // handler is returned in R0 by runtime function
 294     return start;
 295   }
 296 
 297 
 298 
 299   // Integer division shared routine
 300   //   Input:
 301   //     R0  - dividend
 302   //     R2  - divisor
 303   //   Output:
 304   //     R0  - remainder
 305   //     R1  - quotient
 306   //   Destroys:
 307   //     R2
 308   //     LR
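       // A rough C-level sketch (illustrative only, not the generated code) of the contract
       // implemented below for 32-bit operands:
       //
       //   jint quotient  = dividend / divisor;   // returned in R1
       //   jint remainder = dividend % divisor;   // returned in R0
       //   // divisor == 0 branches to the slow path, which ends up in
       //   // SharedRuntime::continuation_for_implicit_exception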
 309   address generate_idiv_irem() {
 310     Label positive_arguments, negative_or_zero, call_slow_path;
 311     Register dividend  = R0;
 312     Register divisor   = R2;
 313     Register remainder = R0;
 314     Register quotient  = R1;
 315     Register tmp       = LR;
 316     assert(dividend == remainder, "must be");
 317 
 318     address start = __ pc();
 319 
 320     // Check for special cases: divisor <= 0 or dividend < 0
 321     __ cmp(divisor, 0);
 322     __ orrs(quotient, dividend, divisor, ne);
 323     __ b(negative_or_zero, le);
 324 
 325     __ bind(positive_arguments);
 326     // Save return address on stack to free one extra register
 327     __ push(LR);
 328     // Approximate the maximum order of the quotient
 329     __ clz(tmp, dividend);
 330     __ clz(quotient, divisor);
 331     __ subs(tmp, quotient, tmp);
 332     __ mov(quotient, 0);
 333     // Jump to the appropriate place in the unrolled loop below
 334     __ ldr(PC, Address(PC, tmp, lsl, 2), pl);
 335     // If divisor is greater than dividend, return immediately
 336     __ pop(PC);
 337 
 338     // Offset table
 339     Label offset_table[32];
 340     int i;
 341     for (i = 0; i <= 31; i++) {
 342       __ emit_address(offset_table[i]);
 343     }
 344 
 345     // Unrolled loop of 32 division steps
 346     for (i = 31; i >= 0; i--) {
 347       __ bind(offset_table[i]);
 348       __ cmp(remainder, AsmOperand(divisor, lsl, i));
 349       __ sub(remainder, remainder, AsmOperand(divisor, lsl, i), hs);
 350       __ add(quotient, quotient, 1 << i, hs);
 351     }
 352     __ pop(PC);
 353 
 354     __ bind(negative_or_zero);
 355     // Find the combination of argument signs and jump to corresponding handler
 356     __ andr(quotient, dividend, 0x80000000, ne);
 357     __ orr(quotient, quotient, AsmOperand(divisor, lsr, 31), ne);
 358     __ add(PC, PC, AsmOperand(quotient, ror, 26), ne);
 359     __ str(LR, Address(Rthread, JavaThread::saved_exception_pc_offset()));
 360 
 361     // The leaf runtime function can destroy R0-R3 and R12 registers which are still alive
 362     RegisterSet saved_registers = RegisterSet(R3) | RegisterSet(R12);
 363 #if R9_IS_SCRATCHED
 364     // Safer to save R9 here since callers may have been written
 365     // assuming R9 survives. This is suboptimal but may not be worth
 366     // revisiting for this slow case.
 367 
 368     // save also R10 for alignment
 369     saved_registers = saved_registers | RegisterSet(R9, R10);
 370 #endif
 371     {
 372       // divisor == 0
 373       FixedSizeCodeBlock zero_divisor(_masm, 8, true);
 374       __ push(saved_registers);
 375       __ mov(R0, Rthread);
 376       __ mov(R1, LR);
 377       __ mov(R2, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
 378       __ b(call_slow_path);
 379     }
 380 
 381     {
 382       // divisor > 0 && dividend < 0
 383       FixedSizeCodeBlock positive_divisor_negative_dividend(_masm, 8, true);
 384       __ push(LR);
 385       __ rsb(dividend, dividend, 0);
 386       __ bl(positive_arguments);
 387       __ rsb(remainder, remainder, 0);
 388       __ rsb(quotient, quotient, 0);
 389       __ pop(PC);
 390     }
 391 
 392     {
 393       // divisor < 0 && dividend > 0
 394       FixedSizeCodeBlock negative_divisor_positive_dividend(_masm, 8, true);
 395       __ push(LR);
 396       __ rsb(divisor, divisor, 0);
 397       __ bl(positive_arguments);
 398       __ rsb(quotient, quotient, 0);
 399       __ pop(PC);
 400     }
 401 
 402     {
 403       // divisor < 0 && dividend < 0
 404       FixedSizeCodeBlock negative_divisor_negative_dividend(_masm, 8, true);
 405       __ push(LR);
 406       __ rsb(dividend, dividend, 0);
 407       __ rsb(divisor, divisor, 0);
 408       __ bl(positive_arguments);
 409       __ rsb(remainder, remainder, 0);
 410       __ pop(PC);
 411     }
 412 
 413     __ bind(call_slow_path);
 414     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::continuation_for_implicit_exception));
 415     __ pop(saved_registers);
 416     __ bx(R0);
 417 
 418     return start;
 419   }
 420 
 421 
 422  // As per atomic.hpp the Atomic read-modify-write operations must be logically implemented as:
 423  //  <fence>; <op>; <membar StoreLoad|StoreStore>
 424  // But for load-linked/store-conditional based systems a fence here simply means
 425  // no load/store can be reordered with respect to the initial load-linked, so we have:
 426  // <membar storeload|loadload> ; load-linked; <op>; store-conditional; <membar storeload|storestore>
 427  // There are no memory actions in <op> so nothing further is needed.
 428  //
 429  // So we define the following for convenience:
 430 #define MEMBAR_ATOMIC_OP_PRE \
 431     MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::LoadLoad)
 432 #define MEMBAR_ATOMIC_OP_POST \
 433     MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad|MacroAssembler::StoreStore)
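
       // The atomic stubs below all follow the same shape (illustrative sketch, mirroring
       // the ldrex/strex paths generated further down):
       //
       //   __ membar(MEMBAR_ATOMIC_OP_PRE, tmp);   // order earlier accesses before the ldrex
       //   retry:
       //     load-linked (ldrex), apply <op>, store-conditional (strex)
       //     retry if the strex failed
       //   __ membar(MEMBAR_ATOMIC_OP_POST, tmp);  // order the store before later accesses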
 434 
 435   // Note: JDK 9 only supports ARMv7+ so we always have ldrexd available even though the
 436   // code below allows for it to be otherwise. The else clause indicates an ARMv5 system
 437   // for which we do not support MP and so membars are not necessary. This ARMv5 code will
 438   // be removed in the future.
 439 
 440   // Support for jint Atomic::add(jint add_value, volatile jint *dest)
 441   //
 442   // Arguments :
 443   //
 444   //      add_value:      R0
 445   //      dest:           R1
 446   //
 447   // Results:
 448   //
 449   //     R0: the new value stored in dest
 450   //
 451   // Overwrites:
 452   //
 453   //     R1, R2, R3
 454   //
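       // Roughly equivalent C semantics (sketch only; 'store_conditional' is a stand-in
       // for the strex retry performed by the generated code):
       //
       //   jint atomic_add(jint add_value /* R0 */, volatile jint* dest /* R1 */) {
       //     jint new_value;
       //     do {
       //       new_value = *dest + add_value;                    // ldrex + add
       //     } while (!store_conditional(dest, new_value));      // strex, retry on failure
       //     return new_value;                                   // returned in R0
       //   }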
 455   address generate_atomic_add() {
 456     address start;
 457 
 458     StubCodeMark mark(this, "StubRoutines", "atomic_add");
 459     Label retry;
 460     start = __ pc();
 461     Register addval    = R0;
 462     Register dest      = R1;
 463     Register prev      = R2;
 464     Register ok        = R2;
 465     Register newval    = R3;
 466 
 467     if (VM_Version::supports_ldrex()) {
 468       __ membar(MEMBAR_ATOMIC_OP_PRE, prev);
 469       __ bind(retry);
 470       __ ldrex(newval, Address(dest));
 471       __ add(newval, addval, newval);
 472       __ strex(ok, newval, Address(dest));
 473       __ cmp(ok, 0);
 474       __ b(retry, ne);
 475       __ mov (R0, newval);
 476       __ membar(MEMBAR_ATOMIC_OP_POST, prev);
 477     } else {
 478       __ bind(retry);
 479       __ ldr (prev, Address(dest));
 480       __ add(newval, addval, prev);
 481       __ atomic_cas_bool(prev, newval, dest, 0, noreg/*ignored*/);
 482       __ b(retry, ne);
 483       __ mov (R0, newval);
 484     }
 485     __ bx(LR);
 486 
 487     return start;
 488   }
 489 
 490   // Support for jint Atomic::xchg(jint exchange_value, volatile jint *dest)
 491   //
 492   // Arguments :
 493   //
 494   //      exchange_value: R0
 495   //      dest:           R1
 496   //
 497   // Results:
 498   //
 499   //     R0: the value previously stored in dest
 500   //
 501   // Overwrites:
 502   //
 503   //     R1, R2, R3
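       // Roughly equivalent C semantics (sketch only, 'store_conditional' as above):
       //
       //   jint atomic_xchg(jint exchange_value /* R0 */, volatile jint* dest /* R1 */) {
       //     jint prev;
       //     do {
       //       prev = *dest;                                     // ldrex
       //     } while (!store_conditional(dest, exchange_value)); // strex, retry on failure
       //     return prev;                                        // returned in R0
       //   }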
 504   //
 505   address generate_atomic_xchg() {
 506     address start;
 507 
 508     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 509     start = __ pc();
 510     Register newval    = R0;
 511     Register dest      = R1;
 512     Register prev      = R2;
 513 
 514     Label retry;
 515 
 516     if (VM_Version::supports_ldrex()) {
 517       Register ok=R3;
 518       __ membar(MEMBAR_ATOMIC_OP_PRE, prev);
 519       __ bind(retry);
 520       __ ldrex(prev, Address(dest));
 521       __ strex(ok, newval, Address(dest));
 522       __ cmp(ok, 0);
 523       __ b(retry, ne);
 524       __ mov (R0, prev);
 525       __ membar(MEMBAR_ATOMIC_OP_POST, prev);
 526     } else {
 527       __ bind(retry);
 528       __ ldr (prev, Address(dest));
 529       __ atomic_cas_bool(prev, newval, dest, 0, noreg/*ignored*/);
 530       __ b(retry, ne);
 531       __ mov (R0, prev);
 532     }
 533     __ bx(LR);
 534 
 535     return start;
 536   }
 537 
 538   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
 539   //
 540   // Arguments :
 541   //
 542   //      compare_value:  R0
 543   //      exchange_value: R1
 544   //      dest:           R2
 545   //
 546   // Results:
 547   //
 548   //     R0: the value previously stored in dest
 549   //
 550   // Overwrites:
 551   //
 552   //     R0, R1, R2, R3, Rtemp
 553   //
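       // Roughly equivalent C semantics (sketch only; done in one atomic step by atomic_cas):
       //
       //   jint atomic_cmpxchg(jint compare_value /* R0 */, jint exchange_value /* R1 */, volatile jint* dest /* R2 */) {
       //     jint prev = *dest;
       //     if (prev == compare_value) *dest = exchange_value;
       //     return prev;                                        // returned in R0
       //   }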
 554   address generate_atomic_cmpxchg() {
 555     address start;
 556 
 557     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
 558     start = __ pc();
 559     Register cmp       = R0;
 560     Register newval    = R1;
 561     Register dest      = R2;
 562     Register temp1     = R3;
 563     Register temp2     = Rtemp; // Rtemp free (native ABI)
 564 
 565     __ membar(MEMBAR_ATOMIC_OP_PRE, temp1);
 566 
 567     // atomic_cas returns previous value in R0
 568     __ atomic_cas(temp1, temp2, cmp, newval, dest, 0);
 569 
 570     __ membar(MEMBAR_ATOMIC_OP_POST, temp1);
 571 
 572     __ bx(LR);
 573 
 574     return start;
 575   }
 576 
 577   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
 578   // whose arguments are reordered beforehand by a wrapper to (jlong compare_value, jlong exchange_value, volatile jlong *dest)
 579   //
 580   // Arguments :
 581   //
 582   //      compare_value:  R1 (High), R0 (Low)
 583   //      exchange_value: R3 (High), R2 (Low)
 584   //      dest:           SP+0
 585   //
 586   // Results:
 587   //
 588   //     R0:R1: the value previously stored in dest
 589   //
 590   // Overwrites:
 591   //
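       // Roughly equivalent C semantics (sketch only; done in one atomic step by atomic_cas64,
       // with 'dest' passed on the stack because R0-R3 are taken by the two 64-bit values):
       //
       //   jlong atomic_cmpxchg_long(jlong compare_value /* R1:R0 */, jlong exchange_value /* R3:R2 */, volatile jlong* dest /* [SP] */) {
       //     jlong prev = *dest;
       //     if (prev == compare_value) *dest = exchange_value;
       //     return prev;                                        // returned in R1:R0
       //   }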
 592   address generate_atomic_cmpxchg_long() {
 593     address start;
 594 
 595     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
 596     start = __ pc();
 597     Register cmp_lo      = R0;
 598     Register cmp_hi      = R1;
 599     Register newval_lo   = R2;
 600     Register newval_hi   = R3;
 601     Register addr        = Rtemp;  /* After load from stack */
 602     Register temp_lo     = R4;
 603     Register temp_hi     = R5;
 604     Register temp_result = R8;
 605     assert_different_registers(cmp_lo, newval_lo, temp_lo, addr, temp_result, R7);
 606     assert_different_registers(cmp_hi, newval_hi, temp_hi, addr, temp_result, R7);
 607 
 608     __ membar(MEMBAR_ATOMIC_OP_PRE, Rtemp); // Rtemp free (native ABI)
 609 
 610     // Stack is unaligned, so maintain double word alignment by pushing
 611     // an odd number of regs.
 612     __ push(RegisterSet(temp_result) | RegisterSet(temp_lo, temp_hi));
 613     __ ldr(addr, Address(SP, 12));
 614 
 615     // atomic_cas64 returns previous value in temp_lo, temp_hi
 616     __ atomic_cas64(temp_lo, temp_hi, temp_result, cmp_lo, cmp_hi,
 617                     newval_lo, newval_hi, addr, 0);
 618     __ mov(R0, temp_lo);
 619     __ mov(R1, temp_hi);
 620 
 621     __ pop(RegisterSet(temp_result) | RegisterSet(temp_lo, temp_hi));
 622 
 623     __ membar(MEMBAR_ATOMIC_OP_POST, Rtemp); // Rtemp free (native ABI)
 624     __ bx(LR);
 625 
 626     return start;
 627   }
 628 
 629   address generate_atomic_load_long() {
 630     address start;
 631 
 632     StubCodeMark mark(this, "StubRoutines", "atomic_load_long");
 633     start = __ pc();
 634     Register result_lo = R0;
 635     Register result_hi = R1;
 636     Register src       = R0;
 637 
 638     if (!os::is_MP()) {
 639       __ ldmia(src, RegisterSet(result_lo, result_hi));
 640       __ bx(LR);
 641     } else if (VM_Version::supports_ldrexd()) {
 642       __ ldrexd(result_lo, Address(src));
 643       __ clrex(); // FIXME: safe to remove?
 644       __ bx(LR);
 645     } else {
 646       __ stop("Atomic load(jlong) unsupported on this platform");
 647       __ bx(LR);
 648     }
 649 
 650     return start;
 651   }
 652 
 653   address generate_atomic_store_long() {
 654     address start;
 655 
 656     StubCodeMark mark(this, "StubRoutines", "atomic_store_long");
 657     start = __ pc();
 658     Register newval_lo = R0;
 659     Register newval_hi = R1;
 660     Register dest      = R2;
 661     Register scratch_lo    = R2;
 662     Register scratch_hi    = R3;  /* After load from stack */
 663     Register result    = R3;
 664 
 665     if (!os::is_MP()) {
 666       __ stmia(dest, RegisterSet(newval_lo, newval_hi));
 667       __ bx(LR);
 668     } else if (VM_Version::supports_ldrexd()) {
 669       __ mov(Rtemp, dest);  // get dest to Rtemp
 670       Label retry;
 671       __ bind(retry);
 672       __ ldrexd(scratch_lo, Address(Rtemp));
 673       __ strexd(result, R0, Address(Rtemp));
 674       __ rsbs(result, result, 1);
 675       __ b(retry, eq);
 676       __ bx(LR);
 677     } else {
 678       __ stop("Atomic store(jlong) unsupported on this platform");
 679       __ bx(LR);
 680     }
 681 
 682     return start;
 683   }
 684 
 685 
 686 
 687 #ifdef COMPILER2
 688   // Support for uint StubRoutine::Arm::partial_subtype_check( Klass sub, Klass super );
 689   // Arguments :
 690   //
 691   //      ret  : R0, returned
 692   //      icc/xcc: set as R0 (depending on wordSize)
 693   //      sub  : R1, argument, not changed
 694   //      super: R2, argument, not changed
 695   //      raddr: LR, blown by call
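       // The slow path below boils down to (illustrative sketch):
       //
       //   for (each Klass* k in sub_klass->secondary_supers array) {
       //     if (k == super_klass) {
       //       sub_klass->secondary_super_cache = super_klass;   // cache the hit
       //       return 0;                                         // flags set to eq
       //     }
       //   }
       //   return 1;                                             // not a subtype (ne)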
 696   address generate_partial_subtype_check() {
 697     __ align(CodeEntryAlignment);
 698     StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
 699     address start = __ pc();
 700 
 701     // based on SPARC check_klass_subtype_[fast|slow]_path (without CompressedOops)
 702 
 703     // R0 used as tmp_reg (in addition to return reg)
 704     Register sub_klass = R1;
 705     Register super_klass = R2;
 706     Register tmp_reg2 = R3;
 707     Register tmp_reg3 = R4;
 708 #define saved_set tmp_reg2, tmp_reg3
 709 
 710     Label L_loop, L_fail;
 711 
 712     int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
 713 
 714     // fast check should be redundant
 715 
 716     // slow check
 717     {
 718       __ raw_push(saved_set);
 719 
 720       // a couple of useful fields in sub_klass:
 721       int ss_offset = in_bytes(Klass::secondary_supers_offset());
 722 
 723       // Do a linear scan of the secondary super-klass chain.
 724       // This code is rarely used, so simplicity is a virtue here.
 725 
 726       inc_counter_np(SharedRuntime::_partial_subtype_ctr, tmp_reg2, tmp_reg3);
 727 
 728       Register scan_temp = tmp_reg2;
 729       Register count_temp = tmp_reg3;
 730 
 731       // We will consult the secondary-super array.
 732       __ ldr(scan_temp, Address(sub_klass, ss_offset));
 733 
 734       Register search_key = super_klass;
 735 
 736       // Load the array length.
 737       __ ldr_s32(count_temp, Address(scan_temp, Array<Klass*>::length_offset_in_bytes()));
 738       __ add(scan_temp, scan_temp, Array<Klass*>::base_offset_in_bytes());
 739 
 740       __ add(count_temp, count_temp, 1);
 741 
 742       // Top of search loop
 743       __ bind(L_loop);
 744       // Notes:
 745       //  scan_temp starts at the array elements
 746       //  count_temp is 1+size
 747       __ subs(count_temp, count_temp, 1);
 748       __ b(L_fail, eq); // not found in the array
 749 
 750       // Load next super to check
 751       // In the array of super classes elements are pointer sized.
 752       int element_size = wordSize;
 753       __ ldr(R0, Address(scan_temp, element_size, post_indexed));
 754 
 755       // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
 756       __ subs(R0, R0, search_key); // set R0 to 0 on success (and flags to eq)
 757 
 758       // A miss means we are NOT a subtype and need to keep looping
 759       __ b(L_loop, ne);
 760 
 761       // Falling out the bottom means we found a hit; we ARE a subtype
 762 
 763       // Success.  Cache the super we found and proceed in triumph.
 764       __ str(super_klass, Address(sub_klass, sc_offset));
 765 
 766       // Return success
 767       // R0 is already 0 and flags are already set to eq
 768       __ raw_pop(saved_set);
 769       __ ret();
 770 
 771       // Return failure
 772       __ bind(L_fail);
 773       __ movs(R0, 1); // sets the flags
 774       __ raw_pop(saved_set);
 775       __ ret();
 776     }
 777     return start;
 778   }
 779 #undef saved_set
 780 #endif // COMPILER2
 781 
 782 
 783   //----------------------------------------------------------------------------------------------------
 784   // Non-destructive plausibility checks for oops
 785 
 786   address generate_verify_oop() {
 787     StubCodeMark mark(this, "StubRoutines", "verify_oop");
 788     address start = __ pc();
 789 
 790     // Incoming arguments:
 791     //
 792     // R0: error message (char* )
 793     // R1: address of register save area
 794     // R2: oop to verify
 795     //
 796     // All registers are saved before calling this stub. However, the condition flags are not, so they are saved and restored here.
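         // The checks below are roughly (illustrative sketch):
         //
         //   if (oop == NULL) return;                                                 // NULL is OK
         //   if ((oop & Universe::verify_oop_mask()) != Universe::verify_oop_bits())  goto error;
         //   if (oop->klass() == NULL)                                                goto error;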
 797 
 798     const Register oop   = R2;
 799     const Register klass = R3;
 800     const Register tmp1  = R6;
 801     const Register tmp2  = R8;
 802 
 803     const Register flags     = Rtmp_save0; // R4/R19
 804     const Register ret_addr  = Rtmp_save1; // R5/R20
 805     assert_different_registers(oop, klass, tmp1, tmp2, flags, ret_addr, R7);
 806 
 807     Label exit, error;
 808     InlinedAddress verify_oop_count((address) StubRoutines::verify_oop_count_addr());
 809 
 810     __ mrs(Assembler::CPSR, flags);
 811 
 812     __ ldr_literal(tmp1, verify_oop_count);
 813     __ ldr_s32(tmp2, Address(tmp1));
 814     __ add(tmp2, tmp2, 1);
 815     __ str_32(tmp2, Address(tmp1));
 816 
 817     // make sure object is 'reasonable'
 818     __ cbz(oop, exit);                           // if obj is NULL it is ok
 819 
 820     // Check if the oop is in the right area of memory
 821     // Note: oop_mask and oop_bits must be updated if the code is saved/reused
 822     const address oop_mask = (address) Universe::verify_oop_mask();
 823     const address oop_bits = (address) Universe::verify_oop_bits();
 824     __ mov_address(tmp1, oop_mask);
 825     __ andr(tmp2, oop, tmp1);
 826     __ mov_address(tmp1, oop_bits);
 827     __ cmp(tmp2, tmp1);
 828     __ b(error, ne);
 829 
 830     // make sure klass is 'reasonable'
 831     __ load_klass(klass, oop);                   // get klass
 832     __ cbz(klass, error);                        // if klass is NULL it is broken
 833 
 834     // return if everything seems ok
 835     __ bind(exit);
 836 
 837     __ msr(Assembler::CPSR_f, flags);
 838 
 839     __ ret();
 840 
 841     // handle errors
 842     __ bind(error);
 843 
 844     __ mov(ret_addr, LR);                      // save return address
 845 
 846     // R0: error message
 847     // R1: register save area
 848     __ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug));
 849 
 850     __ mov(LR, ret_addr);
 851     __ b(exit);
 852 
 853     __ bind_literal(verify_oop_count);
 854 
 855     return start;
 856   }
 857 
 858   //----------------------------------------------------------------------------------------------------
 859   // Array copy stubs
 860 
 861   //
 862   //  Generate overlap test for array copy stubs
 863   //
 864   //  Input:
 865   //    R0    -  array1
 866   //    R1    -  array2
 867   //    R2    -  element count, 32-bit int
 868   //
 869   //  input registers are preserved
 870   //
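       // The generated test is equivalent to (illustrative sketch):
       //
       //   if (to < from /* unsigned */ || (to - from) >= (count << log2_elem_size)) {
       //     goto no_overlap_target;   // regions are disjoint, a forward copy is safe
       //   }
       //   // otherwise fall through and let the caller emit the conjoint (backward) copy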
 871   void array_overlap_test(address no_overlap_target, int log2_elem_size, Register tmp1, Register tmp2) {
 872     assert(no_overlap_target != NULL, "must be generated");
 873     array_overlap_test(no_overlap_target, NULL, log2_elem_size, tmp1, tmp2);
 874   }
 875   void array_overlap_test(Label& L_no_overlap, int log2_elem_size, Register tmp1, Register tmp2) {
 876     array_overlap_test(NULL, &L_no_overlap, log2_elem_size, tmp1, tmp2);
 877   }
 878   void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size, Register tmp1, Register tmp2) {
 879     const Register from       = R0;
 880     const Register to         = R1;
 881     const Register count      = R2;
 882     const Register to_from    = tmp1; // to - from
 883     const Register byte_count = (log2_elem_size == 0) ? count : tmp2; // count << log2_elem_size
 884     assert_different_registers(from, to, count, tmp1, tmp2);
 885 
 886     // The no_overlap version works if 'to' is lower (unsigned) than 'from'
 887     // or if 'to' is at least (count*size) above 'from'.
 888 
 889     BLOCK_COMMENT("Array Overlap Test:");
 890     __ subs(to_from, to, from);
 891     if (log2_elem_size != 0) {
 892       __ mov(byte_count, AsmOperand(count, lsl, log2_elem_size));
 893     }
 894     if (NOLp == NULL)
 895       __ b(no_overlap_target,lo);
 896     else
 897       __ b((*NOLp), lo);
 898     __ cmp(to_from, byte_count);
 899     if (NOLp == NULL)
 900       __ b(no_overlap_target, ge);
 901     else
 902       __ b((*NOLp), ge);
 903   }
 904 
 905 
 906   //   probably we should choose between "prefetch-store before or after store", not "before or after load".
 907   void prefetch(Register from, Register to, int offset, int to_delta = 0) {
 908     __ prefetch_read(Address(from, offset));
 909   }
 910 
 911   // Generate the inner loop for forward aligned array copy
 912   //
 913   // Arguments
 914   //      from:      src address, 64 bits  aligned
 915   //      to:        dst address, wordSize aligned
 916   //      count:     number of elements (32-bit int)
 917   //      bytes_per_count: number of bytes for each unit of 'count'
 918   //
 919   // Return the minimum initial value for count
 920   //
 921   // Notes:
 922   // - 'from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA)
 923   // - 'to' aligned on wordSize
 924   // - 'count' must be greater than or equal to the returned value
 925   //
 926   // Increases 'from' and 'to' by count*bytes_per_count.
 927   //
 928   // Scratches 'count', R3.
 929   // R4-R10 are preserved (saved/restored).
 930   //
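       // For example (sketch): with bytes_per_count == 4 (jint elements), the 8-register
       // LDM/STM loop below moves bytes_per_loop == 8*wordSize == 32 bytes per iteration,
       // so the returned minimum count is count_per_loop == 32/4 == 8 elements.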
 931   int generate_forward_aligned_copy_loop(Register from, Register to, Register count, int bytes_per_count) {
 932     assert (from == R0 && to == R1 && count == R2, "adjust the implementation below");
 933 
 934     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iteration
 935     arraycopy_loop_config *config = &arraycopy_configurations[ArmCopyPlatform].forward_aligned;
 936     int pld_offset = config->pld_distance;
 937     const int count_per_loop = bytes_per_loop / bytes_per_count;
 938 
 939     bool split_read = config->split_ldm;
 940     bool split_write = config->split_stm;
 941 
 942     // XXX optim: use VLDM/VSTM when available (Neon) with PLD
 943     //  NEONCopyPLD
 944     //      PLD [r1, #0xC0]
 945     //      VLDM r1!,{d0-d7}
 946     //      VSTM r0!,{d0-d7}
 947     //      SUBS r2,r2,#0x40
 948     //      BGE NEONCopyPLD
 949 
 950     __ push(RegisterSet(R4,R10));
 951 
 952     const bool prefetch_before = pld_offset < 0;
 953     const bool prefetch_after = pld_offset > 0;
 954 
 955     Label L_skip_pld;
 956 
 957     // predecrease count so the loop exits when fewer than count_per_loop elements remain
 958     __ sub_32(count, count, count_per_loop);
 959 
 960     if (pld_offset != 0) {
 961       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
 962 
 963       prefetch(from, to, 0);
 964 
 965       if (prefetch_before) {
 966         // If prefetch is done ahead, final PLDs that overflow the
 967         // copied area can be easily avoided. 'count' is predecreased
 968         // by the prefetch distance to optimize the inner loop and the
 969         // outer loop skips the PLD.
 970         __ subs_32(count, count, (bytes_per_loop+pld_offset)/bytes_per_count);
 971 
 972         // skip prefetch for small copies
 973         __ b(L_skip_pld, lt);
 974       }
 975 
 976       int offset = ArmCopyCacheLineSize;
 977       while (offset <= pld_offset) {
 978         prefetch(from, to, offset);
 979         offset += ArmCopyCacheLineSize;
 980       };
 981     }
 982 
 983     {
 984       // 32-bit ARM note: we have tried implementing loop unrolling to skip one
 985       // PLD with 64 bytes cache line but the gain was not significant.
 986 
 987       Label L_copy_loop;
 988       __ align(OptoLoopAlignment);
 989       __ BIND(L_copy_loop);
 990 
 991       if (prefetch_before) {
 992         prefetch(from, to, bytes_per_loop + pld_offset);
 993         __ BIND(L_skip_pld);
 994       }
 995 
 996       if (split_read) {
 997         // Split the register set in two sets so that there is less
 998         // latency between LDM and STM (R3-R6 available while R7-R10
 999         // still loading) and fewer register locking issues when iterating
1000         // on the first LDM.
1001         __ ldmia(from, RegisterSet(R3, R6), writeback);
1002         __ ldmia(from, RegisterSet(R7, R10), writeback);
1003       } else {
1004         __ ldmia(from, RegisterSet(R3, R10), writeback);
1005       }
1006 
1007       __ subs_32(count, count, count_per_loop);
1008 
1009       if (prefetch_after) {
1010         prefetch(from, to, pld_offset, bytes_per_loop);
1011       }
1012 
1013       if (split_write) {
1014         __ stmia(to, RegisterSet(R3, R6), writeback);
1015         __ stmia(to, RegisterSet(R7, R10), writeback);
1016       } else {
1017         __ stmia(to, RegisterSet(R3, R10), writeback);
1018       }
1019 
1020       __ b(L_copy_loop, ge);
1021 
1022       if (prefetch_before) {
1023         // the inner loop may end earlier, allowing us to skip the PLD for the last iterations
1024         __ cmn_32(count, (bytes_per_loop + pld_offset)/bytes_per_count);
1025         __ b(L_skip_pld, ge);
1026       }
1027     }
1028     BLOCK_COMMENT("Remaining bytes:");
1029     // still 0..bytes_per_loop-1 aligned bytes to copy, count already decreased by (at least) bytes_per_loop bytes
1030 
1031     // __ add(count, count, ...); // addition useless for the bit tests
1032     assert (pld_offset % bytes_per_loop == 0, "decreasing count by pld_offset before loop must not change tested bits");
1033 
1034     __ tst(count, 16 / bytes_per_count);
1035     __ ldmia(from, RegisterSet(R3, R6), writeback, ne); // copy 16 bytes
1036     __ stmia(to, RegisterSet(R3, R6), writeback, ne);
1037 
1038     __ tst(count, 8 / bytes_per_count);
1039     __ ldmia(from, RegisterSet(R3, R4), writeback, ne); // copy 8 bytes
1040     __ stmia(to, RegisterSet(R3, R4), writeback, ne);
1041 
1042     if (bytes_per_count <= 4) {
1043       __ tst(count, 4 / bytes_per_count);
1044       __ ldr(R3, Address(from, 4, post_indexed), ne); // copy 4 bytes
1045       __ str(R3, Address(to, 4, post_indexed), ne);
1046     }
1047 
1048     if (bytes_per_count <= 2) {
1049       __ tst(count, 2 / bytes_per_count);
1050       __ ldrh(R3, Address(from, 2, post_indexed), ne); // copy 2 bytes
1051       __ strh(R3, Address(to, 2, post_indexed), ne);
1052     }
1053 
1054     if (bytes_per_count == 1) {
1055       __ tst(count, 1);
1056       __ ldrb(R3, Address(from, 1, post_indexed), ne);
1057       __ strb(R3, Address(to, 1, post_indexed), ne);
1058     }
1059 
1060     __ pop(RegisterSet(R4,R10));
1061 
1062     return count_per_loop;
1063   }
1064 
1065 
1066   // Generate the inner loop for backward aligned array copy
1067   //
1068   // Arguments
1069   //      end_from:      src end address, 64 bits  aligned
1070   //      end_to:        dst end address, wordSize aligned
1071   //      count:         number of elements (32-bit int)
1072   //      bytes_per_count: number of bytes for each unit of 'count'
1073   //
1074   // Return the minimum initial value for count
1075   //
1076   // Notes:
1077   // - 'end_from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA)
1078   // - 'end_to' aligned on wordSize
1079   // - 'count' must be greater than or equal to the returned value
1080   //
1081   // Decreases 'end_from' and 'end_to' by count*bytes_per_count.
1082   //
1083   // Scratches 'count', R3.
1084   // ARM R4-R10 are preserved (saved/restored).
1085   //
1086   int generate_backward_aligned_copy_loop(Register end_from, Register end_to, Register count, int bytes_per_count) {
1087     assert (end_from == R0 && end_to == R1 && count == R2, "adjust the implementation below");
1088 
1089     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iteration
1090     const int count_per_loop = bytes_per_loop / bytes_per_count;
1091 
1092     arraycopy_loop_config *config = &arraycopy_configurations[ArmCopyPlatform].backward_aligned;
1093     int pld_offset = config->pld_distance;
1094 
1095     bool split_read = config->split_ldm;
1096     bool split_write = config->split_stm;
1097 
1098     // See the forward copy variant for additional comments.
1099 
1100     __ push(RegisterSet(R4,R10));
1101 
1102     __ sub_32(count, count, count_per_loop);
1103 
1104     const bool prefetch_before = pld_offset < 0;
1105     const bool prefetch_after = pld_offset > 0;
1106 
1107     Label L_skip_pld;
1108 
1109     if (pld_offset != 0) {
1110       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
1111 
1112       prefetch(end_from, end_to, -wordSize);
1113 
1114       if (prefetch_before) {
1115         __ subs_32(count, count, (bytes_per_loop + pld_offset) / bytes_per_count);
1116         __ b(L_skip_pld, lt);
1117       }
1118 
1119       int offset = ArmCopyCacheLineSize;
1120       while (offset <= pld_offset) {
1121         prefetch(end_from, end_to, -(wordSize + offset));
1122         offset += ArmCopyCacheLineSize;
1123       };
1124     }
1125 
1126     {
1127       // 32-bit ARM note: we have tried implementing loop unrolling to skip one
1128       // PLD with 64 bytes cache line but the gain was not significant.
1129 
1130       Label L_copy_loop;
1131       __ align(OptoLoopAlignment);
1132       __ BIND(L_copy_loop);
1133 
1134       if (prefetch_before) {
1135         prefetch(end_from, end_to, -(wordSize + bytes_per_loop + pld_offset));
1136         __ BIND(L_skip_pld);
1137       }
1138 
1139       if (split_read) {
1140         __ ldmdb(end_from, RegisterSet(R7, R10), writeback);
1141         __ ldmdb(end_from, RegisterSet(R3, R6), writeback);
1142       } else {
1143         __ ldmdb(end_from, RegisterSet(R3, R10), writeback);
1144       }
1145 
1146       __ subs_32(count, count, count_per_loop);
1147 
1148       if (prefetch_after) {
1149         prefetch(end_from, end_to, -(wordSize + pld_offset), -bytes_per_loop);
1150       }
1151 
1152       if (split_write) {
1153         __ stmdb(end_to, RegisterSet(R7, R10), writeback);
1154         __ stmdb(end_to, RegisterSet(R3, R6), writeback);
1155       } else {
1156         __ stmdb(end_to, RegisterSet(R3, R10), writeback);
1157       }
1158 
1159       __ b(L_copy_loop, ge);
1160 
1161       if (prefetch_before) {
1162         __ cmn_32(count, (bytes_per_loop + pld_offset)/bytes_per_count);
1163         __ b(L_skip_pld, ge);
1164       }
1165     }
1166     BLOCK_COMMENT("Remaining bytes:");
1167     // still 0..bytes_per_loop-1 aligned bytes to copy, count already decreased by (at least) bytes_per_loop bytes
1168 
1169     // __ add(count, count, ...); // addition useless for the bit tests
1170     assert (pld_offset % bytes_per_loop == 0, "decreasing count by pld_offset before loop must not change tested bits");
1171 
1172     __ tst(count, 16 / bytes_per_count);
1173     __ ldmdb(end_from, RegisterSet(R3, R6), writeback, ne); // copy 16 bytes
1174     __ stmdb(end_to, RegisterSet(R3, R6), writeback, ne);
1175 
1176     __ tst(count, 8 / bytes_per_count);
1177     __ ldmdb(end_from, RegisterSet(R3, R4), writeback, ne); // copy 8 bytes
1178     __ stmdb(end_to, RegisterSet(R3, R4), writeback, ne);
1179 
1180     if (bytes_per_count <= 4) {
1181       __ tst(count, 4 / bytes_per_count);
1182       __ ldr(R3, Address(end_from, -4, pre_indexed), ne); // copy 4 bytes
1183       __ str(R3, Address(end_to, -4, pre_indexed), ne);
1184     }
1185 
1186     if (bytes_per_count <= 2) {
1187       __ tst(count, 2 / bytes_per_count);
1188       __ ldrh(R3, Address(end_from, -2, pre_indexed), ne); // copy 2 bytes
1189       __ strh(R3, Address(end_to, -2, pre_indexed), ne);
1190     }
1191 
1192     if (bytes_per_count == 1) {
1193       __ tst(count, 1);
1194       __ ldrb(R3, Address(end_from, -1, pre_indexed), ne);
1195       __ strb(R3, Address(end_to, -1, pre_indexed), ne);
1196     }
1197 
1198     __ pop(RegisterSet(R4,R10));
1199 
1200     return count_per_loop;
1201   }
1202 
1203 
1204   // Generate the inner loop for shifted forward array copy (unaligned copy).
1205   // It can be used when bytes_per_count < wordSize, i.e. byte/short copy
1206   //
1207   // Arguments
1208   //      from:      start src address, 64 bits aligned
1209   //      to:        start dst address, (now) wordSize aligned
1210   //      count:     number of elements (32-bit int)
1211   //      bytes_per_count: number of bytes for each unit of 'count'
1212   //      lsr_shift: shift applied to 'old' value to skip the already-written bytes
1213   //      lsl_shift: shift applied to 'new' value to set the high bytes of the next write
1214   //
1215   // Return the minimum initial value for count
1216   //
1217   // Notes:
1218   // - 'from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA)
1219   // - 'to' aligned on wordSize
1220   // - 'count' must be greater than or equal to the returned value
1221   // - 'lsr_shift' + 'lsl_shift' = BitsPerWord
1222   // - 'bytes_per_count' is 1 or 2
1223   //
1224   // Increases 'to' by count*bytes_per_count.
1225   //
1226   // Scratches 'from' and 'count', R3-R10, R12
1227   //
1228   // On entry:
1229   // - R12 is preloaded with the first 'BitsPerWord' bits read just before 'from'
1230   // - (R12 >> lsr_shift) is the part not yet written (just before 'to')
1231   // --> (*to) = (R12 >> lsr_shift) | ((*from) << lsl_shift); ...
1232   //
1233   // This implementation may read more bytes than required.
1234   // Actually, it always reads exactly all data from the copied region with the upper bound aligned up to wordSize,
1235   // so the excess reads do not cross a word boundary and are thus harmless.
1236   //
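       // Worked example (sketch): with lsr_shift == 8 and lsl_shift == 24, each iteration of
       // the merging scheme above stores
       //
       //   *to++ = (R12 >> 8) | (new_word << 24);   // new_word is the next word read from 'from'
       //
       // i.e. the three not-yet-written upper bytes of the previous read fill the low bytes of
       // the store and the low byte of the new read fills the top byte; the new word then takes
       // over the role of R12 for the next store.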
1237   int generate_forward_shifted_copy_loop(Register from, Register to, Register count, int bytes_per_count, int lsr_shift, int lsl_shift) {
1238     assert (from == R0 && to == R1 && count == R2, "adjust the implementation below");
1239 
1240     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iter
1241     const int count_per_loop = bytes_per_loop / bytes_per_count;
1242 
1243     arraycopy_loop_config *config = &arraycopy_configurations[ArmCopyPlatform].forward_shifted;
1244     int pld_offset = config->pld_distance;
1245 
1246     bool split_read = config->split_ldm;
1247     bool split_write = config->split_stm;
1248 
1249     const bool prefetch_before = pld_offset < 0;
1250     const bool prefetch_after = pld_offset > 0;
1251     Label L_skip_pld, L_last_read, L_done;
1252     if (pld_offset != 0) {
1253 
1254       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
1255 
1256       prefetch(from, to, 0);
1257 
1258       if (prefetch_before) {
1259         __ cmp_32(count, count_per_loop);
1260         __ b(L_last_read, lt);
1261         // skip prefetch for small copies
1262         // warning: count is predecreased by the prefetch distance to optimize the inner loop
1263         __ subs_32(count, count, ((bytes_per_loop + pld_offset) / bytes_per_count) + count_per_loop);
1264         __ b(L_skip_pld, lt);
1265       }
1266 
1267       int offset = ArmCopyCacheLineSize;
1268       while (offset <= pld_offset) {
1269         prefetch(from, to, offset);
1270         offset += ArmCopyCacheLineSize;
1271       };
1272     }
1273 
1274     Label L_shifted_loop;
1275 
1276     __ align(OptoLoopAlignment);
1277     __ BIND(L_shifted_loop);
1278 
1279     if (prefetch_before) {
1280       // do it early if there might be register locking issues
1281       prefetch(from, to, bytes_per_loop + pld_offset);
1282       __ BIND(L_skip_pld);
1283     } else {
1284       __ cmp_32(count, count_per_loop);
1285       __ b(L_last_read, lt);
1286     }
1287 
1288     // read 32 bytes
1289     if (split_read) {
1290       // if write is not split, use fewer registers in the first set to reduce locking
1291       RegisterSet set1 = split_write ? RegisterSet(R4, R7) : RegisterSet(R4, R5);
1292       RegisterSet set2 = (split_write ? RegisterSet(R8, R10) : RegisterSet(R6, R10)) | R12;
1293       __ ldmia(from, set1, writeback);
1294       __ mov(R3, AsmOperand(R12, lsr, lsr_shift)); // part of R12 not yet written
1295       __ ldmia(from, set2, writeback);
1296       __ subs(count, count, count_per_loop); // XXX: should it be before the 2nd LDM ? (latency vs locking)
1297     } else {
1298       __ mov(R3, AsmOperand(R12, lsr, lsr_shift)); // part of R12 not yet written
1299       __ ldmia(from, RegisterSet(R4, R10) | R12, writeback); // Note: small latency on R4
1300       __ subs(count, count, count_per_loop);
1301     }
1302 
1303     if (prefetch_after) {
1304       // do it after the 1st ldm/ldp anyway  (no locking issues with early STM/STP)
1305       prefetch(from, to, pld_offset, bytes_per_loop);
1306     }
1307 
1308     // prepare (shift) the values in R3..R10
1309     __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift)); // merged below low bytes of next val
1310     __ logical_shift_right(R4, R4, lsr_shift); // unused part of next val
1311     __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift)); // ...
1312     __ logical_shift_right(R5, R5, lsr_shift);
1313     __ orr(R5, R5, AsmOperand(R6, lsl, lsl_shift));
1314     __ logical_shift_right(R6, R6, lsr_shift);
1315     __ orr(R6, R6, AsmOperand(R7, lsl, lsl_shift));
1316     if (split_write) {
1317       // write the first half as soon as possible to reduce stm locking
1318       __ stmia(to, RegisterSet(R3, R6), writeback, prefetch_before ? gt : ge);
1319     }
1320     __ logical_shift_right(R7, R7, lsr_shift);
1321     __ orr(R7, R7, AsmOperand(R8, lsl, lsl_shift));
1322     __ logical_shift_right(R8, R8, lsr_shift);
1323     __ orr(R8, R8, AsmOperand(R9, lsl, lsl_shift));
1324     __ logical_shift_right(R9, R9, lsr_shift);
1325     __ orr(R9, R9, AsmOperand(R10, lsl, lsl_shift));
1326     __ logical_shift_right(R10, R10, lsr_shift);
1327     __ orr(R10, R10, AsmOperand(R12, lsl, lsl_shift));
1328 
1329     if (split_write) {
1330       __ stmia(to, RegisterSet(R7, R10), writeback, prefetch_before ? gt : ge);
1331     } else {
1332       __ stmia(to, RegisterSet(R3, R10), writeback, prefetch_before ? gt : ge);
1333     }
1334     __ b(L_shifted_loop, gt); // no need to loop if 0 (when count need not be precise modulo bytes_per_loop)
1335 
1336     if (prefetch_before) {
1337       // the first loop may end earlier, allowing us to skip the PLD at the end
1338       __ cmn_32(count, (bytes_per_loop + pld_offset)/bytes_per_count);
1339       __ stmia(to, RegisterSet(R3, R10), writeback); // stmia was skipped
1340       __ b(L_skip_pld, ge);
1341       __ adds_32(count, count, ((bytes_per_loop + pld_offset) / bytes_per_count) + count_per_loop);
1342     }
1343 
1344     __ BIND(L_last_read);
1345     __ b(L_done, eq);
1346 
1347     switch (bytes_per_count) {
1348     case 2:
1349       __ mov(R3, AsmOperand(R12, lsr, lsr_shift));
1350       __ tst(count, 8);
1351       __ ldmia(from, RegisterSet(R4, R7), writeback, ne);
1352       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1353       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1354       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1355       __ mov(R5, AsmOperand(R5, lsr, lsr_shift), ne);
1356       __ orr(R5, R5, AsmOperand(R6, lsl, lsl_shift), ne);
1357       __ mov(R6, AsmOperand(R6, lsr, lsr_shift), ne);
1358       __ orr(R6, R6, AsmOperand(R7, lsl, lsl_shift), ne);
1359       __ stmia(to, RegisterSet(R3, R6), writeback, ne);
1360       __ mov(R3, AsmOperand(R7, lsr, lsr_shift), ne);
1361 
1362       __ tst(count, 4);
1363       __ ldmia(from, RegisterSet(R4, R5), writeback, ne);
1364       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1365       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1366       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1367       __ stmia(to, RegisterSet(R3, R4), writeback, ne);
1368       __ mov(R3, AsmOperand(R5, lsr, lsr_shift), ne);
1369 
1370       __ tst(count, 2);
1371       __ ldr(R4, Address(from, 4, post_indexed), ne);
1372       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne);
1373       __ str(R3, Address(to, 4, post_indexed), ne);
1374       __ mov(R3, AsmOperand(R4, lsr, lsr_shift), ne);
1375 
1376       __ tst(count, 1);
1377       __ strh(R3, Address(to, 2, post_indexed), ne); // one last short
1378       break;
1379 
1380     case 1:
1381       __ mov(R3, AsmOperand(R12, lsr, lsr_shift));
1382       __ tst(count, 16);
1383       __ ldmia(from, RegisterSet(R4, R7), writeback, ne);
1384       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1385       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1386       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1387       __ mov(R5, AsmOperand(R5, lsr, lsr_shift), ne);
1388       __ orr(R5, R5, AsmOperand(R6, lsl, lsl_shift), ne);
1389       __ mov(R6, AsmOperand(R6, lsr, lsr_shift), ne);
1390       __ orr(R6, R6, AsmOperand(R7, lsl, lsl_shift), ne);
1391       __ stmia(to, RegisterSet(R3, R6), writeback, ne);
1392       __ mov(R3, AsmOperand(R7, lsr, lsr_shift), ne);
1393 
1394       __ tst(count, 8);
1395       __ ldmia(from, RegisterSet(R4, R5), writeback, ne);
1396       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne); // merged below low bytes of next val
1397       __ mov(R4, AsmOperand(R4, lsr, lsr_shift), ne); // unused part of next val
1398       __ orr(R4, R4, AsmOperand(R5, lsl, lsl_shift), ne); // ...
1399       __ stmia(to, RegisterSet(R3, R4), writeback, ne);
1400       __ mov(R3, AsmOperand(R5, lsr, lsr_shift), ne);
1401 
1402       __ tst(count, 4);
1403       __ ldr(R4, Address(from, 4, post_indexed), ne);
1404       __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ne);
1405       __ str(R3, Address(to, 4, post_indexed), ne);
1406       __ mov(R3, AsmOperand(R4, lsr, lsr_shift), ne);
1407 
1408       __ andr(count, count, 3);
1409       __ cmp(count, 2);
1410 
1411       // Note: R3 might contain enough bytes ready to write (3 needed at most),
1412       // thus the load for lsl_shift==24 is not needed (it would in fact force reading
1413       // beyond the source buffer end boundary)
1414       if (lsl_shift == 8) {
1415         __ ldr(R4, Address(from, 4, post_indexed), ge);
1416         __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), ge);
1417       } else if (lsl_shift == 16) {
1418         __ ldr(R4, Address(from, 4, post_indexed), gt);
1419         __ orr(R3, R3, AsmOperand(R4, lsl, lsl_shift), gt);
1420       }
1421 
1422       __ strh(R3, Address(to, 2, post_indexed), ge); // two last bytes
1423       __ mov(R3, AsmOperand(R3, lsr, 16), gt);
1424 
1425       __ tst(count, 1);
1426       __ strb(R3, Address(to, 1, post_indexed), ne); // one last byte
1427       break;
1428     }
1429 
1430     __ BIND(L_done);
1431     return 0; // no minimum
1432   }
1433 
1434   // Generate the inner loop for shifted backward array copy (unaligned copy).
1435   // It can be used when bytes_per_count < wordSize, i.e. byte/short copy
1436   //
1437   // Arguments
1438   //      end_from:  end src address, 64 bits aligned
1439   //      end_to:    end dst address, (now) wordSize aligned
1440   //      count:     number of elements (32-bit int)
1441   //      bytes_per_count: number of bytes for each unit of 'count'
1442   //      lsl_shift: shift applied to 'old' value to skip the already-written bytes
1443   //      lsr_shift: shift applied to 'new' value to set the low bytes of the next write
1444   //
1445   // Return the minimum initial value for count
1446   //
1447   // Notes:
1448   // - 'end_from' aligned on 64-bit (recommended for 32-bit ARM in case this speeds up LDMIA)
1449   // - 'end_to' aligned on wordSize
1450   // - 'count' must be greater than or equal to the returned value
1451   // - 'lsr_shift' + 'lsl_shift' = 'BitsPerWord'
1452   // - 'bytes_per_count' is 1 or 2 on 32-bit ARM
1453   //
1454   // Decreases 'end_to' by count*bytes_per_count.
1455   //
1456   // Scratches 'end_from', 'count', R3-R10, R12
1457   //
1458   // On entry:
  // - R3 is preloaded with 'BitsPerWord' bits already read from the source (the word at the current 'end_from')
  // - (R3 << lsl_shift) is the part not yet written
  // --> (*--end_to) = (R3 << lsl_shift) | ((*--end_from) >> lsr_shift); ...
  //
  // This implementation may read more bytes than required.
  // Actually, it always reads exactly all data from the copied region with the beginning aligned down by wordSize,
  // so the excess reads do not cross a word boundary and are thus harmless.
1466   //
1467   int generate_backward_shifted_copy_loop(Register end_from, Register end_to, Register count, int bytes_per_count, int lsr_shift, int lsl_shift) {
1468     assert (end_from == R0 && end_to == R1 && count == R2, "adjust the implementation below");
1469 
1470     const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iter
1471     const int count_per_loop = bytes_per_loop / bytes_per_count;
1472 
1473     arraycopy_loop_config *config=&arraycopy_configurations[ArmCopyPlatform].backward_shifted;
1474     int pld_offset = config->pld_distance;
1475 
1476     bool split_read= config->split_ldm;
1477     bool split_write= config->split_stm;
1478 
1479 
1480     const bool prefetch_before = pld_offset < 0;
1481     const bool prefetch_after = pld_offset > 0;
1482 
1483     Label L_skip_pld, L_done, L_last_read;
1484     if (pld_offset != 0) {
1485 
1486       pld_offset = (pld_offset < 0) ? -pld_offset : pld_offset;
1487 
1488       prefetch(end_from, end_to, -wordSize);
1489 
1490       if (prefetch_before) {
1491         __ cmp_32(count, count_per_loop);
1492         __ b(L_last_read, lt);
1493 
1494         // skip prefetch for small copies
1495         // warning: count is predecreased by the prefetch distance to optimize the inner loop
1496         __ subs_32(count, count, ((bytes_per_loop + pld_offset)/bytes_per_count) + count_per_loop);
1497         __ b(L_skip_pld, lt);
1498       }
1499 
1500       int offset = ArmCopyCacheLineSize;
1501       while (offset <= pld_offset) {
1502         prefetch(end_from, end_to, -(wordSize + offset));
1503         offset += ArmCopyCacheLineSize;
1504       };
1505     }
1506 
1507     Label L_shifted_loop;
1508     __ align(OptoLoopAlignment);
1509     __ BIND(L_shifted_loop);
1510 
1511     if (prefetch_before) {
1512       // do the 1st ldm/ldp first anyway (no locking issues with early STM/STP)
1513       prefetch(end_from, end_to, -(wordSize + bytes_per_loop + pld_offset));
1514       __ BIND(L_skip_pld);
1515     } else {
1516       __ cmp_32(count, count_per_loop);
1517       __ b(L_last_read, lt);
1518     }
1519 
1520     if (split_read) {
1521       __ ldmdb(end_from, RegisterSet(R7, R10), writeback);
1522       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
1523       __ ldmdb(end_from, RegisterSet(R3, R6), writeback);
1524     } else {
1525       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
1526       __ ldmdb(end_from, RegisterSet(R3, R10), writeback);
1527     }
1528 
1529     __ subs_32(count, count, count_per_loop);
1530 
1531     if (prefetch_after) { // do prefetch during ldm/ldp latency
1532       prefetch(end_from, end_to, -(wordSize + pld_offset), -bytes_per_loop);
1533     }
1534 
1535     // prepare the values in R4..R10,R12
1536     __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift)); // merged above high  bytes of prev val
1537     __ logical_shift_left(R10, R10, lsl_shift); // unused part of prev val
1538     __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift)); // ...
1539     __ logical_shift_left(R9, R9, lsl_shift);
1540     __ orr(R9, R9, AsmOperand(R8, lsr, lsr_shift));
1541     __ logical_shift_left(R8, R8, lsl_shift);
1542     __ orr(R8, R8, AsmOperand(R7, lsr, lsr_shift));
1543     __ logical_shift_left(R7, R7, lsl_shift);
1544     __ orr(R7, R7, AsmOperand(R6, lsr, lsr_shift));
1545     __ logical_shift_left(R6, R6, lsl_shift);
1546     __ orr(R6, R6, AsmOperand(R5, lsr, lsr_shift));
1547     if (split_write) {
1548       // store early to reduce locking issues
1549       __ stmdb(end_to, RegisterSet(R6, R10) | R12, writeback, prefetch_before ? gt : ge);
1550     }
1551     __ logical_shift_left(R5, R5, lsl_shift);
1552     __ orr(R5, R5, AsmOperand(R4, lsr, lsr_shift));
1553     __ logical_shift_left(R4, R4, lsl_shift);
1554     __ orr(R4, R4, AsmOperand(R3, lsr, lsr_shift));
1555 
1556     if (split_write) {
1557       __ stmdb(end_to, RegisterSet(R4, R5), writeback, prefetch_before ? gt : ge);
1558     } else {
1559       __ stmdb(end_to, RegisterSet(R4, R10) | R12, writeback, prefetch_before ? gt : ge);
1560     }
1561 
1562     __ b(L_shifted_loop, gt); // no need to loop if 0 (when count need not be precise modulo bytes_per_loop)
1563 
1564     if (prefetch_before) {
      // the first loop may end earlier, allowing the pld at the end to be skipped
1566       __ cmn_32(count, ((bytes_per_loop + pld_offset)/bytes_per_count));
1567       __ stmdb(end_to, RegisterSet(R4, R10) | R12, writeback); // stmdb was skipped
1568       __ b(L_skip_pld, ge);
1569       __ adds_32(count, count, ((bytes_per_loop + pld_offset) / bytes_per_count) + count_per_loop);
1570     }
1571 
1572     __ BIND(L_last_read);
1573     __ b(L_done, eq);
1574 
1575       switch(bytes_per_count) {
1576       case 2:
1577       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
1578       __ tst(count, 8);
1579       __ ldmdb(end_from, RegisterSet(R7,R10), writeback, ne);
1580       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1581       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
1582       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift),ne); // ...
1583       __ mov(R9, AsmOperand(R9, lsl, lsl_shift),ne);
1584       __ orr(R9, R9, AsmOperand(R8, lsr, lsr_shift),ne);
1585       __ mov(R8, AsmOperand(R8, lsl, lsl_shift),ne);
1586       __ orr(R8, R8, AsmOperand(R7, lsr, lsr_shift),ne);
1587       __ stmdb(end_to, RegisterSet(R8,R10)|R12, writeback, ne);
1588       __ mov(R12, AsmOperand(R7, lsl, lsl_shift), ne);
1589 
1590       __ tst(count, 4);
1591       __ ldmdb(end_from, RegisterSet(R9, R10), writeback, ne);
1592       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1593       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
1594       __ orr(R10, R10, AsmOperand(R9, lsr,lsr_shift),ne); // ...
1595       __ stmdb(end_to, RegisterSet(R10)|R12, writeback, ne);
1596       __ mov(R12, AsmOperand(R9, lsl, lsl_shift), ne);
1597 
1598       __ tst(count, 2);
1599       __ ldr(R10, Address(end_from, -4, pre_indexed), ne);
1600       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1601       __ str(R12, Address(end_to, -4, pre_indexed), ne);
1602       __ mov(R12, AsmOperand(R10, lsl, lsl_shift), ne);
1603 
1604       __ tst(count, 1);
1605       __ mov(R12, AsmOperand(R12, lsr, lsr_shift),ne);
1606       __ strh(R12, Address(end_to, -2, pre_indexed), ne); // one last short
1607       break;
1608 
1609       case 1:
1610       __ mov(R12, AsmOperand(R3, lsl, lsl_shift)); // part of R3 not yet written
1611       __ tst(count, 16);
1612       __ ldmdb(end_from, RegisterSet(R7,R10), writeback, ne);
1613       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1614       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
1615       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift),ne); // ...
1616       __ mov(R9, AsmOperand(R9, lsl, lsl_shift),ne);
1617       __ orr(R9, R9, AsmOperand(R8, lsr, lsr_shift),ne);
1618       __ mov(R8, AsmOperand(R8, lsl, lsl_shift),ne);
1619       __ orr(R8, R8, AsmOperand(R7, lsr, lsr_shift),ne);
1620       __ stmdb(end_to, RegisterSet(R8,R10)|R12, writeback, ne);
1621       __ mov(R12, AsmOperand(R7, lsl, lsl_shift), ne);
1622 
1623       __ tst(count, 8);
1624       __ ldmdb(end_from, RegisterSet(R9,R10), writeback, ne);
1625       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1626       __ mov(R10, AsmOperand(R10, lsl, lsl_shift),ne); // unused part of prev val
1627       __ orr(R10, R10, AsmOperand(R9, lsr, lsr_shift),ne); // ...
1628       __ stmdb(end_to, RegisterSet(R10)|R12, writeback, ne);
1629       __ mov(R12, AsmOperand(R9, lsl, lsl_shift), ne);
1630 
1631       __ tst(count, 4);
1632       __ ldr(R10, Address(end_from, -4, pre_indexed), ne);
1633       __ orr(R12, R12, AsmOperand(R10, lsr, lsr_shift), ne);
1634       __ str(R12, Address(end_to, -4, pre_indexed), ne);
1635       __ mov(R12, AsmOperand(R10, lsl, lsl_shift), ne);
1636 
1637       __ tst(count, 2);
1638       if (lsr_shift != 24) {
1639         // avoid useless reading R10 when we already have 3 bytes ready in R12
1640         __ ldr(R10, Address(end_from, -4, pre_indexed), ne);
1641         __ orr(R12, R12, AsmOperand(R10, lsr,lsr_shift), ne);
1642       }
1643 
1644       // Note: R12 contains enough bytes ready to write (3 needed at most)
1645       // write the 2 MSBs
1646       __ mov(R9, AsmOperand(R12, lsr, 16), ne);
1647       __ strh(R9, Address(end_to, -2, pre_indexed), ne);
1648       // promote remaining to MSB
1649       __ mov(R12, AsmOperand(R12, lsl, 16), ne);
1650 
1651       __ tst(count, 1);
1652       // write the MSB of R12
1653       __ mov(R12, AsmOperand(R12, lsr, 24), ne);
1654       __ strb(R12, Address(end_to, -1, pre_indexed), ne);
1655 
1656       break;
1657       }
1658 
1659     __ BIND(L_done);
1660     return 0; // no minimum
1661   }
1662 
  // Builds a post-indexed (forward) or pre-indexed (backward) address, letting the forward and backward copy implementations share code
1664   Address get_addr_with_indexing(Register base, int delta, bool forward) {
1665     if (forward) {
1666       return Address(base, delta, post_indexed);
1667     } else {
1668       return Address(base, -delta, pre_indexed);
1669     }
1670   }
1671 
1672   void load_one(Register rd, Register from, int size_in_bytes, bool forward, AsmCondition cond = al, Register rd2 = noreg) {
1673     assert_different_registers(from, rd, rd2);
1674     if (size_in_bytes < 8) {
1675       Address addr = get_addr_with_indexing(from, size_in_bytes, forward);
1676       __ load_sized_value(rd, addr, size_in_bytes, false, cond);
1677     } else {
1678       assert (rd2 != noreg, "second value register must be specified");
1679       assert (rd->encoding() < rd2->encoding(), "wrong value register set");
1680 
1681       if (forward) {
1682         __ ldmia(from, RegisterSet(rd) | rd2, writeback, cond);
1683       } else {
1684         __ ldmdb(from, RegisterSet(rd) | rd2, writeback, cond);
1685       }
1686     }
1687   }
1688 
1689   void store_one(Register rd, Register to, int size_in_bytes, bool forward, AsmCondition cond = al, Register rd2 = noreg) {
1690     assert_different_registers(to, rd, rd2);
1691     if (size_in_bytes < 8) {
1692       Address addr = get_addr_with_indexing(to, size_in_bytes, forward);
1693       __ store_sized_value(rd, addr, size_in_bytes, cond);
1694     } else {
1695       assert (rd2 != noreg, "second value register must be specified");
1696       assert (rd->encoding() < rd2->encoding(), "wrong value register set");
1697 
1698       if (forward) {
1699         __ stmia(to, RegisterSet(rd) | rd2, writeback, cond);
1700       } else {
1701         __ stmdb(to, RegisterSet(rd) | rd2, writeback, cond);
1702       }
1703     }
1704   }
1705 
  // Copies data from 'from' to 'to' in the specified direction to align 'from' by 64 bits.
1707   // (on 32-bit ARM 64-bit alignment is better for LDM).
1708   //
1709   // Arguments:
1710   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
1711   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
1712   //     count:             32-bit int, maximum number of elements which can be copied
1713   //     bytes_per_count:   size of an element
1714   //     forward:           specifies copy direction
1715   //
1716   // Notes:
1717   //   'from' and 'to' must be aligned by 'bytes_per_count'
1718   //   'count' must not be less than the returned value
1719   //   shifts 'from' and 'to' by the number of copied bytes in corresponding direction
1720   //   decreases 'count' by the number of elements copied
1721   //
  // Returns the maximum number of elements which may be copied to reach the alignment.
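  // For illustration: with bytes_per_count == 1 and 'from' % 8 == 5, the loop below
  // copies 3 single bytes before 'from' becomes 8-byte aligned; the returned worst
  // case is 7/bytes_per_count elements (7 for byte copies, 3 for short copies).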
1723   int align_src(Register from, Register to, Register count, Register tmp, int bytes_per_count, bool forward) {
1724     assert_different_registers(from, to, count, tmp);
1725     if (bytes_per_count < 8) {
1726       Label L_align_src;
1727       __ BIND(L_align_src);
1728       __ tst(from, 7);
1729       // ne => not aligned: copy one element and (if bytes_per_count < 4) loop
1730       __ sub(count, count, 1, ne);
1731       load_one(tmp, from, bytes_per_count, forward, ne);
1732       store_one(tmp, to, bytes_per_count, forward, ne);
1733       if (bytes_per_count < 4) {
1734         __ b(L_align_src, ne); // if bytes_per_count == 4, then 0 or 1 loop iterations are enough
1735       }
1736     }
1737     return 7/bytes_per_count;
1738   }
1739 
1740   // Copies 'count' of 'bytes_per_count'-sized elements in the specified direction.
1741   //
1742   // Arguments:
1743   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
1744   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
1745   //     count:             32-bit int, number of elements to be copied
1746   //     entry:             copy loop entry point
1747   //     bytes_per_count:   size of an element
1748   //     forward:           specifies copy direction
1749   //
1750   // Notes:
1751   //     shifts 'from' and 'to'
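  //     For illustration: the loop is rotated - callers branch to 'entry', which
  //     decrements 'count' and, while elements remain, loads one element and jumps
  //     back to the store at the loop head.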
1752   void copy_small_array(Register from, Register to, Register count, Register tmp, Register tmp2, int bytes_per_count, bool forward, Label & entry) {
1753     assert_different_registers(from, to, count, tmp);
1754 
1755     __ align(OptoLoopAlignment);
1756     Label L_small_loop;
1757     __ BIND(L_small_loop);
1758     store_one(tmp, to, bytes_per_count, forward, al, tmp2);
1759     __ BIND(entry); // entry point
1760     __ subs(count, count, 1);
1761     load_one(tmp, from, bytes_per_count, forward, ge, tmp2);
1762     __ b(L_small_loop, ge);
1763   }
1764 
  // Aligns 'to' by taking the word already read from 'from' (passed in 'Rval') and writing part of it to 'to'.
1766   //
1767   // Arguments:
1768   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
1769   //     count:             32-bit int, number of elements allowed to be copied
1770   //     to_remainder:      remainder of dividing 'to' by wordSize
1771   //     bytes_per_count:   size of an element
1772   //     forward:           specifies copy direction
1773   //     Rval:              contains an already read but not yet written word;
1774   //                        its' LSBs (if forward) or MSBs (if !forward) are to be written to align 'to'.
1775   //
1776   // Notes:
  //     'count' must not be less than the returned value
  //     'to' must be aligned by bytes_per_count but must not be aligned by wordSize
  //     shifts 'to' by the number of written bytes (so that it becomes the bound of memory to be written)
  //     decreases 'count' by the number of elements written
1781   //     Rval's MSBs or LSBs remain to be written further by generate_{forward,backward}_shifted_copy_loop
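  //     For illustration (assumption: forward copy, 32-bit ARM, to_remainder == 1):
  //     bytes_to_write == 3 is decomposed as 1 + 2, so the code below emits a byte store
  //     of Rval's low byte followed by a halfword store of (Rval >> 8); the remaining
  //     high byte of Rval is left for the shifted copy loop.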
1782   int align_dst(Register to, Register count, Register Rval, Register tmp,
1783                                         int to_remainder, int bytes_per_count, bool forward) {
1784     assert_different_registers(to, count, tmp, Rval);
1785 
1786     assert (0 < to_remainder && to_remainder < wordSize, "to_remainder is not valid");
1787     assert (to_remainder % bytes_per_count == 0, "to must be aligned by bytes_per_count");
1788 
1789     int bytes_to_write = forward ? (wordSize - to_remainder) : to_remainder;
1790 
1791     int offset = 0;
1792 
1793     for (int l = 0; l < LogBytesPerWord; ++l) {
1794       int s = (1 << l);
1795       if (bytes_to_write & s) {
1796         int new_offset = offset + s*BitsPerByte;
1797         if (forward) {
1798           if (offset == 0) {
1799             store_one(Rval, to, s, forward);
1800           } else {
1801             __ logical_shift_right(tmp, Rval, offset);
1802             store_one(tmp, to, s, forward);
1803           }
1804         } else {
1805           __ logical_shift_right(tmp, Rval, BitsPerWord - new_offset);
1806           store_one(tmp, to, s, forward);
1807         }
1808 
1809         offset = new_offset;
1810       }
1811     }
1812 
1813     assert (offset == bytes_to_write * BitsPerByte, "all bytes must be copied");
1814 
1815     __ sub_32(count, count, bytes_to_write/bytes_per_count);
1816 
1817     return bytes_to_write / bytes_per_count;
1818   }
1819 
1820   // Copies 'count' of elements using shifted copy loop
1821   //
1822   // Arguments:
1823   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
1824   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
1825   //     count:             32-bit int, number of elements to be copied
1826   //     to_remainder:      remainder of dividing 'to' by wordSize
1827   //     bytes_per_count:   size of an element
1828   //     forward:           specifies copy direction
1829   //     Rval:              contains an already read but not yet written word
1830   //
1831   //
1832   // Notes:
  //     'count' must not be less than the returned value
1834   //     'from' must be aligned by wordSize
1835   //     'to' must be aligned by bytes_per_count but must not be aligned by wordSize
1836   //     shifts 'to' by the number of copied bytes
1837   //
1838   // Scratches R3-R10, R12
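  //
  // For illustration (32-bit ARM, forward copy, to_remainder == 3): align_dst writes one
  // byte, leaving 3 pending bytes; lsr_shift == (4 - 3) * 8 == 8 and lsl_shift == 3 * 8 == 24,
  // so each destination word then combines the 3 pending bytes with one byte taken from
  // the next source word.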
1839   int align_dst_and_generate_shifted_copy_loop(Register from, Register to, Register count, Register Rval,
1840                                                         int to_remainder, int bytes_per_count, bool forward) {
1841 
1842     assert (0 < to_remainder && to_remainder < wordSize, "to_remainder is invalid");
1843 
1844     const Register tmp  = forward ? R3 : R12;
1845     assert_different_registers(from, to, count, Rval, tmp);
1846 
1847     int required_to_align = align_dst(to, count, Rval, tmp, to_remainder, bytes_per_count, forward);
1848 
1849     int lsr_shift = (wordSize - to_remainder) * BitsPerByte;
1850     int lsl_shift = to_remainder * BitsPerByte;
1851 
1852     int min_copy;
1853     if (forward) {
1854       min_copy = generate_forward_shifted_copy_loop(from, to, count, bytes_per_count, lsr_shift, lsl_shift);
1855     } else {
1856       min_copy = generate_backward_shifted_copy_loop(from, to, count, bytes_per_count, lsr_shift, lsl_shift);
1857     }
1858 
1859     return min_copy + required_to_align;
1860   }
1861 
1862   // Copies 'count' of elements using shifted copy loop
1863   //
1864   // Arguments:
1865   //     from:              beginning (if forward) or upper bound (if !forward) of the region to be read
1866   //     to:                beginning (if forward) or upper bound (if !forward) of the region to be written
1867   //     count:             32-bit int, number of elements to be copied
1868   //     bytes_per_count:   size of an element
1869   //     forward:           specifies copy direction
1870   //
1871   // Notes:
  //     'count' must not be less than the returned value
1873   //     'from' must be aligned by wordSize
1874   //     'to' must be aligned by bytes_per_count but must not be aligned by wordSize
1875   //     shifts 'to' by the number of copied bytes
1876   //
1877   // Scratches 'from', 'count', R3 and R12.
  // R4-R10 are used but saved and restored (not scratched).
1879   int align_dst_and_generate_shifted_copy_loop(Register from, Register to, Register count, int bytes_per_count, bool forward) {
1880 
1881     const Register Rval = forward ? R12 : R3; // as generate_{forward,backward}_shifted_copy_loop expect
1882 
1883     int min_copy = 0;
1884 
1885     // Note: if {seq} is a sequence of numbers, L{seq} means that if the execution reaches this point,
    // then the remainder of 'to' divided by wordSize is one of the elements of {seq}.
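    // For bytes_per_count == 1, the remainder of 'to' modulo wordSize can be 1, 2 or 3 here
    // (0 is handled by the aligned copy loops), hence the three-way dispatch to L1/L2/L3 below;
    // for bytes_per_count == 2 only remainder 2 is possible.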
1887 
1888     __ push(RegisterSet(R4,R10));
1889     load_one(Rval, from, wordSize, forward);
1890 
1891     switch (bytes_per_count) {
1892       case 2:
1893         min_copy = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
1894         break;
1895       case 1:
1896       {
1897         Label L1, L2, L3;
1898         int min_copy1, min_copy2, min_copy3;
1899 
1900         Label L_loop_finished;
1901 
1902         if (forward) {
1903             __ tbz(to, 0, L2);
1904             __ tbz(to, 1, L1);
1905 
1906             __ BIND(L3);
1907             min_copy3 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 3, bytes_per_count, forward);
1908             __ b(L_loop_finished);
1909 
1910             __ BIND(L1);
1911             min_copy1 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 1, bytes_per_count, forward);
1912             __ b(L_loop_finished);
1913 
1914             __ BIND(L2);
1915             min_copy2 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
1916         } else {
1917             __ tbz(to, 0, L2);
1918             __ tbnz(to, 1, L3);
1919 
1920             __ BIND(L1);
1921             min_copy1 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 1, bytes_per_count, forward);
1922             __ b(L_loop_finished);
1923 
1924              __ BIND(L3);
1925             min_copy3 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 3, bytes_per_count, forward);
1926             __ b(L_loop_finished);
1927 
1928            __ BIND(L2);
1929             min_copy2 = align_dst_and_generate_shifted_copy_loop(from, to, count, Rval, 2, bytes_per_count, forward);
1930         }
1931 
1932         min_copy = MAX2(MAX2(min_copy1, min_copy2), min_copy3);
1933 
1934         __ BIND(L_loop_finished);
1935 
1936         break;
1937       }
1938       default:
1939         ShouldNotReachHere();
1940         break;
1941     }
1942 
1943     __ pop(RegisterSet(R4,R10));
1944 
1945     return min_copy;
1946   }
1947 
1948 #ifndef PRODUCT
1949   int * get_arraycopy_counter(int bytes_per_count) {
1950     switch (bytes_per_count) {
1951       case 1:
1952         return &SharedRuntime::_jbyte_array_copy_ctr;
1953       case 2:
1954         return &SharedRuntime::_jshort_array_copy_ctr;
1955       case 4:
1956         return &SharedRuntime::_jint_array_copy_ctr;
1957       case 8:
1958         return &SharedRuntime::_jlong_array_copy_ctr;
1959       default:
1960         ShouldNotReachHere();
1961         return NULL;
1962     }
1963   }
1964 #endif // !PRODUCT
1965 
1966   //
1967   //  Generate stub for primitive array copy.  If "aligned" is true, the
1968   //  "from" and "to" addresses are assumed to be heapword aligned.
1969   //
  //  If "disjoint" is true, arrays are assumed to be disjoint; otherwise they may overlap and
  //  "nooverlap_target" must be specified as the address to jump to if they don't.
1972   //
1973   // Arguments for generated stub:
1974   //      from:  R0
1975   //      to:    R1
1976   //      count: R2 treated as signed 32-bit int
1977   //
1978   address generate_primitive_copy(bool aligned, const char * name, bool status, int bytes_per_count, bool disjoint, address nooverlap_target = NULL) {
1979     __ align(CodeEntryAlignment);
1980     StubCodeMark mark(this, "StubRoutines", name);
1981     address start = __ pc();
1982 
1983     const Register from  = R0;   // source array address
1984     const Register to    = R1;   // destination array address
1985     const Register count = R2;   // elements count
1986     const Register tmp1  = R3;
1987     const Register tmp2  = R12;
1988 
1989     if (!aligned)  {
1990       BLOCK_COMMENT("Entry:");
1991     }
1992 
1993     __ zap_high_non_significant_bits(R2);
1994 
1995     if (!disjoint) {
1996       assert (nooverlap_target != NULL, "must be specified for conjoint case");
1997       array_overlap_test(nooverlap_target, exact_log2(bytes_per_count), tmp1, tmp2);
1998     }
1999 
2000     inc_counter_np(*get_arraycopy_counter(bytes_per_count), tmp1, tmp2);
2001 
    // Conjoint case: since execution reaches this point, the arrays overlap, so perform a backward copy
    // Disjoint case: perform forward copy
2004     bool forward = disjoint;
2005 
2006 
2007     if (!forward) {
2008       // Set 'from' and 'to' to upper bounds
2009       int log_bytes_per_count = exact_log2(bytes_per_count);
2010       __ add_ptr_scaled_int32(to,   to,   count, log_bytes_per_count);
2011       __ add_ptr_scaled_int32(from, from, count, log_bytes_per_count);
2012     }
2013 
    // There are two main copy loop implementations:
    //  *) The huge and complex one, applicable only to large enough arrays
    //  *) The small and simple one, applicable to any array (but not efficient for large arrays).
    // Currently the "small" implementation is used if and only if the "large" one cannot be used.
    // XXX optim: tune the limit higher ?
    // The lower applicability bound of the large implementation is determined by the src alignment
    // step (up to 7 bytes) plus the 8 words consumed per iteration of the aligned copy loop.
2021     const int small_copy_limit = (8*wordSize + 7) / bytes_per_count;
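    // For illustration, on 32-bit ARM (wordSize == 4) this yields 39 for byte copies,
    // 19 for shorts, 9 for ints and 4 for longs.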
2022 
2023     Label L_small_array;
2024     __ cmp_32(count, small_copy_limit);
2025     __ b(L_small_array, le);
2026 
2027     // Otherwise proceed with large implementation.
2028 
2029     bool from_is_aligned = (bytes_per_count >= 8);
2030     if (aligned && forward && (HeapWordSize % 8 == 0)) {
2031         // if 'from' is heapword aligned and HeapWordSize is divisible by 8,
2032         //  then from is aligned by 8
2033         from_is_aligned = true;
2034     }
2035 
2036     int count_required_to_align = from_is_aligned ? 0 : align_src(from, to, count, tmp1, bytes_per_count, forward);
2037     assert (small_copy_limit >= count_required_to_align, "alignment could exhaust count");
2038 
2039     // now 'from' is aligned
2040 
2041     bool to_is_aligned = false;
2042 
2043     if (bytes_per_count >= wordSize) {
2044       // 'to' is aligned by bytes_per_count, so it is aligned by wordSize
2045       to_is_aligned = true;
2046     } else {
2047       if (aligned && (8 % HeapWordSize == 0) && (HeapWordSize % wordSize == 0)) {
2048         // Originally 'from' and 'to' were heapword aligned;
2049         // (from - to) has not been changed, so since now 'from' is 8-byte aligned, then it is also heapword aligned,
2050         //  so 'to' is also heapword aligned and thus aligned by wordSize.
2051         to_is_aligned = true;
2052       }
2053     }
2054 
2055     Label L_unaligned_dst;
2056 
2057     if (!to_is_aligned) {
2058       BLOCK_COMMENT("Check dst alignment:");
2059       __ tst(to, wordSize - 1);
2060       __ b(L_unaligned_dst, ne); // 'to' is not aligned
2061     }
2062 
2063     // 'from' and 'to' are properly aligned
2064 
2065     int min_copy;
2066     if (forward) {
2067       min_copy = generate_forward_aligned_copy_loop (from, to, count, bytes_per_count);
2068     } else {
2069       min_copy = generate_backward_aligned_copy_loop(from, to, count, bytes_per_count);
2070     }
2071     assert(small_copy_limit >= count_required_to_align + min_copy, "first loop might exhaust count");
2072 
2073     if (status) {
2074       __ mov(R0, 0); // OK
2075     }
2076 
2077     __ ret();
2078 
2079     {
2080       copy_small_array(from, to, count, tmp1, tmp2, bytes_per_count, forward, L_small_array /* entry */);
2081 
2082       if (status) {
2083         __ mov(R0, 0); // OK
2084       }
2085 
2086       __ ret();
2087     }
2088 
    if (!to_is_aligned) {
2090       __ BIND(L_unaligned_dst);
2091       int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward);
2092       assert (small_copy_limit >= count_required_to_align + min_copy_shifted, "first loop might exhaust count");
2093 
2094       if (status) {
2095         __ mov(R0, 0); // OK
2096       }
2097 
2098       __ ret();
2099     }
2100 
2101     return start;
2102   }
2103 
2104 
2105   // Generates pattern of code to be placed after raw data copying in generate_oop_copy
2106   // Includes return from arraycopy stub.
2107   //
2108   // Arguments:
2109   //     to:       destination pointer after copying.
2110   //               if 'forward' then 'to' == upper bound, else 'to' == beginning of the modified region
2111   //     count:    total number of copied elements, 32-bit int
2112   //
  // Blows all volatile registers (R0-R3, Rtemp, LR) and the 'to', 'count', 'tmp' registers.
2114   void oop_arraycopy_stub_epilogue_helper(Register to, Register count, Register tmp, bool status, bool forward, DecoratorSet decorators) {
2115     assert_different_registers(to, count, tmp);
2116 
2117     if (forward) {
2118       // 'to' is upper bound of the modified region
2119       // restore initial dst:
2120       __ sub_ptr_scaled_int32(to, to, count, LogBytesPerHeapOop);
2121     }
2122 
2123     // 'to' is the beginning of the region
2124 
2125     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2126     bs->arraycopy_epilogue(_masm, decorators, true, to, count, tmp);
2127 
2128     if (status) {
2129       __ mov(R0, 0); // OK
2130     }
2131 
2132     __ pop(PC);
2133   }
2134 
2135 
2136   //  Generate stub for assign-compatible oop copy.  If "aligned" is true, the
2137   //  "from" and "to" addresses are assumed to be heapword aligned.
2138   //
  //  If "disjoint" is true, arrays are assumed to be disjoint; otherwise they may overlap and
  //  "nooverlap_target" must be specified as the address to jump to if they don't.
2141   //
2142   // Arguments for generated stub:
2143   //      from:  R0
2144   //      to:    R1
2145   //      count: R2 treated as signed 32-bit int
2146   //
2147   address generate_oop_copy(bool aligned, const char * name, bool status, bool disjoint, address nooverlap_target = NULL) {
2148     __ align(CodeEntryAlignment);
2149     StubCodeMark mark(this, "StubRoutines", name);
2150     address start = __ pc();
2151 
2152     Register from  = R0;
2153     Register to    = R1;
2154     Register count = R2;
2155     Register tmp1  = R3;
2156     Register tmp2  = R12;
2157 
2158 
2159     if (!aligned) {
2160       BLOCK_COMMENT("Entry:");
2161     }
2162 
2163     __ zap_high_non_significant_bits(R2);
2164 
2165     if (!disjoint) {
2166       assert (nooverlap_target != NULL, "must be specified for conjoint case");
2167       array_overlap_test(nooverlap_target, LogBytesPerHeapOop, tmp1, tmp2);
2168     }
2169 
2170     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, tmp1, tmp2);
2171 
    // Conjoint case: since execution reaches this point, the arrays overlap, so perform a backward copy
    // Disjoint case: perform forward copy
2174     bool forward = disjoint;
2175 
2176     const int bytes_per_count = BytesPerHeapOop;
2177     const int log_bytes_per_count = LogBytesPerHeapOop;
2178 
2179     const Register saved_count = LR;
2180     const int callee_saved_regs = 3; // R0-R2
2181 
2182     // LR is used later to save barrier args
2183     __ push(LR);
2184 
2185     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2186     if (disjoint) {
2187       decorators |= ARRAYCOPY_DISJOINT;
2188     }
2189     if (aligned) {
2190       decorators |= ARRAYCOPY_ALIGNED;
2191     }
2192 
2193     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2194     bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
2195 
2196     // save arguments for barrier generation (after the pre barrier)
2197     __ mov(saved_count, count);
2198 
2199     if (!forward) {
2200       __ add_ptr_scaled_int32(to,   to,   count, log_bytes_per_count);
2201       __ add_ptr_scaled_int32(from, from, count, log_bytes_per_count);
2202     }
2203 
2204     // for short arrays, just do single element copy
2205     Label L_small_array;
2206     const int small_copy_limit = (8*wordSize + 7)/bytes_per_count; // XXX optim: tune the limit higher ?
2207     __ cmp_32(count, small_copy_limit);
2208     __ b(L_small_array, le);
2209 
2210     bool from_is_aligned = (bytes_per_count >= 8);
2211     if (aligned && forward && (HeapWordSize % 8 == 0)) {
2212         // if 'from' is heapword aligned and HeapWordSize is divisible by 8,
2213         //  then from is aligned by 8
2214         from_is_aligned = true;
2215     }
2216 
2217     int count_required_to_align = from_is_aligned ? 0 : align_src(from, to, count, tmp1, bytes_per_count, forward);
2218     assert (small_copy_limit >= count_required_to_align, "alignment could exhaust count");
2219 
2220     // now 'from' is aligned
2221 
2222     bool to_is_aligned = false;
2223 
2224     if (bytes_per_count >= wordSize) {
2225       // 'to' is aligned by bytes_per_count, so it is aligned by wordSize
2226       to_is_aligned = true;
2227     } else {
2228       if (aligned && (8 % HeapWordSize == 0) && (HeapWordSize % wordSize == 0)) {
2229         // Originally 'from' and 'to' were heapword aligned;
2230         // (from - to) has not been changed, so since now 'from' is 8-byte aligned, then it is also heapword aligned,
2231         //  so 'to' is also heapword aligned and thus aligned by wordSize.
2232         to_is_aligned = true;
2233       }
2234     }
2235 
2236     Label L_unaligned_dst;
2237 
2238     if (!to_is_aligned) {
2239       BLOCK_COMMENT("Check dst alignment:");
2240       __ tst(to, wordSize - 1);
2241       __ b(L_unaligned_dst, ne); // 'to' is not aligned
2242     }
2243 
2244     int min_copy;
2245     if (forward) {
2246       min_copy = generate_forward_aligned_copy_loop(from, to, count, bytes_per_count);
2247     } else {
2248       min_copy = generate_backward_aligned_copy_loop(from, to, count, bytes_per_count);
2249     }
2250     assert(small_copy_limit >= count_required_to_align + min_copy, "first loop might exhaust count");
2251 
2252     oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
2253 
2254     {
2255       copy_small_array(from, to, count, tmp1, noreg, bytes_per_count, forward, L_small_array);
2256 
2257       oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
2258     }
2259 
2260     if (!to_is_aligned) {
2261       __ BIND(L_unaligned_dst);
2262       ShouldNotReachHere();
2263       int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward);
2264       assert (small_copy_limit >= count_required_to_align + min_copy_shifted, "first loop might exhaust count");
2265 
2266       oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
2267     }
2268 
2269     return start;
2270   }
2271 
2272   //  Generate 'unsafe' array copy stub
2273   //  Though just as safe as the other stubs, it takes an unscaled
2274   //  size_t argument instead of an element count.
2275   //
2276   // Arguments for generated stub:
2277   //      from:  R0
2278   //      to:    R1
2279   //      count: R2 byte count, treated as ssize_t, can be zero
2280   //
2281   // Examines the alignment of the operands and dispatches
2282   // to a long, int, short, or byte copy loop.
2283   //
2284   address generate_unsafe_copy(const char* name) {
2285 
2286     const Register R0_from   = R0;      // source array address
2287     const Register R1_to     = R1;      // destination array address
2288     const Register R2_count  = R2;      // elements count
2289 
2290     const Register R3_bits   = R3;      // test copy of low bits
2291 
2292     __ align(CodeEntryAlignment);
2293     StubCodeMark mark(this, "StubRoutines", name);
2294     address start = __ pc();
2295     const Register tmp = Rtemp;
2296 
2297     // bump this on entry, not on exit:
2298     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R3, tmp);
2299 
2300     __ orr(R3_bits, R0_from, R1_to);
2301     __ orr(R3_bits, R2_count, R3_bits);
2302 
2303     __ tst(R3_bits, BytesPerLong-1);
2304     __ mov(R2_count,AsmOperand(R2_count,asr,LogBytesPerLong), eq);
2305     __ jump(StubRoutines::_jlong_arraycopy, relocInfo::runtime_call_type, tmp, eq);
2306 
2307     __ tst(R3_bits, BytesPerInt-1);
2308     __ mov(R2_count,AsmOperand(R2_count,asr,LogBytesPerInt), eq);
2309     __ jump(StubRoutines::_jint_arraycopy, relocInfo::runtime_call_type, tmp, eq);
2310 
2311     __ tst(R3_bits, BytesPerShort-1);
2312     __ mov(R2_count,AsmOperand(R2_count,asr,LogBytesPerShort), eq);
2313     __ jump(StubRoutines::_jshort_arraycopy, relocInfo::runtime_call_type, tmp, eq);
2314 
2315     __ jump(StubRoutines::_jbyte_arraycopy, relocInfo::runtime_call_type, tmp);
2316     return start;
2317   }
2318 
2319   // Helper for generating a dynamic type check.
2320   // Smashes only the given temp registers.
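  // For illustration: the fast path compares the entry at sub_klass[super_check_offset]
  // with super_klass; the slow linear scan of the secondary supers array is only taken
  // when super_check_offset designates the secondary_super_cache slot, and a hit updates
  // that cache before succeeding.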
2321   void generate_type_check(Register sub_klass,
2322                            Register super_check_offset,
2323                            Register super_klass,
2324                            Register tmp1,
2325                            Register tmp2,
2326                            Register tmp3,
2327                            Label& L_success) {
2328     assert_different_registers(sub_klass, super_check_offset, super_klass, tmp1, tmp2, tmp3);
2329 
2330     BLOCK_COMMENT("type_check:");
2331 
2332     // If the pointers are equal, we are done (e.g., String[] elements).
2333 
2334     __ cmp(super_klass, sub_klass);
2335     __ b(L_success, eq); // fast success
2336 
2337 
2338     Label L_loop, L_fail;
2339 
2340     int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
2341 
2342     // Check the supertype display:
2343     __ ldr(tmp1, Address(sub_klass, super_check_offset));
2344     __ cmp(tmp1, super_klass);
2345     __ b(L_success, eq);
2346 
2347     __ cmp(super_check_offset, sc_offset);
2348     __ b(L_fail, ne); // failure
2349 
2350     BLOCK_COMMENT("type_check_slow_path:");
2351 
2352     // a couple of useful fields in sub_klass:
2353     int ss_offset = in_bytes(Klass::secondary_supers_offset());
2354 
2355     // Do a linear scan of the secondary super-klass chain.
2356 
2357 #ifndef PRODUCT
2358     int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
2359     __ inc_counter((address) pst_counter, tmp1, tmp2);
2360 #endif
2361 
2362     Register scan_temp = tmp1;
2363     Register count_temp = tmp2;
2364 
2365     // We will consult the secondary-super array.
2366     __ ldr(scan_temp, Address(sub_klass, ss_offset));
2367 
2368     Register search_key = super_klass;
2369 
2370     // Load the array length.
2371     __ ldr_s32(count_temp, Address(scan_temp, Array<Klass*>::length_offset_in_bytes()));
2372     __ add(scan_temp, scan_temp, Array<Klass*>::base_offset_in_bytes());
2373 
2374     __ add(count_temp, count_temp, 1);
2375 
2376     // Top of search loop
2377     __ bind(L_loop);
2378     // Notes:
2379     //  scan_temp starts at the array elements
2380     //  count_temp is 1+size
2381 
2382     __ subs(count_temp, count_temp, 1);
2383     __ b(L_fail, eq); // not found
2384 
2385     // Load next super to check
2386     // In the array of super classes elements are pointer sized.
2387     int element_size = wordSize;
2388     __ ldr(tmp3, Address(scan_temp, element_size, post_indexed));
2389 
2390     // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
2391     __ cmp(tmp3, search_key);
2392 
2393     // A miss means we are NOT a subtype and need to keep looping
2394     __ b(L_loop, ne);
2395 
2396     // Falling out the bottom means we found a hit; we ARE a subtype
2397 
2398     // Success.  Cache the super we found and proceed in triumph.
2399     __ str(super_klass, Address(sub_klass, sc_offset));
2400 
2401     // Jump to success
2402     __ b(L_success);
2403 
2404     // Fall through on failure!
2405     __ bind(L_fail);
2406   }
2407 
2408   //  Generate stub for checked oop copy.
2409   //
2410   // Arguments for generated stub:
2411   //      from:  R0
2412   //      to:    R1
2413   //      count: R2 treated as signed 32-bit int
2414   //      ckoff: R3 (super_check_offset)
2415   //      ckval: R4 (super_klass)
2416   //      ret:   R0 zero for success; (-1^K) where K is partial transfer count (32-bit)
2417   //
2418   address generate_checkcast_copy(const char * name) {
2419     __ align(CodeEntryAlignment);
2420     StubCodeMark mark(this, "StubRoutines", name);
2421     address start = __ pc();
2422 
2423     const Register from  = R0;  // source array address
2424     const Register to    = R1;  // destination array address
2425     const Register count = R2;  // elements count
2426 
2427     const Register R3_ckoff  = R3;      // super_check_offset
2428     const Register R4_ckval  = R4;      // super_klass
2429 
2430     const int callee_saved_regs = 4; // LR saved differently
2431 
2432     Label load_element, store_element, do_epilogue, fail;
2433 
2434     BLOCK_COMMENT("Entry:");
2435 
2436     __ zap_high_non_significant_bits(R2);
2437 
2438     int pushed = 0;
2439     __ push(LR);
2440     pushed+=1;
2441 
2442     DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
2443 
2444     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2445     bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
2446 
2447     const RegisterSet caller_saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
2448     __ push(caller_saved_regs);
2449     assert(caller_saved_regs.size() == 6, "check the count");
2450     pushed+=6;
2451 
2452     __ ldr(R4_ckval,Address(SP, wordSize*pushed)); // read the argument that was on the stack
2453 
2454     // Save arguments for barrier generation (after the pre barrier):
2455     // - must be a caller saved register and not LR
2456     // - ARM32: avoid R10 in case RThread is needed
2457     const Register saved_count = altFP_7_11;
2458     __ movs(saved_count, count); // and test count
2459     __ b(load_element,ne);
2460 
2461     // nothing to copy
2462     __ mov(R0, 0);
2463 
2464     __ pop(caller_saved_regs);
2465     __ pop(PC);
2466 
2467     // ======== begin loop ========
2468     // (Loop is rotated; its entry is load_element.)
2469     __ align(OptoLoopAlignment);
2470     __ BIND(store_element);
2471     if (UseCompressedOops) {
2472       __ store_heap_oop(Address(to, BytesPerHeapOop, post_indexed), R5);  // store the oop, changes flags
2473       __ subs_32(count,count,1);
2474     } else {
2475       __ subs_32(count,count,1);
2476       __ str(R5, Address(to, BytesPerHeapOop, post_indexed));             // store the oop
2477     }
2478     __ b(do_epilogue, eq); // count exhausted
2479 
2480     // ======== loop entry is here ========
2481     __ BIND(load_element);
2482     __ load_heap_oop(R5, Address(from, BytesPerHeapOop, post_indexed));  // load the oop
2483     __ cbz(R5, store_element); // NULL
2484 
2485     __ load_klass(R6, R5);
2486 
2487     generate_type_check(R6, R3_ckoff, R4_ckval, /*tmps*/ R12, R8, R9,
2488                         // branch to this on success:
2489                         store_element);
2490     // ======== end loop ========
2491 
2492     // It was a real error; we must depend on the caller to finish the job.
2493     // Register count has number of *remaining* oops, saved_count number of *total* oops.
2494     // Emit GC store barriers for the oops we have copied
2495     // and report their number to the caller (0 or (-1^n))
2496     __ BIND(fail);
2497 
2498     // Note: fail marked by the fact that count differs from saved_count
2499 
2500     __ BIND(do_epilogue);
2501 
2502     Register copied = R4; // saved
2503     Label L_not_copied;
2504 
2505     __ subs_32(copied, saved_count, count); // copied count (in saved reg)
2506     __ b(L_not_copied, eq); // nothing was copied, skip post barrier
2507     __ sub(to, to, AsmOperand(copied, lsl, LogBytesPerHeapOop)); // initial to value
2508     __ mov(R12, copied); // count arg scratched by post barrier
2509 
2510     bs->arraycopy_epilogue(_masm, decorators, true, to, R12, R3);
2511 
2512     assert_different_registers(R3,R12,LR,copied,saved_count);
2513     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R3, R12);
2514 
2515     __ BIND(L_not_copied);
2516     __ cmp_32(copied, saved_count); // values preserved in saved registers
2517 
2518     __ mov(R0, 0, eq); // 0 if all copied
2519     __ mvn(R0, copied, ne); // else NOT(copied)
2520     __ pop(caller_saved_regs);
2521     __ pop(PC);
2522 
2523     return start;
2524   }
2525 
2526   // Perform range checks on the proposed arraycopy.
2527   // Kills the two temps, but nothing else.
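  // For illustration: with src.length() == 10, src_pos == 7 and length == 4, end_pos == 11
  // exceeds the array length and the unsigned 'hi' comparison below branches to L_failed.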
2528   void arraycopy_range_checks(Register src,     // source array oop
2529                               Register src_pos, // source position (32-bit int)
2530                               Register dst,     // destination array oop
2531                               Register dst_pos, // destination position (32-bit int)
2532                               Register length,  // length of copy (32-bit int)
2533                               Register temp1, Register temp2,
2534                               Label& L_failed) {
2535 
2536     BLOCK_COMMENT("arraycopy_range_checks:");
2537 
2538     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2539 
2540     const Register array_length = temp1;  // scratch
2541     const Register end_pos      = temp2;  // scratch
2542 
2543     __ add_32(end_pos, length, src_pos);  // src_pos + length
2544     __ ldr_s32(array_length, Address(src, arrayOopDesc::length_offset_in_bytes()));
2545     __ cmp_32(end_pos, array_length);
2546     __ b(L_failed, hi);
2547 
2548     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2549     __ add_32(end_pos, length, dst_pos); // dst_pos + length
2550     __ ldr_s32(array_length, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2551     __ cmp_32(end_pos, array_length);
2552     __ b(L_failed, hi);
2553 
2554     BLOCK_COMMENT("arraycopy_range_checks done");
2555   }
2556 
2557   //
2558   //  Generate generic array copy stubs
2559   //
2560   //  Input:
2561   //    R0    -  src oop
2562   //    R1    -  src_pos (32-bit int)
2563   //    R2    -  dst oop
2564   //    R3    -  dst_pos (32-bit int)
2565   //    SP[0] -  element count (32-bit int)
2566   //
2567   //  Output: (32-bit int)
2568   //    R0 ==  0  -  success
2569   //    R0 <   0  -  need to call System.arraycopy
2570   //
2571   address generate_generic_copy(const char *name) {
2572     Label L_failed, L_objArray;
2573 
2574     // Input registers
2575     const Register src      = R0;  // source array oop
2576     const Register src_pos  = R1;  // source position
2577     const Register dst      = R2;  // destination array oop
2578     const Register dst_pos  = R3;  // destination position
2579 
2580     // registers used as temp
2581     const Register R5_src_klass = R5; // source array klass
2582     const Register R6_dst_klass = R6; // destination array klass
2583     const Register R_lh         = altFP_7_11; // layout handler
2584     const Register R8_temp      = R8;
2585 
2586     __ align(CodeEntryAlignment);
2587     StubCodeMark mark(this, "StubRoutines", name);
2588     address start = __ pc();
2589 
2590     __ zap_high_non_significant_bits(R1);
2591     __ zap_high_non_significant_bits(R3);
2592     __ zap_high_non_significant_bits(R4);
2593 
2594     int pushed = 0;
2595     const RegisterSet saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
2596     __ push(saved_regs);
2597     assert(saved_regs.size() == 6, "check the count");
2598     pushed+=6;
2599 
2600     // bump this on entry, not on exit:
2601     inc_counter_np(SharedRuntime::_generic_array_copy_ctr, R5, R12);
2602 
2603     const Register length   = R4;  // elements count
2604     __ ldr(length, Address(SP,4*pushed));
2605 
2606 
2607     //-----------------------------------------------------------------------
2608     // Assembler stubs will be used for this call to arraycopy
2609     // if the following conditions are met:
2610     //
2611     // (1) src and dst must not be null.
2612     // (2) src_pos must not be negative.
2613     // (3) dst_pos must not be negative.
2614     // (4) length  must not be negative.
2615     // (5) src klass and dst klass should be the same and not NULL.
2616     // (6) src and dst should be arrays.
2617     // (7) src_pos + length must not exceed length of src.
2618     // (8) dst_pos + length must not exceed length of dst.
2619     BLOCK_COMMENT("arraycopy initial argument checks");
2620 
2621     //  if (src == NULL) return -1;
2622     __ cbz(src, L_failed);
2623 
2624     //  if (src_pos < 0) return -1;
2625     __ cmp_32(src_pos, 0);
2626     __ b(L_failed, lt);
2627 
2628     //  if (dst == NULL) return -1;
2629     __ cbz(dst, L_failed);
2630 
2631     //  if (dst_pos < 0) return -1;
2632     __ cmp_32(dst_pos, 0);
2633     __ b(L_failed, lt);
2634 
2635     //  if (length < 0) return -1;
2636     __ cmp_32(length, 0);
2637     __ b(L_failed, lt);
2638 
2639     BLOCK_COMMENT("arraycopy argument klass checks");
2640     //  get src->klass()
2641     __ load_klass(R5_src_klass, src);
2642 
2643     // Load layout helper
2644     //
2645     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2646     // 32        30    24            16              8     2                 0
2647     //
2648     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2649     //
2650 
2651     int lh_offset = in_bytes(Klass::layout_helper_offset());
2652     __ ldr_u32(R_lh, Address(R5_src_klass, lh_offset));
2653 
2654     __ load_klass(R6_dst_klass, dst);
2655 
2656     // Handle objArrays completely differently...
2657     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2658     __ mov_slow(R8_temp, objArray_lh);
2659     __ cmp_32(R_lh, R8_temp);
2660     __ b(L_objArray,eq);
2661 
2662     //  if (src->klass() != dst->klass()) return -1;
2663     __ cmp(R5_src_klass, R6_dst_klass);
2664     __ b(L_failed, ne);
2665 
2666     //  if (!src->is_Array()) return -1;
2667     __ cmp_32(R_lh, Klass::_lh_neutral_value); // < 0
2668     __ b(L_failed, ge);
2669 
2670     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2671                            R8_temp, R6_dst_klass, L_failed);
2672 
2673     {
2674       // TypeArrayKlass
2675       //
2676       // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2677       // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2678       //
2679 
2680       const Register R6_offset = R6_dst_klass;    // array offset
2681       const Register R12_elsize = R12;            // log2 element size
2682 
2683       __ logical_shift_right(R6_offset, R_lh, Klass::_lh_header_size_shift);
2684       __ andr(R6_offset, R6_offset, (unsigned int)Klass::_lh_header_size_mask); // array_offset
2685       __ add(src, src, R6_offset);       // src array offset
2686       __ add(dst, dst, R6_offset);       // dst array offset
2687       __ andr(R12_elsize, R_lh, (unsigned int)Klass::_lh_log2_element_size_mask); // log2 element size
2688 
2689       // next registers should be set before the jump to corresponding stub
2690       const Register from     = R0;  // source array address
2691       const Register to       = R1;  // destination array address
2692       const Register count    = R2;  // elements count
2693 
2694       // 'from', 'to', 'count' registers should be set in this order
2695       // since they are the same as 'src', 'src_pos', 'dst'.
2696 
2697 
2698       BLOCK_COMMENT("scale indexes to element size");
2699       __ add(from, src, AsmOperand(src_pos, lsl, R12_elsize));       // src_addr
2700       __ add(to, dst, AsmOperand(dst_pos, lsl, R12_elsize));         // dst_addr
2701 
2702       __ mov(count, length);  // length
2703 
2704       // XXX optim: avoid later push in arraycopy variants ?
2705 
2706       __ pop(saved_regs);
2707 
2708       BLOCK_COMMENT("choose copy loop based on element size");
2709       __ cmp(R12_elsize, 0);
2710       __ b(StubRoutines::_jbyte_arraycopy,eq);
2711 
2712       __ cmp(R12_elsize, LogBytesPerShort);
2713       __ b(StubRoutines::_jshort_arraycopy,eq);
2714 
2715       __ cmp(R12_elsize, LogBytesPerInt);
2716       __ b(StubRoutines::_jint_arraycopy,eq);
2717 
2718       __ b(StubRoutines::_jlong_arraycopy);
2719 
2720     }
2721 
2722     // ObjArrayKlass
2723     __ BIND(L_objArray);
2724     // live at this point:  R5_src_klass, R6_dst_klass, src[_pos], dst[_pos], length
2725 
2726     Label L_plain_copy, L_checkcast_copy;
2727     //  test array classes for subtyping
2728     __ cmp(R5_src_klass, R6_dst_klass);         // usual case is exact equality
2729     __ b(L_checkcast_copy, ne);
2730 
2731     BLOCK_COMMENT("Identically typed arrays");
2732     {
2733       // Identically typed arrays can be copied without element-wise checks.
2734       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2735                              R8_temp, R_lh, L_failed);
2736 
2737       // next registers should be set before the jump to corresponding stub
2738       const Register from     = R0;  // source array address
2739       const Register to       = R1;  // destination array address
2740       const Register count    = R2;  // elements count
2741 
2742       __ add(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2743       __ add(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2744       __ add_ptr_scaled_int32(from, src, src_pos, LogBytesPerHeapOop);         // src_addr
2745       __ add_ptr_scaled_int32(to, dst, dst_pos, LogBytesPerHeapOop);           // dst_addr
2746       __ BIND(L_plain_copy);
2747       __ mov(count, length);
2748 
2749       __ pop(saved_regs); // XXX optim: avoid later push in oop_arraycopy ?
2750       __ b(StubRoutines::_oop_arraycopy);
2751     }
2752 
2753     {
2754       __ BIND(L_checkcast_copy);
2755       // live at this point:  R5_src_klass, R6_dst_klass
2756 
2757       // Before looking at dst.length, make sure dst is also an objArray.
2758       __ ldr_u32(R8_temp, Address(R6_dst_klass, lh_offset));
2759       __ cmp_32(R_lh, R8_temp);
2760       __ b(L_failed, ne);
2761 
2762       // It is safe to examine both src.length and dst.length.
2763 
2764       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2765                              R8_temp, R_lh, L_failed);
2766 
2767       // next registers should be set before the jump to corresponding stub
2768       const Register from     = R0;  // source array address
2769       const Register to       = R1;  // destination array address
2770       const Register count    = R2;  // elements count
2771 
2772       // Marshal the base address arguments now, freeing registers.
2773       __ add(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2774       __ add(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2775       __ add_ptr_scaled_int32(from, src, src_pos, LogBytesPerHeapOop);         // src_addr
2776       __ add_ptr_scaled_int32(to, dst, dst_pos, LogBytesPerHeapOop);           // dst_addr
2777 
2778       __ mov(count, length); // length (reloaded)
2779 
2780       Register sco_temp = R3;                   // this register is free now
2781       assert_different_registers(from, to, count, sco_temp,
2782                                  R6_dst_klass, R5_src_klass);
2783 
2784       // Generate the type check.
2785       int sco_offset = in_bytes(Klass::super_check_offset_offset());
2786       __ ldr_u32(sco_temp, Address(R6_dst_klass, sco_offset));
2787       generate_type_check(R5_src_klass, sco_temp, R6_dst_klass,
2788                           R8_temp, R9,
2789                           R12,
2790                           L_plain_copy);
2791 
2792       // Fetch destination element klass from the ObjArrayKlass header.
2793       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2794 
2795       // the checkcast_copy loop needs two extra arguments:
2796       const Register Rdst_elem_klass = R3;
2797       __ ldr(Rdst_elem_klass, Address(R6_dst_klass, ek_offset));   // dest elem klass
2798       __ pop(saved_regs); // XXX optim: avoid later push in oop_arraycopy ?
2799       __ str(Rdst_elem_klass, Address(SP,0));    // dest elem klass argument
2800       __ ldr_u32(R3, Address(Rdst_elem_klass, sco_offset));  // sco of elem klass
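           // Arguments expected by the checkcast stub are now in place:
           //   R0    = from  (source element address)
           //   R1    = to    (destination element address)
           //   R2    = count (number of oop elements to copy)
           //   R3    = super_check_offset of the destination element klass
           //   SP[0] = the destination element klass itself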
2801       __ b(StubRoutines::_checkcast_arraycopy);
2802     }
2803 
2804     __ BIND(L_failed);
2805 
2806     __ pop(saved_regs);
2807     __ mvn(R0, 0); // failure, with 0 copied
2808     __ ret();
2809 
2810     return start;
2811   }
2812 
2813   // Safefetch stubs.
2814   void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
2815     // safefetch signatures:
2816     //   int      SafeFetch32(int*      adr, int      errValue);
2817     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
2818     //
2819     // arguments:
2820     //   R0 = adr
2821     //   R1 = errValue
2822     //
2823     // result:
2824     //   R0  = *adr or errValue
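         //
         // Illustrative caller-side sketch (for context only; the SafeFetch32
         // wrapper itself lives in shared runtime code, not in this file):
         //   int v = SafeFetch32(addr, 0xBAD);
         //   // v == 0xBAD  =>  the load faulted (or *addr really was 0xBAD)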
2825 
2826     StubCodeMark mark(this, "StubRoutines", name);
2827 
2828     // Entry point, pc or function descriptor.
2829     *entry = __ pc();
2830 
2831     // Load *adr into R1; this load may fault.
2832     *fault_pc = __ pc();
2833 
2834     switch (size) {
2835       case 4: // int32_t
2836         __ ldr_s32(R1, Address(R0));
2837         break;
2838 
2839       case 8: // int64_t
2840         Unimplemented();
2841         break;
2842 
2843       default:
2844         ShouldNotReachHere();
2845     }
2846 
2847     // return errValue or *adr
2848     *continuation_pc = __ pc();
2849     __ mov(R0, R1);
2850     __ ret();
2851   }
2852 
2853   void generate_arraycopy_stubs() {
2854 
2855     // Note:  the disjoint stubs must be generated first, some of
2856     //        the conjoint stubs use them.
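         //        (as seen below, each conjoint generator is passed the matching
         //        disjoint entry so that it can branch to it when the source and
         //        destination ranges do not overlap)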
2857 
2858     bool status = false; // non-failing C2 stubs need not return a status in R0
2859 
2860 #ifdef TEST_C2_GENERIC_ARRAYCOPY /* Internal development flag */
2861     // With this flag, the C2 stubs are tested by generating calls to
2862     // generic_arraycopy instead of Runtime1::arraycopy
2863 
2864     // Runtime1::arraycopy returns a status in R0 (0 if OK, else ~copied)
2865     // and the result is tested to see whether the arraycopy stub should
2866     // be called.
2867 
2868     // When we test arraycopy this way, we must generate extra code in the
2869     // arraycopy methods callable from C2 generic_arraycopy to set the
2870     // status to 0 for those that always succeed (calling the slow path stub might
2871     // lead to errors since the copy has already been performed).
2872 
2873     status = true; // generate a status compatible with C1 calls
2874 #endif
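
         // Status convention when a stub does return one (cf. the Runtime1::arraycopy
         // note above and the L_failed path in generate_checkcast_copy): R0 == 0 means
         // the whole copy was performed, otherwise R0 == ~n where n elements were
         // copied before the failure. Illustrative caller-side sketch:
         //   int status = ...;        // value returned in R0 by the stub
         //   if (status != 0) {
         //     int copied = ~status;  // elements already copied before bailing out
         //   }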
2875 
2876     // these always need a status in case they are called from generic_arraycopy
2877     StubRoutines::_jbyte_disjoint_arraycopy  = generate_primitive_copy(false, "jbyte_disjoint_arraycopy",  true, 1, true);
2878     StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(false, "jshort_disjoint_arraycopy", true, 2, true);
2879     StubRoutines::_jint_disjoint_arraycopy   = generate_primitive_copy(false, "jint_disjoint_arraycopy",   true, 4, true);
2880     StubRoutines::_jlong_disjoint_arraycopy  = generate_primitive_copy(false, "jlong_disjoint_arraycopy",  true, 8, true);
2881     StubRoutines::_oop_disjoint_arraycopy    = generate_oop_copy      (false, "oop_disjoint_arraycopy",    true,    true);
2882 
2883     StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = generate_primitive_copy(true, "arrayof_jbyte_disjoint_arraycopy", status, 1, true);
2884     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jshort_disjoint_arraycopy", status, 2, true);
2885     StubRoutines::_arrayof_jint_disjoint_arraycopy   = generate_primitive_copy(true, "arrayof_jint_disjoint_arraycopy",  status, 4, true);
2886     StubRoutines::_arrayof_jlong_disjoint_arraycopy  = generate_primitive_copy(true, "arrayof_jlong_disjoint_arraycopy", status, 8, true);
2887     StubRoutines::_arrayof_oop_disjoint_arraycopy    = generate_oop_copy      (true, "arrayof_oop_disjoint_arraycopy",   status,    true);
2888 
2889     // these always need a status in case they are called from generic_arraycopy
2890     StubRoutines::_jbyte_arraycopy  = generate_primitive_copy(false, "jbyte_arraycopy",  true, 1, false, StubRoutines::_jbyte_disjoint_arraycopy);
2891     StubRoutines::_jshort_arraycopy = generate_primitive_copy(false, "jshort_arraycopy", true, 2, false, StubRoutines::_jshort_disjoint_arraycopy);
2892     StubRoutines::_jint_arraycopy   = generate_primitive_copy(false, "jint_arraycopy",   true, 4, false, StubRoutines::_jint_disjoint_arraycopy);
2893     StubRoutines::_jlong_arraycopy  = generate_primitive_copy(false, "jlong_arraycopy",  true, 8, false, StubRoutines::_jlong_disjoint_arraycopy);
2894     StubRoutines::_oop_arraycopy    = generate_oop_copy      (false, "oop_arraycopy",    true,    false, StubRoutines::_oop_disjoint_arraycopy);
2895 
2896     StubRoutines::_arrayof_jbyte_arraycopy    = generate_primitive_copy(true, "arrayof_jbyte_arraycopy",  status, 1, false, StubRoutines::_arrayof_jbyte_disjoint_arraycopy);
2897     StubRoutines::_arrayof_jshort_arraycopy   = generate_primitive_copy(true, "arrayof_jshort_arraycopy", status, 2, false, StubRoutines::_arrayof_jshort_disjoint_arraycopy);
2898 #ifdef _LP64
2899     // since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
2900     StubRoutines::_arrayof_jint_arraycopy     = generate_primitive_copy(true, "arrayof_jint_arraycopy",   status, 4, false, StubRoutines::_arrayof_jint_disjoint_arraycopy);
2901 #else
2902     StubRoutines::_arrayof_jint_arraycopy     = StubRoutines::_jint_arraycopy;
2903 #endif
2904     if (BytesPerHeapOop < HeapWordSize) {
2905       StubRoutines::_arrayof_oop_arraycopy    = generate_oop_copy      (true, "arrayof_oop_arraycopy",    status,    false, StubRoutines::_arrayof_oop_disjoint_arraycopy);
2906     } else {
2907       StubRoutines::_arrayof_oop_arraycopy    = StubRoutines::_oop_arraycopy;
2908     }
2909     StubRoutines::_arrayof_jlong_arraycopy    = StubRoutines::_jlong_arraycopy;
2910 
2911     StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
2912     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
2913     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");
2914 
2915 
2916   }
2917 
2918 #define COMPILE_CRYPTO
2919 #include "stubRoutinesCrypto_arm.cpp"
2920 
2921  private:
2922 
2923 #undef  __
2924 #define __ masm->
2925 
2926   //------------------------------------------------------------------------------------------------------------------------
2927   // Continuation point for throwing of implicit exceptions that are not handled in
2928   // the current activation. Fabricates an exception oop and initiates normal
2929   // exception dispatching in this frame.
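       // The runtime_entry is invoked with Rthread in R0 (any extra arguments are
       // expected to already be in R1/R2); once the pending exception has been set,
       // control continues at StubRoutines::forward_exception_entry().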
2930   address generate_throw_exception(const char* name, address runtime_entry) {
2931     int insts_size = 128;
2932     int locs_size  = 32;
2933     CodeBuffer code(name, insts_size, locs_size);
2934     OopMapSet* oop_maps;
2935     int frame_size;
2936     int frame_complete;
2937 
2938     oop_maps = new OopMapSet();
2939     MacroAssembler* masm = new MacroAssembler(&code);
2940 
2941     address start = __ pc();
2942 
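         // frame_size is in words: FP and LR, saved by the raw_push below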
2943     frame_size = 2;
2944     __ mov(Rexception_pc, LR);
2945     __ raw_push(FP, LR);
2946 
2947     frame_complete = __ pc() - start;
2948 
2949     // Any extra arguments are supposed to already be in R1 and R2
2950     __ mov(R0, Rthread);
2951 
2952     int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);
2953     assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
2954     __ call(runtime_entry);
2955     if (pc_offset == -1) {
2956       pc_offset = __ offset();
2957     }
2958 
2959     // Generate oop map
2960     OopMap* map =  new OopMap(frame_size*VMRegImpl::slots_per_word, 0);
2961     oop_maps->add_gc_map(pc_offset, map);
2962     __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
2963 
2964     __ raw_pop(FP, LR);
2965     __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
2966 
2967     RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete,
2968                                                       frame_size, oop_maps, false);
2969     return stub->entry_point();
2970   }
2971 
2972   //---------------------------------------------------------------------------
2973   // Initialization
2974 
2975   void generate_initial() {
2976     // Generates the initial stubs and initializes their entry points
2977 
2978     //------------------------------------------------------------------------------------------------------------------------
2979     // entry points that exist in all platforms
2980     // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
2981     //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
2982     StubRoutines::_forward_exception_entry      = generate_forward_exception();
2983 
2984     StubRoutines::_call_stub_entry              =
2985       generate_call_stub(StubRoutines::_call_stub_return_address);
2986     // is referenced by megamorphic call
2987     StubRoutines::_catch_exception_entry        = generate_catch_exception();
2988 
2989     // stub for throwing stack overflow error used both by interpreter and compiler
2990     StubRoutines::_throw_StackOverflowError_entry  = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
2991 
2992     // integer division used both by interpreter and compiler
2993     StubRoutines::Arm::_idiv_irem_entry = generate_idiv_irem();
2994 
2995     StubRoutines::_atomic_add_entry = generate_atomic_add();
2996     StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
2997     StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
2998     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
2999     StubRoutines::_atomic_load_long_entry = generate_atomic_load_long();
3000     StubRoutines::_atomic_store_long_entry = generate_atomic_store_long();
3001   }
3002 
3003   void generate_all() {
3004     // Generates all stubs and initializes the entry points
3005 
3006 #ifdef COMPILER2
3007     // Generate partial_subtype_check first here since its code depends on
3008     // UseZeroBaseCompressedOops which is defined after heap initialization.
3009     StubRoutines::Arm::_partial_subtype_check                = generate_partial_subtype_check();
3010 #endif
3011     // These entry points require SharedInfo::stack0 to be set up in non-core builds
3012     // and need to be relocatable, so they each fabricate a RuntimeStub internally.
3013     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
3014     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
3015     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
3016 
3017     //------------------------------------------------------------------------------------------------------------------------
3018     // entry points that are platform specific
3019 
3020     // support for verify_oop (must happen after universe_init)
3021     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop();
3022 
3023     // arraycopy stubs used by compilers
3024     generate_arraycopy_stubs();
3025 
3026     // Safefetch stubs.
3027     generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
3028                                                    &StubRoutines::_safefetch32_fault_pc,
3029                                                    &StubRoutines::_safefetch32_continuation_pc);
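         // On this 32-bit port intptr_t has the same width as int (see the assert
         // below), so the SafeFetchN entry points can simply reuse the 32-bit stub.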
3030     assert (sizeof(int) == wordSize, "32-bit architecture");
3031     StubRoutines::_safefetchN_entry           = StubRoutines::_safefetch32_entry;
3032     StubRoutines::_safefetchN_fault_pc        = StubRoutines::_safefetch32_fault_pc;
3033     StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;
3034 
3035 #ifdef COMPILE_CRYPTO
3036     // generate AES intrinsics code
3037     if (UseAESIntrinsics) {
3038       aes_init();
3039       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
3040       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
3041       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
3042       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
3043     }
3044 #endif // COMPILE_CRYPTO
3045   }
3046 
3047 
3048  public:
3049   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3050     if (all) {
3051       generate_all();
3052     } else {
3053       generate_initial();
3054     }
3055   }
3056 }; // end class declaration
3057 
3058 void StubGenerator_generate(CodeBuffer* code, bool all) {
3059   StubGenerator g(code, all);
3060 }