/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

#ifndef _LP64
float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require a stub");

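  // The main-line cvttss2si / cvttsd2si has already loaded the result
  // register with 0x80000000, the x86 "integer indefinite" value, so only
  // the out-of-range cases need fixing up here: NaN -> 0, positive
  // overflow -> maxInt, negative overflow -> minInt (already in place).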
  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);      // unordered compare -> input is NaN
  __ jccb(Assembler::below, do_return); // input < 0 -> minInt is already in the result

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}
#endif // !_LP64

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
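  // pc() - offset() is the start of the code buffer; adding the recorded
  // safepoint_offset() reconstructs the address of the safepoint poll,
  // which is saved in the thread for the return handler to pick up.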
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
#ifdef _LP64
  __ lea(rscratch1, safepoint_pc);
  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
#else
  const Register tmp1 = rcx;
  const Register tmp2 = rdx;
  __ push(tmp1);
  __ push(tmp2);

  __ lea(tmp1, safepoint_pc);
  __ get_thread(tmp2);
  __ movptr(Address(tmp2, JavaThread::saved_exception_pc_offset()), tmp1);

  __ pop(tmp2);
  __ pop(tmp1);
#endif /* _LP64 */
  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");

  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  __ jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
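  // Pass the Method* and bci to the runtime; store_parameter() writes them
  // into the outgoing argument slots of the current frame.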
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on the stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  C1StubId stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = C1StubId::throw_index_exception_id;
  } else {
    stub_id = C1StubId::throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of LoadFlattenedArrayStub

LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _result = result;
  // Tell the register allocator that the runtime call will scratch rax.
  _scratch_reg = FrameMap::rax_oop_opr;
  _info = new CodeEmitInfo(info);
}

void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 1);
  ce->store_parameter(_index->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::load_flat_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  if (_result->as_register() != rax) {
    __ movptr(_result->as_register(), rax); // the runtime call returns the element in rax
  }
  __ jmp(_continuation);
}


// Implementation of StoreFlattenedArrayStub

StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _value = value;
  // Tell the register allocator that the runtime call will scratch rax.
  _scratch_reg = FrameMap::rax_oop_opr;
  _info = new CodeEmitInfo(info);
}


void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 2);
  ce->store_parameter(_index->as_register(), 1);
  ce->store_parameter(_value->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::store_flat_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


// Implementation of SubstitutabilityCheckStub

SubstitutabilityCheckStub::SubstitutabilityCheckStub(LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  _left = left;
  _right = right;
  // Tell the register allocator that the runtime call will scratch rax.
  _scratch_reg = FrameMap::rax_oop_opr;
  _info = new CodeEmitInfo(info);
}

void SubstitutabilityCheckStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_left->as_register(), 1);
  ce->store_parameter(_right->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::substitutability_check_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == C1StubId::new_instance_id                 ||
         stub_id == C1StubId::fast_new_instance_id            ||
         stub_id == C1StubId::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register()); // the runtime stubs expect the klass in rdx
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
                                       CodeEmitInfo* info, bool is_null_free) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
  _is_null_free = is_null_free;
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  if (_is_null_free) {
    __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_null_free_array_id)));
  } else {
    __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id)));
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  if (_throw_ie_stub != nullptr) {
    // When we come here, _obj_reg has already been checked to be non-null.
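    // A value object cannot be used as a monitor: if its mark word bits
    // match the inline type pattern, branch out to throw the exception.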
    const int is_value_mask = markWord::inline_type_pattern;
    Register mark = _scratch_reg->as_register();
    __ movptr(mark, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes()));
    __ andptr(mark, is_value_mask);
    __ cmpl(mark, is_value_mask);
    __ jcc(Assembler::equal, *_throw_ie_stub->entry());
  }
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  C1StubId enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = C1StubId::monitorenter_id;
  } else {
    enter_id = C1StubId::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  C1StubId exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = C1StubId::monitorexit_id;
  } else {
    exit_id = C1StubId::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
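//
// Roughly, the code emitted by PatchingStub::emit_code() below is laid out
// like this (labels are for exposition only; the exact bytes depend on the
// instruction being patched):
//
//   patch site:  jmp stub_call               ; installed by NativeGeneralJump
//   ...
//   stub:        <copy of the patched instruction's bytes>
//                [being_initialized check]   ; load_mirror_id only
//                movl reg, imm32             ; 5-byte patch record (offsets/sizes)
//   stub_call:   call <patching runtime entry>
//                jmp  <back to the patch site>
//                nops                        ; room for deopt to install a call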

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction. It appears
  // mostly impossible on Intel to simply invalidate other processors'
  // caches, and since they may do aggressive prefetching it's very hard
  // to guess what code might be in the icache. Force the instruction to
  // be double-word aligned so that it doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = nullptr;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = nullptr;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    __ push(tmp);
    __ movptr(tmp, Address(_obj, java_lang_Class::klass_offset()));
    __ cmpptr(r15_thread, Address(tmp, InstanceKlass::init_thread_offset()));
    __ pop(tmp); // pop it right away, no matter which path we take
    __ jccb(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8); // movl eax, imm32 opcode
  __ emit_int8(0);                   // first "immediate" byte, unused
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
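  // Overwrite the start of the patch site with an unconditional jump to
  // this point in the stub, so threads executing the site enter the
  // patching code below.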
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(C1StubId::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since concurrent
  // code rewriting may transiently create an illegal instruction sequence.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5);
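  // java_calling_convention() has filled in args[] with the register or
  // stack slot assigned to each argument by the Java calling convention.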

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments that were assigned stack slots are stored now; the others
  // are already in their calling-convention registers
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt), rscratch1);
  }
#endif

  __ jmp(_continuation);
}

#undef __