/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

#ifndef _LP64
float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

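  // The preceding hardware conversion (cvttss2si/fist) produces 0x80000000,
  // the "integer indefinite" value, for NaN and out-of-range inputs. The JVMS
  // instead requires f2i/d2i to yield 0 for NaN, Integer.MAX_VALUE on positive
  // overflow and Integer.MIN_VALUE on negative overflow, so this stub
  // re-examines the input: MIN_VALUE (0x80000000) is already in the result
  // register, and only the NaN and positive cases need fixing up.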
  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}
#endif // !_LP64

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
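  // pc() - offset() rewinds to the start of the code buffer, so adding
  // safepoint_offset() gives the address of the poll instruction itself.
  // That pc is recorded in the thread as the saved exception pc so the
  // runtime can identify the frame being polled.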
#ifdef _LP64
  __ lea(rscratch1, safepoint_pc);
  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
#else
  const Register tmp1 = rcx;
  const Register tmp2 = rdx;
  __ push(tmp1);
  __ push(tmp2);

  __ lea(tmp1, safepoint_pc);
  __ get_thread(tmp2);
  __ movptr(Address(tmp2, JavaThread::saved_exception_pc_offset()), tmp1);

  __ pop(tmp2);
  __ pop(tmp1);
#endif /* _LP64 */
  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");

  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  __ jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  C1StubId stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = C1StubId::throw_index_exception_id;
  } else {
    stub_id = C1StubId::throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of LoadFlattenedArrayStub

LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _result = result;
  // Tell the register allocator that the runtime call will scratch rax.
  _scratch_reg = FrameMap::rax_oop_opr;
  _info = new CodeEmitInfo(info);
}

void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 1);
  ce->store_parameter(_index->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::load_flat_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  if (_result->as_register() != rax) {
    __ movptr(_result->as_register(), rax);
  }
  __ jmp(_continuation);
}


// Implementation of StoreFlattenedArrayStub

StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _value = value;
  // Tell the register allocator that the runtime call will scratch rax.
  _scratch_reg = FrameMap::rax_oop_opr;
  _info = new CodeEmitInfo(info);
}


void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 2);
  ce->store_parameter(_index->as_register(), 1);
  ce->store_parameter(_value->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::store_flat_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


// Implementation of SubstitutabilityCheckStub

SubstitutabilityCheckStub::SubstitutabilityCheckStub(LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  _left = left;
  _right = right;
  // Tell the register allocator that the runtime call will scratch rax.
  _scratch_reg = FrameMap::rax_oop_opr;
  _info = new CodeEmitInfo(info);
}

void SubstitutabilityCheckStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_left->as_register(), 1);
  ce->store_parameter(_right->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::substitutability_check_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == C1StubId::new_instance_id                 ||
         stub_id == C1StubId::fast_new_instance_id            ||
         stub_id == C1StubId::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
                                       CodeEmitInfo* info, bool is_null_free) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
  _is_null_free = is_null_free;
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  if (_is_null_free) {
    __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_null_free_array_id)));
  } else {
    __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id)));
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  if (_throw_ie_stub != nullptr) {
    // When we come here, _obj_reg has already been checked to be non-null.
    const int is_value_mask = markWord::inline_type_pattern;
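    // The mark word of an inline-type instance has all inline_type_pattern
    // bits set; monitorenter must fail on such objects, so when the pattern
    // matches we branch to the stub that throws the exception.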
    Register mark = _scratch_reg->as_register();
    __ movptr(mark, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes()));
    __ andptr(mark, is_value_mask);
    __ cmpl(mark, is_value_mask);
    __ jcc(Assembler::equal, *_throw_ie_stub->entry());
  }
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  C1StubId enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = C1StubId::monitorenter_id;
  } else {
    enter_id = C1StubId::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  C1StubId exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = C1StubId::monitorexit_id;
  } else {
    exit_id = C1StubId::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
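//
// Illustrative sketch of a patched site (encodings schematic, not exact):
//   before:  mov reg, <constant>        ; 5-7 byte instruction at _pc_start
//   after:   jmp <PatchingStub entry>   ; inserted by NativeGeneralJump
// The original bytes live on in the stub's copy until the runtime writes them
// back and re-executes the instruction.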

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and need to make
  // sure that no processor sees only a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may prefetch aggressively it's
  // very hard to guess what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
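  // For example, on LP64 (wordSize == 8) with a 5-byte NativeGeneralJump this
  // is align_up(5, 8) == 8, so the 5-7 byte instruction starts on an 8-byte
  // boundary and stays within one aligned word, hence one cache line.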
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = nullptr;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = nullptr;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
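  // Byte layout of the record (reads back as "movl eax, imm32"):
  //   0xB8                             movl opcode
  //   0x00                             unused filler byte
  //   being_initialized_entry_offset   distance back to being_initialized_entry
  //   bytes_to_skip                    bytes from end_of_patch to the record's end
  //   _bytes_to_copy                   length of the code being patched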
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(C1StubId::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
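  // A jmp with a 32-bit displacement is 5 bytes, the same length as the call
  // that deoptimization patches in, hence padding up to jmp_off + 5.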
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5);
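  // java_calling_convention has now assigned each of the five parameters
  // either a register or a caller-frame stack slot; the loop below only
  // needs to store the stack-resident ones, the rest are already in place.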

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt), rscratch1);
  }
#endif

  __ jmp(_continuation);
}

#undef __