6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "ci/ciUtilities.hpp"
29 #include "compiler/oopMap.hpp"
30 #include "gc/shared/barrierSet.hpp"
31 #include "gc/shared/barrierSetAssembler.hpp"
32 #include "gc/shared/barrierSetNMethod.hpp"
33 #include "gc/shared/gc_globals.hpp"
34 #include "interpreter/interpreter.hpp"
35 #include "memory/universe.hpp"
36 #include "nativeInst_x86.hpp"
37 #include "oops/instanceOop.hpp"
38 #include "oops/method.hpp"
39 #include "oops/objArrayKlass.hpp"
40 #include "oops/oop.inline.hpp"
41 #include "prims/methodHandles.hpp"
42 #include "runtime/arguments.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/handles.inline.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "runtime/stubCodeGenerator.hpp"
47 #include "runtime/stubRoutines.hpp"
48 #include "runtime/thread.inline.hpp"
49 #ifdef COMPILER2
50 #include "opto/runtime.hpp"
51 #endif
52 #if INCLUDE_JVMCI
53 #include "jvmci/jvmci_globals.hpp"
54 #endif
55 #if INCLUDE_ZGC
56 #include "gc/z/zThreadLocalData.hpp"
57 #endif
58
59 // Declaration and definition of StubGenerator (no .hpp file).
60 // For a more detailed description of the stub routine structure
61 // see the comment in stubRoutines.hpp
62
63 #define __ _masm->
64 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
65 #define a__ ((Assembler*)_masm)->
66
67 #ifdef PRODUCT
68 #define BLOCK_COMMENT(str) /* nothing */
69 #else
70 #define BLOCK_COMMENT(str) __ block_comment(str)
71 #endif
72
73 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
74 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
75
76 // Stub Code definitions
77
78 class StubGenerator: public StubCodeGenerator {
325 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
326 __ BIND(loop);
327 __ movptr(rax, Address(c_rarg2, 0));// get parameter
328 __ addptr(c_rarg2, wordSize); // advance to next parameter
329 __ decrementl(c_rarg1); // decrement counter
330 __ push(rax); // pass parameter
331 __ jcc(Assembler::notZero, loop);
332
333 // call Java function
334 __ BIND(parameters_done);
335 __ movptr(rbx, method); // get Method*
336 __ movptr(c_rarg1, entry_point); // get entry_point
337 __ mov(r13, rsp); // set sender sp
338 BLOCK_COMMENT("call Java function");
339 __ call(c_rarg1);
340
341 BLOCK_COMMENT("call_stub_return_address:");
342 return_address = __ pc();
343
344 // store result depending on type (everything that is not
345 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
346 __ movptr(c_rarg0, result);
347 Label is_long, is_float, is_double, exit;
348 __ movl(c_rarg1, result_type);
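    // An oop result (T_OBJECT) is pointer-sized, so it shares the T_LONG store path below.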
349 __ cmpl(c_rarg1, T_OBJECT);
350 __ jcc(Assembler::equal, is_long);
351 __ cmpl(c_rarg1, T_LONG);
352 __ jcc(Assembler::equal, is_long);
353 __ cmpl(c_rarg1, T_FLOAT);
354 __ jcc(Assembler::equal, is_float);
355 __ cmpl(c_rarg1, T_DOUBLE);
356 __ jcc(Assembler::equal, is_double);
357
358 // handle T_INT case
359 __ movl(Address(c_rarg0, 0), rax);
360
361 __ BIND(exit);
362
363 // pop parameters
364 __ lea(rsp, rsp_after_call);
365
366 #ifdef ASSERT
367 // verify that threads correspond
368 {
369 Label L1, L2, L3;
370 __ cmpptr(r15_thread, thread);
371 __ jcc(Assembler::equal, L1);
372 __ stop("StubRoutines::call_stub: r15_thread is corrupted");
373 __ bind(L1);
374 __ get_thread(rbx);
375 __ cmpptr(r15_thread, thread);
376 __ jcc(Assembler::equal, L2);
377 __ stop("StubRoutines::call_stub: r15_thread is modified by call");
378 __ bind(L2);
379 __ cmpptr(r15_thread, rbx);
401 __ movptr(r13, r13_save);
402 __ movptr(r12, r12_save);
403 __ movptr(rbx, rbx_save);
404
405 #ifdef _WIN64
406 __ movptr(rdi, rdi_save);
407 __ movptr(rsi, rsi_save);
408 #else
409 __ ldmxcsr(mxcsr_save);
410 #endif
411
412 // restore rsp
413 __ addptr(rsp, -rsp_after_call_off * wordSize);
414
415 // return
416 __ vzeroupper();
417 __ pop(rbp);
418 __ ret(0);
419
420 // handle return types different from T_INT
421 __ BIND(is_long);
422 __ movq(Address(c_rarg0, 0), rax);
423 __ jmp(exit);
424
425 __ BIND(is_float);
426 __ movflt(Address(c_rarg0, 0), xmm0);
427 __ jmp(exit);
428
429 __ BIND(is_double);
430 __ movdbl(Address(c_rarg0, 0), xmm0);
431 __ jmp(exit);
432
433 return start;
434 }
435
436 // Return point for a Java call if there's an exception thrown in
437 // Java code. The exception is caught and transformed into a
438 // pending exception stored in JavaThread that can be tested from
439 // within the VM.
440 //
441 // Note: Usually the parameters are removed by the callee. In case
442 // of an exception crossing an activation frame boundary, that is
443 // not the case if the callee is compiled code, so we need to set up
444 // rsp.
445 //
446 // rax: exception oop
447
448 address generate_catch_exception() {
449 StubCodeMark mark(this, "StubRoutines", "catch_exception");
450 address start = __ pc();
3126 // 32 30 24 16 8 2 0
3127 //
3128 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
3129 //
3130
3131 const int lh_offset = in_bytes(Klass::layout_helper_offset());
3132
3133 // Handle objArrays completely differently...
3134 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
3135 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
3136 __ jcc(Assembler::equal, L_objArray);
3137
3138 // if (src->klass() != dst->klass()) return -1;
3139 __ load_klass(rax, dst, rklass_tmp);
3140 __ cmpq(r10_src_klass, rax);
3141 __ jcc(Assembler::notEqual, L_failed);
3142
3143 const Register rax_lh = rax; // layout helper
3144 __ movl(rax_lh, Address(r10_src_klass, lh_offset));
3145
3146 // if (!src->is_Array()) return -1;
3147 __ cmpl(rax_lh, Klass::_lh_neutral_value);
3148 __ jcc(Assembler::greaterEqual, L_failed);
3149
3150 // At this point, it is known to be a typeArray (array_tag 0x3).
3151 #ifdef ASSERT
3152 {
3153 BLOCK_COMMENT("assert primitive array {");
3154 Label L;
3155 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
3156 __ jcc(Assembler::greaterEqual, L);
3157 __ stop("must be a primitive array");
3158 __ bind(L);
3159 BLOCK_COMMENT("} assert primitive array done");
3160 }
3161 #endif
3162
3163 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
3164 r10, L_failed);
3165
3166 // TypeArrayKlass
3167 //
3168 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
3169 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
3170 //
3171
3172 const Register r10_offset = r10; // array offset
3173 const Register rax_elsize = rax_lh; // element size
3174
3175 __ movl(r10_offset, rax_lh);
3176 __ shrl(r10_offset, Klass::_lh_header_size_shift);
3244
3245 // Identically typed arrays can be copied without element-wise checks.
3246 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
3247 r10, L_failed);
3248
3249 __ lea(from, Address(src, src_pos, TIMES_OOP,
3250 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
3251 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
3252 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
3253 __ movl2ptr(count, r11_length); // length
3254 __ BIND(L_plain_copy);
3255 #ifdef _WIN64
3256 __ pop(rklass_tmp); // Restore callee-save rdi
3257 #endif
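    // Tail-jump into the common oop copy stub; from, to and count are already set up.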
3258 __ jump(RuntimeAddress(oop_copy_entry));
3259
3260 __ BIND(L_checkcast_copy);
3261 // live at this point: r10_src_klass, r11_length, rax (dst_klass)
3262 {
3263 // Before looking at dst.length, make sure dst is also an objArray.
3264 __ cmpl(Address(rax, lh_offset), objArray_lh);
3265 __ jcc(Assembler::notEqual, L_failed);
3266
3267 // It is safe to examine both src.length and dst.length.
3268 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
3269 rax, L_failed);
3270
3271 const Register r11_dst_klass = r11;
3272 __ load_klass(r11_dst_klass, dst, rklass_tmp); // reload
3273
3274 // Marshal the base address arguments now, freeing registers.
3275 __ lea(from, Address(src, src_pos, TIMES_OOP,
3276 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
3277 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
3278 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
3279 __ movl(count, length); // length (reloaded)
3280 Register sco_temp = c_rarg3; // this register is free now
3281 assert_different_registers(from, to, count, sco_temp,
3282 r11_dst_klass, r10_src_klass);
3283 assert_clean_int(count, sco_temp);
3284
3285 // Generate the type check.
3286 const int sco_offset = in_bytes(Klass::super_check_offset_offset());
7547 __ bind(L);
7548 #endif // ASSERT
7549 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
7550
7551
7552 // codeBlob framesize is in words (not VMRegImpl::slot_size)
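    // framesize is counted in 32-bit slots here; shifting right by
    // (LogBytesPerWord - LogBytesPerInt) converts slots to 64-bit words.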
7553 RuntimeStub* stub =
7554 RuntimeStub::new_runtime_stub(name,
7555 &code,
7556 frame_complete,
7557 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
7558 oop_maps, false);
7559 return stub->entry_point();
7560 }
7561
7562 void create_control_words() {
7564 // Round to nearest, all exceptions masked
7564 StubRoutines::x86::_mxcsr_std = 0x1F80;
7565 }
7566
7567 // Initialization
7568 void generate_initial() {
7569 // Generates all stubs and initializes the entry points
7570
7571 // These platform-specific settings are needed by generate_call_stub()
7572 create_control_words();
7573
7574 // entry points that exist in all platforms. Note: This is code
7575 // that could be shared among different platforms - however the
7576 // benefit seems to be smaller than the disadvantage of having a
7577 // much more complicated generator structure. See also comment in
7578 // stubRoutines.hpp.
7579
7580 StubRoutines::_forward_exception_entry = generate_forward_exception();
7581
7582 StubRoutines::_call_stub_entry =
7583 generate_call_stub(StubRoutines::_call_stub_return_address);
7584
7585 // is referenced by megamorphic call
7586 StubRoutines::_catch_exception_entry = generate_catch_exception();
7587
7588 // atomic calls
7589 StubRoutines::_fence_entry = generate_orderaccess_fence();
7590
7591 // platform dependent
7592 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
7593
7594 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
7595
7596 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
7597 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
7598 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
7599 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
7600
7601 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
7602 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
7603 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
|
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "asm/macroAssembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "ci/ciUtilities.hpp"
30 #include "compiler/oopMap.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "gc/shared/barrierSetAssembler.hpp"
33 #include "gc/shared/barrierSetNMethod.hpp"
34 #include "gc/shared/gc_globals.hpp"
35 #include "interpreter/interpreter.hpp"
36 #include "memory/universe.hpp"
37 #include "nativeInst_x86.hpp"
38 #include "oops/instanceOop.hpp"
39 #include "oops/method.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "prims/methodHandles.hpp"
43 #include "register_x86.hpp"
44 #include "runtime/arguments.hpp"
45 #include "runtime/frame.inline.hpp"
46 #include "runtime/handles.inline.hpp"
47 #include "runtime/sharedRuntime.hpp"
48 #include "runtime/stubCodeGenerator.hpp"
49 #include "runtime/stubRoutines.hpp"
50 #include "runtime/thread.inline.hpp"
51 #include "utilities/macros.hpp"
52 #include "vmreg_x86.inline.hpp"
53 #ifdef COMPILER2
54 #include "opto/runtime.hpp"
55 #endif
56 #if INCLUDE_JVMCI
57 #include "jvmci/jvmci_globals.hpp"
58 #endif
59 #if INCLUDE_ZGC
60 #include "gc/z/zThreadLocalData.hpp"
61 #endif
62
63
64 // Declaration and definition of StubGenerator (no .hpp file).
65 // For a more detailed description of the stub routine structure
66 // see the comment in stubRoutines.hpp
67
68 #define __ _masm->
69 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
70 #define a__ ((Assembler*)_masm)->
71
72 #ifdef PRODUCT
73 #define BLOCK_COMMENT(str) /* nothing */
74 #else
75 #define BLOCK_COMMENT(str) __ block_comment(str)
76 #endif
77
78 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
79 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
80
81 // Stub Code definitions
82
83 class StubGenerator: public StubCodeGenerator {
330 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
331 __ BIND(loop);
332 __ movptr(rax, Address(c_rarg2, 0));// get parameter
333 __ addptr(c_rarg2, wordSize); // advance to next parameter
334 __ decrementl(c_rarg1); // decrement counter
335 __ push(rax); // pass parameter
336 __ jcc(Assembler::notZero, loop);
337
338 // call Java function
339 __ BIND(parameters_done);
340 __ movptr(rbx, method); // get Method*
341 __ movptr(c_rarg1, entry_point); // get entry_point
342 __ mov(r13, rsp); // set sender sp
343 BLOCK_COMMENT("call Java function");
344 __ call(c_rarg1);
345
346 BLOCK_COMMENT("call_stub_return_address:");
347 return_address = __ pc();
348
349 // store result depending on type (everything that is not
350 // T_OBJECT, T_PRIMITIVE_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
351 __ movptr(r13, result);
352 Label is_long, is_float, is_double, check_prim, exit;
353 __ movl(rbx, result_type);
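    // T_OBJECT and T_PRIMITIVE_OBJECT results are dispatched to check_prim; unless
    // the value is returned scalarized, they fall through to the pointer-sized T_LONG store.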
354 __ cmpl(rbx, T_OBJECT);
355 __ jcc(Assembler::equal, check_prim);
356 __ cmpl(rbx, T_PRIMITIVE_OBJECT);
357 __ jcc(Assembler::equal, check_prim);
358 __ cmpl(rbx, T_LONG);
359 __ jcc(Assembler::equal, is_long);
360 __ cmpl(rbx, T_FLOAT);
361 __ jcc(Assembler::equal, is_float);
362 __ cmpl(rbx, T_DOUBLE);
363 __ jcc(Assembler::equal, is_double);
364
365 // handle T_INT case
366 __ movl(Address(r13, 0), rax);
367
368 __ BIND(exit);
369
370 // pop parameters
371 __ lea(rsp, rsp_after_call);
372
373 #ifdef ASSERT
374 // verify that threads correspond
375 {
376 Label L1, L2, L3;
377 __ cmpptr(r15_thread, thread);
378 __ jcc(Assembler::equal, L1);
379 __ stop("StubRoutines::call_stub: r15_thread is corrupted");
380 __ bind(L1);
381 __ get_thread(rbx);
382 __ cmpptr(r15_thread, thread);
383 __ jcc(Assembler::equal, L2);
384 __ stop("StubRoutines::call_stub: r15_thread is modified by call");
385 __ bind(L2);
386 __ cmpptr(r15_thread, rbx);
408 __ movptr(r13, r13_save);
409 __ movptr(r12, r12_save);
410 __ movptr(rbx, rbx_save);
411
412 #ifdef _WIN64
413 __ movptr(rdi, rdi_save);
414 __ movptr(rsi, rsi_save);
415 #else
416 __ ldmxcsr(mxcsr_save);
417 #endif
418
419 // restore rsp
420 __ addptr(rsp, -rsp_after_call_off * wordSize);
421
422 // return
423 __ vzeroupper();
424 __ pop(rbp);
425 __ ret(0);
426
427 // handle return types different from T_INT
428 __ BIND(check_prim);
429 if (InlineTypeReturnedAsFields) {
430 // Check for scalarized return value
431 __ testptr(rax, 1);
432 __ jcc(Assembler::zero, is_long);
433 // Load pack handler address
434 __ andptr(rax, -2);
435 __ movptr(rax, Address(rax, InstanceKlass::adr_inlineklass_fixed_block_offset()));
436 __ movptr(rbx, Address(rax, InlineKlass::pack_handler_jobject_offset()));
437 // Call pack handler to initialize the buffer
438 __ call(rbx);
439 __ jmp(exit);
440 }
441 __ BIND(is_long);
442 __ movq(Address(r13, 0), rax);
443 __ jmp(exit);
444
445 __ BIND(is_float);
446 __ movflt(Address(r13, 0), xmm0);
447 __ jmp(exit);
448
449 __ BIND(is_double);
450 __ movdbl(Address(r13, 0), xmm0);
451 __ jmp(exit);
452
453 return start;
454 }
455
456 // Return point for a Java call if there's an exception thrown in
457 // Java code. The exception is caught and transformed into a
458 // pending exception stored in JavaThread that can be tested from
459 // within the VM.
460 //
461 // Note: Usually the parameters are removed by the callee. In case
462 // of an exception crossing an activation frame boundary, that is
463 // not the case if the callee is compiled code, so we need to set up
464 // rsp.
465 //
466 // rax: exception oop
467
468 address generate_catch_exception() {
469 StubCodeMark mark(this, "StubRoutines", "catch_exception");
470 address start = __ pc();
3146 // 32 30 24 16 8 2 0
3147 //
3148 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
3149 //
3150
3151 const int lh_offset = in_bytes(Klass::layout_helper_offset());
3152
3153 // Handle objArrays completely differently...
3154 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
3155 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
3156 __ jcc(Assembler::equal, L_objArray);
3157
3158 // if (src->klass() != dst->klass()) return -1;
3159 __ load_klass(rax, dst, rklass_tmp);
3160 __ cmpq(r10_src_klass, rax);
3161 __ jcc(Assembler::notEqual, L_failed);
3162
3163 const Register rax_lh = rax; // layout helper
3164 __ movl(rax_lh, Address(r10_src_klass, lh_offset));
3165
3166 // Check for flat inline type array -> return -1
3167 __ testl(rax_lh, Klass::_lh_array_tag_flat_value_bit_inplace);
3168 __ jcc(Assembler::notZero, L_failed);
3169
3170 // Check for null-free (non-flat) inline type array -> handle as object array
3171 __ testl(rax_lh, Klass::_lh_null_free_array_bit_inplace);
3172 __ jcc(Assembler::notZero, L_objArray);
3173
3174 // if (!src->is_Array()) return -1;
3175 __ cmpl(rax_lh, Klass::_lh_neutral_value);
3176 __ jcc(Assembler::greaterEqual, L_failed);
3177
3178 // At this point, it is known to be a typeArray (array_tag 0x3).
3179 #ifdef ASSERT
3180 {
3181 BLOCK_COMMENT("assert primitive array {");
3182 Label L;
3183 __ movl(rklass_tmp, rax_lh);
3184 __ sarl(rklass_tmp, Klass::_lh_array_tag_shift);
3185 __ cmpl(rklass_tmp, Klass::_lh_array_tag_type_value);
3186 __ jcc(Assembler::equal, L);
3187 __ stop("must be a primitive array");
3188 __ bind(L);
3189 BLOCK_COMMENT("} assert primitive array done");
3190 }
3191 #endif
3192
3193 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
3194 r10, L_failed);
3195
3196 // TypeArrayKlass
3197 //
3198 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
3199 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
3200 //
3201
3202 const Register r10_offset = r10; // array offset
3203 const Register rax_elsize = rax_lh; // element size
3204
3205 __ movl(r10_offset, rax_lh);
3206 __ shrl(r10_offset, Klass::_lh_header_size_shift);
3274
3275 // Identically typed arrays can be copied without element-wise checks.
3276 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
3277 r10, L_failed);
3278
3279 __ lea(from, Address(src, src_pos, TIMES_OOP,
3280 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
3281 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
3282 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
3283 __ movl2ptr(count, r11_length); // length
3284 __ BIND(L_plain_copy);
3285 #ifdef _WIN64
3286 __ pop(rklass_tmp); // Restore callee-save rdi
3287 #endif
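    // Tail-jump into the common oop copy stub; from, to and count are already set up.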
3288 __ jump(RuntimeAddress(oop_copy_entry));
3289
3290 __ BIND(L_checkcast_copy);
3291 // live at this point: r10_src_klass, r11_length, rax (dst_klass)
3292 {
3293 // Before looking at dst.length, make sure dst is also an objArray.
3294 // This check also fails for flat/null-free arrays, which are not supported.
3295 __ cmpl(Address(rax, lh_offset), objArray_lh);
3296 __ jcc(Assembler::notEqual, L_failed);
3297
3298 #ifdef ASSERT
3299 {
3300 BLOCK_COMMENT("assert not null-free array {");
3301 Label L;
3302 __ movl(rklass_tmp, Address(rax, lh_offset));
3303 __ testl(rklass_tmp, Klass::_lh_null_free_array_bit_inplace);
3304 __ jcc(Assembler::zero, L);
3305 __ stop("unexpected null-free array");
3306 __ bind(L);
3307 BLOCK_COMMENT("} assert not null-free array");
3308 }
3309 #endif
3310
3311 // It is safe to examine both src.length and dst.length.
3312 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
3313 rax, L_failed);
3314
3315 const Register r11_dst_klass = r11;
3316 __ load_klass(r11_dst_klass, dst, rklass_tmp); // reload
3317
3318 // Marshal the base address arguments now, freeing registers.
3319 __ lea(from, Address(src, src_pos, TIMES_OOP,
3320 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
3321 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
3322 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
3323 __ movl(count, length); // length (reloaded)
3324 Register sco_temp = c_rarg3; // this register is free now
3325 assert_different_registers(from, to, count, sco_temp,
3326 r11_dst_klass, r10_src_klass);
3327 assert_clean_int(count, sco_temp);
3328
3329 // Generate the type check.
3330 const int sco_offset = in_bytes(Klass::super_check_offset_offset());
7591 __ bind(L);
7592 #endif // ASSERT
7593 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
7594
7595
7596 // codeBlob framesize is in words (not VMRegImpl::slot_size)
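    // framesize is counted in 32-bit slots here; shifting right by
    // (LogBytesPerWord - LogBytesPerInt) converts slots to 64-bit words.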
7597 RuntimeStub* stub =
7598 RuntimeStub::new_runtime_stub(name,
7599 &code,
7600 frame_complete,
7601 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
7602 oop_maps, false);
7603 return stub->entry_point();
7604 }
7605
7606 void create_control_words() {
7607 // Round to nearest, all exceptions masked
7608 StubRoutines::x86::_mxcsr_std = 0x1F80;
7609 }
7610
7611 // Called from the interpreter or from compiled code either to load the
7612 // multiple returned values of the inline type instance being returned
7613 // into registers, or to store the returned values into a newly
7614 // allocated inline type instance.
7615 // Register is a class, but it will be assigned a numerical value;
7616 // the value 0 is assigned to xmm0, so we need to ignore -Wnonnull.
7617 PRAGMA_DIAG_PUSH
7618 PRAGMA_NONNULL_IGNORED
7619 address generate_return_value_stub(address destination, const char* name, bool has_res) {
7620 // We need to save all registers the calling convention may use so that
7621 // the runtime calls can read or update those registers. This needs to
7622 // be in sync with SharedRuntime::java_return_convention().
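    // Each *_off / *_2 pair below reserves two 32-bit stack slots, i.e. one
    // 64-bit save slot per register.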
7623 enum layout {
7624 pad_off = frame::arg_reg_save_area_bytes/BytesPerInt, pad_off_2,
7625 rax_off, rax_off_2,
7626 j_rarg5_off, j_rarg5_2,
7627 j_rarg4_off, j_rarg4_2,
7628 j_rarg3_off, j_rarg3_2,
7629 j_rarg2_off, j_rarg2_2,
7630 j_rarg1_off, j_rarg1_2,
7631 j_rarg0_off, j_rarg0_2,
7632 j_farg0_off, j_farg0_2,
7633 j_farg1_off, j_farg1_2,
7634 j_farg2_off, j_farg2_2,
7635 j_farg3_off, j_farg3_2,
7636 j_farg4_off, j_farg4_2,
7637 j_farg5_off, j_farg5_2,
7638 j_farg6_off, j_farg6_2,
7639 j_farg7_off, j_farg7_2,
7640 rbp_off, rbp_off_2,
7641 return_off, return_off_2,
7642
7643 framesize
7644 };
7645
7646 CodeBuffer buffer(name, 1000, 512);
7647 MacroAssembler* masm = new MacroAssembler(&buffer);
7648
7649 int frame_size_in_bytes = align_up(framesize*BytesPerInt, 16);
7650 assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
7651 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
7652 int frame_size_in_words = frame_size_in_bytes / wordSize;
7653
7654 OopMapSet *oop_maps = new OopMapSet();
7655 OopMap* map = new OopMap(frame_size_in_slots, 0);
7656
7657 map->set_callee_saved(VMRegImpl::stack2reg(rax_off), rax->as_VMReg());
7658 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
7659 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
7660 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
7661 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
7662 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
7663 map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
7664 map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
7665 map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
7666 map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
7667 map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
7668 map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
7669 map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
7670 map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
7671 map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());
7672
7673 int start = __ offset();
7674
7675 __ subptr(rsp, frame_size_in_bytes - 8 /* return address*/);
7676
7677 __ movptr(Address(rsp, rbp_off * BytesPerInt), rbp);
7678 __ movdbl(Address(rsp, j_farg7_off * BytesPerInt), j_farg7);
7679 __ movdbl(Address(rsp, j_farg6_off * BytesPerInt), j_farg6);
7680 __ movdbl(Address(rsp, j_farg5_off * BytesPerInt), j_farg5);
7681 __ movdbl(Address(rsp, j_farg4_off * BytesPerInt), j_farg4);
7682 __ movdbl(Address(rsp, j_farg3_off * BytesPerInt), j_farg3);
7683 __ movdbl(Address(rsp, j_farg2_off * BytesPerInt), j_farg2);
7684 __ movdbl(Address(rsp, j_farg1_off * BytesPerInt), j_farg1);
7685 __ movdbl(Address(rsp, j_farg0_off * BytesPerInt), j_farg0);
7686
7687 __ movptr(Address(rsp, j_rarg0_off * BytesPerInt), j_rarg0);
7688 __ movptr(Address(rsp, j_rarg1_off * BytesPerInt), j_rarg1);
7689 __ movptr(Address(rsp, j_rarg2_off * BytesPerInt), j_rarg2);
7690 __ movptr(Address(rsp, j_rarg3_off * BytesPerInt), j_rarg3);
7691 __ movptr(Address(rsp, j_rarg4_off * BytesPerInt), j_rarg4);
7692 __ movptr(Address(rsp, j_rarg5_off * BytesPerInt), j_rarg5);
7693 __ movptr(Address(rsp, rax_off * BytesPerInt), rax);
7694
7695 int frame_complete = __ offset();
7696
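    // Record a last_Java_frame so the stack is walkable during the runtime call below.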
7697 __ set_last_Java_frame(noreg, noreg, NULL);
7698
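    // Argument setup for the runtime call: the current thread and the raw return value in rax.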
7699 __ mov(c_rarg0, r15_thread);
7700 __ mov(c_rarg1, rax);
7701
7702 __ call(RuntimeAddress(destination));
7703
7704 // Set an oopmap for the call site.
7705
7706 oop_maps->add_gc_map( __ offset() - start, map);
7707
7708 // clear last_Java_sp
7709 __ reset_last_Java_frame(false);
7710
7711 __ movptr(rbp, Address(rsp, rbp_off * BytesPerInt));
7712 __ movdbl(j_farg7, Address(rsp, j_farg7_off * BytesPerInt));
7713 __ movdbl(j_farg6, Address(rsp, j_farg6_off * BytesPerInt));
7714 __ movdbl(j_farg5, Address(rsp, j_farg5_off * BytesPerInt));
7715 __ movdbl(j_farg4, Address(rsp, j_farg4_off * BytesPerInt));
7716 __ movdbl(j_farg3, Address(rsp, j_farg3_off * BytesPerInt));
7717 __ movdbl(j_farg2, Address(rsp, j_farg2_off * BytesPerInt));
7718 __ movdbl(j_farg1, Address(rsp, j_farg1_off * BytesPerInt));
7719 __ movdbl(j_farg0, Address(rsp, j_farg0_off * BytesPerInt));
7720
7721 __ movptr(j_rarg0, Address(rsp, j_rarg0_off * BytesPerInt));
7722 __ movptr(j_rarg1, Address(rsp, j_rarg1_off * BytesPerInt));
7723 __ movptr(j_rarg2, Address(rsp, j_rarg2_off * BytesPerInt));
7724 __ movptr(j_rarg3, Address(rsp, j_rarg3_off * BytesPerInt));
7725 __ movptr(j_rarg4, Address(rsp, j_rarg4_off * BytesPerInt));
7726 __ movptr(j_rarg5, Address(rsp, j_rarg5_off * BytesPerInt));
7727 __ movptr(rax, Address(rsp, rax_off * BytesPerInt));
7728
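    // Pop the register save area; only the return address (8 bytes) is left on the stack for the ret below.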
7729 __ addptr(rsp, frame_size_in_bytes-8);
7730
7731 // check for pending exceptions
7732 Label pending;
7733 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
7734 __ jcc(Assembler::notEqual, pending);
7735
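    // Stubs that produce a result pick up the oop the runtime stored in the thread's vm_result.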
7736 if (has_res) {
7737 __ get_vm_result(rax, r15_thread);
7738 }
7739
7740 __ ret(0);
7741
7742 __ bind(pending);
7743
7744 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
7745 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
7746
7747 // -------------
7748 // make sure all code is generated
7749 masm->flush();
7750
7751 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, false);
7752 return stub->entry_point();
7753 }
7754
7755 // Initialization
7756 void generate_initial() {
7757 // Generates all stubs and initializes the entry points
7758
7759 // These platform-specific settings are needed by generate_call_stub()
7760 create_control_words();
7761
7762 // entry points that exist in all platforms. Note: This is code
7763 // that could be shared among different platforms - however the
7764 // benefit seems to be smaller than the disadvantage of having a
7765 // much more complicated generator structure. See also comment in
7766 // stubRoutines.hpp.
7767
7768 StubRoutines::_forward_exception_entry = generate_forward_exception();
7769
7770 // Generate these first because they are called from other stubs
7771 if (InlineTypeReturnedAsFields) {
7772 StubRoutines::_load_inline_type_fields_in_regs =
7773 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_inline_type_fields_in_regs), "load_inline_type_fields_in_regs", false);
7774 StubRoutines::_store_inline_type_fields_to_buf =
7775 generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_inline_type_fields_to_buf), "store_inline_type_fields_to_buf", true);
7776 }
7777 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);
7778
7779 // is referenced by megamorphic call
7780 StubRoutines::_catch_exception_entry = generate_catch_exception();
7781
7782 // atomic calls
7783 StubRoutines::_fence_entry = generate_orderaccess_fence();
7784
7785 // platform dependent
7786 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
7787
7788 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
7789
7790 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
7791 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
7792 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
7793 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
7794
7795 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
7796 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
7797 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);