594 if (UseSSE >= 2 && VerifyFPU) {
595 st->print("\n\t");
596 st->print("# verify FPU stack (must be clean on entry)");
597 }
598
599 #ifdef ASSERT
600 if (VerifyStackAtCalls) {
601 st->print("\n\t");
602 st->print("# stack alignment check");
603 }
604 #endif
605 st->cr();
606 }
607 #endif
608
609
610 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
611 Compile* C = ra_->C;
612 C2_MacroAssembler _masm(&cbuf);
613
614 int framesize = C->output()->frame_size_in_bytes();
615 int bangsize = C->output()->bang_size_in_bytes();
616
617 __ verified_entry(framesize, C->output()->need_stack_bang(bangsize)?bangsize:0, C->in_24_bit_fp_mode(), C->stub_function() != NULL);
618
619 C->output()->set_frame_complete(cbuf.insts_size());
620
621 if (C->has_mach_constant_base_node()) {
622 // NOTE: We set the table base offset here because users might be
623 // emitted before MachConstantBaseNode.
624 ConstantTable& constant_table = C->output()->constant_table();
625 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
626 }
627 }
628
629 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
630 return MachNode::size(ra_); // too many variables; just compute it the hard way
631 }
632
633 int MachPrologNode::reloc() const {
634 return 0; // a large enough number
635 }
636
637 //=============================================================================
// Emit (cbuf != NULL), size (do_size), or print (st != NULL) the move
// instructions for a register-allocator-inserted spill copy.  Handles the
// first and, for two-slot values, the second word of the value.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move: first and second words of source and destination.
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  // Classify each slot (int reg, float reg, xmm reg, stack slot, or bad).
  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return size; // Self copy, no move

  // Vector spill/copy (vector registers, but not a predicate/mask register).
  if (bottom_type()->isa_vect() != NULL && bottom_type()->isa_vectmask() == NULL) {
    uint ireg = ideal_reg();
    assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity");
    assert((src_first_rc != rc_float && dst_first_rc != rc_float), "sanity");
    assert((ireg == Op_VecS || ireg == Op_VecD || ireg == Op_VecX || ireg == Op_VecY || ireg == Op_VecZ ), "sanity");
    if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
      // mem -> mem
      int src_offset = ra_->reg2offset(src_first);
      int dst_offset = ra_->reg2offset(dst_first);
      vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st);
    } else if (src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) {
      // xmm -> xmm register move.
      vec_mov_helper(cbuf, src_first, dst_first, src_second, dst_second, ireg, st);
    } else if (src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
      // xmm -> stack (spill).
      int stack_offset = ra_->reg2offset(dst_first);
      vec_spill_helper(cbuf, false, stack_offset, src_first, ireg, st);
    } else if (src_first_rc == rc_stack && dst_first_rc == rc_xmm ) {
      // stack -> xmm (reload).
      int stack_offset = ra_->reg2offset(src_first);
      vec_spill_helper(cbuf, true, stack_offset, dst_first, ireg, st);
    } else {
      ShouldNotReachHere();
    }
  assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );

  // Check for second word int-int move
  if( src_second_rc == rc_int && dst_second_rc == rc_int )
    return impl_mov_helper(cbuf,do_size,src_second,dst_second,size, st);

  // Check for second word integer store (0x89 = x86 MOV r/m32,r32)
  if( src_second_rc == rc_int && dst_second_rc == rc_stack )
    return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);

  // Check for second word integer load (0x8B = x86 MOV r32,r/m32)
  if( dst_second_rc == rc_int && src_second_rc == rc_stack )
    return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);

  // No move pattern matched the second word.
  Unimplemented();
  return 0; // Mute compiler
}
1320
1321 #ifndef PRODUCT
1322 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
1323 implementation( NULL, ra_, false, st );
1324 }
1325 #endif
1326
1327 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1328 implementation( &cbuf, ra_, false, NULL );
1329 }
1330
1331 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
1332 return MachNode::size(ra_);
1333 }
1334
1335
1336 //=============================================================================
1337 #ifndef PRODUCT
1338 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1339 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1340 int reg = ra_->get_reg_first(this);
1341 st->print("LEA %s,[ESP + #%d]",Matcher::regName[reg],offset);
1342 }
1343 #endif
1344
1345 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1346 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1347 int reg = ra_->get_encode(this);
1348 if( offset >= 128 ) {
1708 emit_d8(cbuf, op >> 8 );
1709 emit_d8(cbuf, op & 255);
1710 %}
1711
// emulate a CMOV with a conditional branch around a MOV
enc_class enc_cmov_branch( cmpOp cop, immI brOffs ) %{ // CMOV
  // Invert sense of branch from sense of CMOV: jump over the MOV when the
  // CMOV condition does NOT hold.  0x70 is the Jcc short-form opcode base;
  // xor-ing the condition code with 1 negates the x86 condition.
  emit_cc( cbuf, 0x70, ($cop$$cmpcode^1) );
  // 8-bit displacement skipping the MOV that follows this encoding.
  emit_d8( cbuf, $brOffs$$constant );
%}
1718
// Slow-path portion of a subtype check, with fixed register roles
// (EAX/ESI inputs, ECX scratch, EDI result).
enc_class enc_PartialSubtypeCheck( ) %{
  Register Redi = as_Register(EDI_enc); // result register
  Register Reax = as_Register(EAX_enc); // super class
  Register Recx = as_Register(ECX_enc); // killed
  Register Resi = as_Register(ESI_enc); // sub class
  Label miss;

  MacroAssembler _masm(&cbuf);
  // NULL success label / 'miss' failure label: presumably the helper falls
  // through on success and branches to 'miss' on failure — confirm against
  // MacroAssembler::check_klass_subtype_slow_path.  set_cond_codes=true
  // asks the helper to leave condition codes reflecting the outcome.
  __ check_klass_subtype_slow_path(Resi, Reax, Recx, Redi,
                                   NULL, &miss,
                                   /*set_cond_codes:*/ true);
  // $primary selects the variant that zeroes the result register on the
  // fall-through path; both paths then join at 'miss'.
  if ($primary) {
    __ xorptr(Redi, Redi);
  }
  __ bind(miss);
%}
1735
1736 enc_class FFree_Float_Stack_All %{ // Free_Float_Stack_All
1737 MacroAssembler masm(&cbuf);
1738 int start = masm.offset();
1739 if (UseSSE >= 2) {
1740 if (VerifyFPU) {
1741 masm.verify_FPU(0, "must be empty in SSE2+ mode");
1742 }
1743 } else {
1744 // External c_calling_convention expects the FPU stack to be 'clean'.
1745 // Compiled code leaves it dirty. Do cleanup now.
1746 masm.empty_FPU_stack();
1747 }
1748 if (sizeof_FFree_Float_Stack_All == -1) {
1826 if (!_method) {
1827 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1828 runtime_call_Relocation::spec(),
1829 RELOC_IMM32);
1830 __ post_call_nop();
1831 } else {
1832 int method_index = resolved_method_index(cbuf);
1833 RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
1834 : static_call_Relocation::spec(method_index);
1835 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1836 rspec, RELOC_DISP32);
1837 __ post_call_nop();
1838 address mark = cbuf.insts_mark();
1839 if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
1840 // Calls of the same statically bound method can share
1841 // a stub to the interpreter.
1842 cbuf.shared_stub_to_interp_for(_method, cbuf.insts()->mark_off());
1843 } else {
1844 // Emit stubs for static call.
1845 address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
1846 if (stub == NULL) {
1847 ciEnv::current()->record_failure("CodeCache is full");
1848 return;
1849 }
1850 }
1851 }
1852 %}
1853
// Dynamic (inline-cache) Java call.
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  // Emit the inline-cache call sequence; resolved_method_index(cbuf) ties
  // the call site's relocation to the resolved target method.
  __ ic_call((address)$meth$$method, resolved_method_index(cbuf));
  // Trailing NOP marking the call site — see MacroAssembler::post_call_nop.
  __ post_call_nop();
%}
1859
1860 enc_class Java_Compiled_Call (method meth) %{ // JAVA COMPILED CALL
1861 int disp = in_bytes(Method::from_compiled_offset());
1862 assert( -128 <= disp && disp <= 127, "compiled_code_offset isn't small");
1863
1864 // CALL *[EAX+in_bytes(Method::from_compiled_code_entry_point_offset())]
1865 MacroAssembler _masm(&cbuf);
1866 cbuf.set_insts_mark();
3379
// Integer immediate with the exact value 8.
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3389
// Pointer Immediate: any pointer constant (op_cost discourages use
// when a cheaper operand matches).
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
3398
// NULL Pointer Immediate: pointer constant with value 0.
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
3408
// Long Immediate: any 64-bit constant (op_cost discourages use when a
// cheaper specialized operand matches).
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
3417
3418 // Long Immediate zero
3419 operand immL0() %{
|
594 if (UseSSE >= 2 && VerifyFPU) {
595 st->print("\n\t");
596 st->print("# verify FPU stack (must be clean on entry)");
597 }
598
599 #ifdef ASSERT
600 if (VerifyStackAtCalls) {
601 st->print("\n\t");
602 st->print("# stack alignment check");
603 }
604 #endif
605 st->cr();
606 }
607 #endif
608
609
610 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
611 Compile* C = ra_->C;
612 C2_MacroAssembler _masm(&cbuf);
613
614 __ verified_entry(C);
615
616 C->output()->set_frame_complete(cbuf.insts_size());
617
618 if (C->has_mach_constant_base_node()) {
619 // NOTE: We set the table base offset here because users might be
620 // emitted before MachConstantBaseNode.
621 ConstantTable& constant_table = C->output()->constant_table();
622 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
623 }
624 }
625
626 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
627 return MachNode::size(ra_); // too many variables; just compute it the hard way
628 }
629
630 int MachPrologNode::reloc() const {
631 return 0; // a large enough number
632 }
633
634 //=============================================================================
// Emit (cbuf != nullptr), size (do_size), or print (st != nullptr) the move
// instructions for a register-allocator-inserted spill copy.  Handles the
// first and, for two-slot values, the second word of the value.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move: first and second words of source and destination.
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  // Classify each slot (int reg, float reg, xmm reg, stack slot, or bad).
  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return size; // Self copy, no move

  // Vector spill/copy (vector registers, but not a predicate/mask register).
  if (bottom_type()->isa_vect() != nullptr && bottom_type()->isa_vectmask() == nullptr) {
    uint ireg = ideal_reg();
    assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity");
    assert((src_first_rc != rc_float && dst_first_rc != rc_float), "sanity");
    assert((ireg == Op_VecS || ireg == Op_VecD || ireg == Op_VecX || ireg == Op_VecY || ireg == Op_VecZ ), "sanity");
    if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
      // mem -> mem
      int src_offset = ra_->reg2offset(src_first);
      int dst_offset = ra_->reg2offset(dst_first);
      vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st);
    } else if (src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) {
      // xmm -> xmm register move.
      vec_mov_helper(cbuf, src_first, dst_first, src_second, dst_second, ireg, st);
    } else if (src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
      // xmm -> stack (spill).
      int stack_offset = ra_->reg2offset(dst_first);
      vec_spill_helper(cbuf, false, stack_offset, src_first, ireg, st);
    } else if (src_first_rc == rc_stack && dst_first_rc == rc_xmm ) {
      // stack -> xmm (reload).
      int stack_offset = ra_->reg2offset(src_first);
      vec_spill_helper(cbuf, true, stack_offset, dst_first, ireg, st);
    } else {
      ShouldNotReachHere();
    }
  assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );

  // Check for second word int-int move
  if( src_second_rc == rc_int && dst_second_rc == rc_int )
    return impl_mov_helper(cbuf,do_size,src_second,dst_second,size, st);

  // Check for second word integer store (0x89 = x86 MOV r/m32,r32)
  if( src_second_rc == rc_int && dst_second_rc == rc_stack )
    return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);

  // Check for second word integer load (0x8B = x86 MOV r32,r/m32)
  if( dst_second_rc == rc_int && src_second_rc == rc_stack )
    return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);

  // No move pattern matched the second word.
  Unimplemented();
  return 0; // Mute compiler
}
1317
1318 #ifndef PRODUCT
1319 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
1320 implementation( nullptr, ra_, false, st );
1321 }
1322 #endif
1323
1324 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1325 implementation( &cbuf, ra_, false, nullptr );
1326 }
1327
1328 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
1329 return MachNode::size(ra_);
1330 }
1331
1332
1333 //=============================================================================
1334 #ifndef PRODUCT
1335 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1336 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1337 int reg = ra_->get_reg_first(this);
1338 st->print("LEA %s,[ESP + #%d]",Matcher::regName[reg],offset);
1339 }
1340 #endif
1341
1342 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1343 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1344 int reg = ra_->get_encode(this);
1345 if( offset >= 128 ) {
1705 emit_d8(cbuf, op >> 8 );
1706 emit_d8(cbuf, op & 255);
1707 %}
1708
// emulate a CMOV with a conditional branch around a MOV
enc_class enc_cmov_branch( cmpOp cop, immI brOffs ) %{ // CMOV
  // Invert sense of branch from sense of CMOV: jump over the MOV when the
  // CMOV condition does NOT hold.  0x70 is the Jcc short-form opcode base;
  // xor-ing the condition code with 1 negates the x86 condition.
  emit_cc( cbuf, 0x70, ($cop$$cmpcode^1) );
  // 8-bit displacement skipping the MOV that follows this encoding.
  emit_d8( cbuf, $brOffs$$constant );
%}
1715
// Slow-path portion of a subtype check, with fixed register roles
// (EAX/ESI inputs, ECX scratch, EDI result).
enc_class enc_PartialSubtypeCheck( ) %{
  Register Redi = as_Register(EDI_enc); // result register
  Register Reax = as_Register(EAX_enc); // super class
  Register Recx = as_Register(ECX_enc); // killed
  Register Resi = as_Register(ESI_enc); // sub class
  Label miss;

  MacroAssembler _masm(&cbuf);
  // nullptr success label / 'miss' failure label: presumably the helper
  // falls through on success and branches to 'miss' on failure — confirm
  // against MacroAssembler::check_klass_subtype_slow_path.
  // set_cond_codes=true asks the helper to leave condition codes
  // reflecting the outcome.
  __ check_klass_subtype_slow_path(Resi, Reax, Recx, Redi,
                                   nullptr, &miss,
                                   /*set_cond_codes:*/ true);
  // $primary selects the variant that zeroes the result register on the
  // fall-through path; both paths then join at 'miss'.
  if ($primary) {
    __ xorptr(Redi, Redi);
  }
  __ bind(miss);
%}
1732
1733 enc_class FFree_Float_Stack_All %{ // Free_Float_Stack_All
1734 MacroAssembler masm(&cbuf);
1735 int start = masm.offset();
1736 if (UseSSE >= 2) {
1737 if (VerifyFPU) {
1738 masm.verify_FPU(0, "must be empty in SSE2+ mode");
1739 }
1740 } else {
1741 // External c_calling_convention expects the FPU stack to be 'clean'.
1742 // Compiled code leaves it dirty. Do cleanup now.
1743 masm.empty_FPU_stack();
1744 }
1745 if (sizeof_FFree_Float_Stack_All == -1) {
1823 if (!_method) {
1824 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1825 runtime_call_Relocation::spec(),
1826 RELOC_IMM32);
1827 __ post_call_nop();
1828 } else {
1829 int method_index = resolved_method_index(cbuf);
1830 RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
1831 : static_call_Relocation::spec(method_index);
1832 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
1833 rspec, RELOC_DISP32);
1834 __ post_call_nop();
1835 address mark = cbuf.insts_mark();
1836 if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
1837 // Calls of the same statically bound method can share
1838 // a stub to the interpreter.
1839 cbuf.shared_stub_to_interp_for(_method, cbuf.insts()->mark_off());
1840 } else {
1841 // Emit stubs for static call.
1842 address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
1843 if (stub == nullptr) {
1844 ciEnv::current()->record_failure("CodeCache is full");
1845 return;
1846 }
1847 }
1848 }
1849 %}
1850
// Dynamic (inline-cache) Java call.
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  // Emit the inline-cache call sequence; resolved_method_index(cbuf) ties
  // the call site's relocation to the resolved target method.
  __ ic_call((address)$meth$$method, resolved_method_index(cbuf));
  // Trailing NOP marking the call site — see MacroAssembler::post_call_nop.
  __ post_call_nop();
%}
1856
1857 enc_class Java_Compiled_Call (method meth) %{ // JAVA COMPILED CALL
1858 int disp = in_bytes(Method::from_compiled_offset());
1859 assert( -128 <= disp && disp <= 127, "compiled_code_offset isn't small");
1860
1861 // CALL *[EAX+in_bytes(Method::from_compiled_code_entry_point_offset())]
1862 MacroAssembler _masm(&cbuf);
1863 cbuf.set_insts_mark();
3376
// Integer immediate with the exact value 8.
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3386
// Pointer Immediate: any pointer constant (op_cost discourages use
// when a cheaper operand matches).
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
3395
// Null pointer immediate: pointer constant with value 0.
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
3405
// Long Immediate: any 64-bit constant (op_cost discourages use when a
// cheaper specialized operand matches).
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
3414
3415 // Long Immediate zero
3416 operand immL0() %{
|