< prev index next >

src/hotspot/share/c1/c1_LIRAssembler.cpp

Print this page

 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "asm/assembler.inline.hpp"
 27 #include "c1/c1_Compilation.hpp"
 28 #include "c1/c1_Instruction.hpp"
 29 #include "c1/c1_InstructionPrinter.hpp"
 30 #include "c1/c1_LIRAssembler.hpp"
 31 #include "c1/c1_MacroAssembler.hpp"
 32 #include "c1/c1_ValueStack.hpp"

 33 #include "ci/ciInstance.hpp"
 34 #include "compiler/compilerDefinitions.inline.hpp"
 35 #include "compiler/oopMap.hpp"
 36 #include "runtime/os.hpp"

 37 #include "runtime/vm_version.hpp"
 38 
 39 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
 40   // We must have enough patching space so that call can be inserted.
 41   // We cannot use fat nops here, since the concurrent code rewrite may transiently
 42   // create the illegal instruction sequence.
 43   while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
 44     _masm->nop();
 45   }
 46   info->set_force_reexecute();
 47   patch->install(_masm, patch_code, obj, info);
 48   append_code_stub(patch);
 49 
 50 #ifdef ASSERT
 51   Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
 52   if (patch->id() == PatchingStub::access_field_id) {
 53     switch (code) {
 54       case Bytecodes::_putstatic:
 55       case Bytecodes::_getstatic:
 56       case Bytecodes::_putfield:

102 //---------------------------------------------------------------
103 
104 
// Set up the assembler state for one compilation: cache the macro
// assembler and frame map owned by the Compilation and reset all
// per-method bookkeeping (current block, pending non-safepoint info,
// patched-oop counter).
LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(nullptr)
 , _pending_non_safepoint(nullptr)
 , _pending_non_safepoint_offset(0)
 , _immediate_oops_patched(0)
{
  // Slow-path stubs are collected during emission and flushed at the
  // end of the method (see append_code_stub / emit_stubs).
  _slow_case_stubs = new CodeStubList();
}
116 
117 
LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
}
123 
124 
125 void LIR_Assembler::check_codespace() {
126   CodeSection* cs = _masm->code_section();
127   if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
128     BAILOUT("CodeBuffer overflow");
129   }
130 }
131 
132 
133 void LIR_Assembler::append_code_stub(CodeStub* stub) {
134   _immediate_oops_patched += stub->nr_immediate_oops_patched();
135   _slow_case_stubs->append(stub);
136 }
137 
138 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
139   for (int m = 0; m < stub_list->length(); m++) {
140     CodeStub* s = stub_list->at(m);
141 

313     }
314 #endif /* PRODUCT */
315   }
316 }
317 
#ifdef ASSERT
// Debug-only sanity check: after code emission, every branch target
// block must have had its label bound; an unbound label would mean a
// branch to nowhere.
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  // NOTE(review): the loop bound is length() - 1, so the LAST branch
  // target block is never checked -- confirm this is intentional.
  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif
330 
331 //----------------------------------debug info--------------------------------
332 
333 
334 void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
335   int pc_offset = code_offset();
336   flush_debug_info(pc_offset);
337   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
338   if (info->exception_handlers() != nullptr) {
339     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
340   }
341 }
342 
343 
344 void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
345   flush_debug_info(pc_offset);
346   cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
347   if (cinfo->exception_handlers() != nullptr) {
348     compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
349   }
350 }
351 
352 static ValueStack* debug_info(Instruction* ins) {
353   StateSplit* ss = ins->as_StateSplit();
354   if (ss != nullptr) return ss->state();
355   return ins->state_before();
356 }
357 
358 void LIR_Assembler::process_debug_info(LIR_Op* op) {
359   Instruction* src = op->source();
360   if (src == nullptr)  return;
361   int pc_offset = code_offset();
362   if (_pending_non_safepoint == src) {
363     _pending_non_safepoint_offset = pc_offset;
364     return;
365   }
366   ValueStack* vstack = debug_info(src);

471   case lir_dynamic_call:
472     call(op, relocInfo::static_call_type);
473     break;
474   case lir_optvirtual_call:
475     call(op, relocInfo::opt_virtual_call_type);
476     break;
477   case lir_icvirtual_call:
478     ic_call(op);
479     break;
480   default:
481     fatal("unexpected op code: %s", op->name());
482     break;
483   }
484 
485   // JSR 292
486   // Record if this method has MethodHandle invokes.
487   if (op->is_method_handle_invoke()) {
488     compilation()->set_has_method_handle_invokes(true);
489   }
490 






491 #if defined(IA32) && defined(COMPILER2)
492   // C2 leave fpu stack dirty clean it
493   if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
494     int i;
495     for ( i = 1; i <= 7 ; i++ ) {
496       ffree(i);
497     }
498     if (!op->result_opr()->is_float_kind()) {
499       ffree(0);
500     }
501   }
502 #endif // IA32 && COMPILER2
503 }
504 
505 
// Bind the label carried by a LIR_OpLabel to the current code position.
void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}
509 
510 

577       } else {
578         Unimplemented();
579       }
580       break;
581     }
582 
583     case lir_monaddr:
584       monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
585       break;
586 
587     case lir_unwind:
588       unwind_op(op->in_opr());
589       break;
590 
591     default:
592       Unimplemented();
593       break;
594   }
595 }
596 







































































































































597 
598 void LIR_Assembler::emit_op0(LIR_Op0* op) {
599   switch (op->code()) {
600     case lir_nop:
601       assert(op->info() == nullptr, "not supported");
602       _masm->nop();
603       break;
604 
605     case lir_label:
606       Unimplemented();
607       break;
608 
609     case lir_std_entry: {
610       // init offsets
611       offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
612       if (needs_icache(compilation()->method())) {
613         int offset = check_icache();
614         offsets()->set_value(CodeOffsets::Entry, offset);
615       }
616       _masm->align(CodeEntryAlignment);
617       offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
618       _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
619       if (needs_clinit_barrier_on_entry(compilation()->method())) {
620         clinit_barrier(compilation()->method());
621       }
622       build_frame();
623       offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
624       break;
625     }
626 
627     case lir_osr_entry:
628       offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
629       osr_entry();
630       break;
631 
632 #ifdef IA32
633     case lir_fpop_raw:
634       fpop();
635       break;
636 #endif // IA32
637 
638     case lir_breakpoint:
639       breakpoint();
640       break;
641 
642     case lir_membar:
643       membar();
644       break;
645 

658     case lir_membar_storestore:
659       membar_storestore();
660       break;
661 
662     case lir_membar_loadstore:
663       membar_loadstore();
664       break;
665 
666     case lir_membar_storeload:
667       membar_storeload();
668       break;
669 
670     case lir_get_thread:
671       get_thread(op->result_opr());
672       break;
673 
674     case lir_on_spin_wait:
675       on_spin_wait();
676       break;
677 




678     default:
679       ShouldNotReachHere();
680       break;
681   }
682 }
683 
684 
685 void LIR_Assembler::emit_op2(LIR_Op2* op) {
686   switch (op->code()) {
687     case lir_cmp:
688       if (op->info() != nullptr) {
689         assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
690                "shouldn't be codeemitinfo for non-address operands");
691         add_debug_info_for_null_check_here(op->info()); // exception possible
692       }
693       comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
694       break;
695 
696     case lir_cmp_l2i:
697     case lir_cmp_fd2i:

758 
759     default:
760       Unimplemented();
761       break;
762   }
763 }
764 
765 void LIR_Assembler::emit_op4(LIR_Op4* op) {
766   switch(op->code()) {
767     case lir_cmove:
768       cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
769       break;
770 
771     default:
772       Unimplemented();
773       break;
774   }
775 }
776 
// Emit the method prologue: allocate the stack frame and perform the
// stack bang used for stack-overflow checking.
void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}
780 
781 
// Round an FPU register value by storing it to its stack slot, which
// narrows it to the 32/64-bit memory format. Only legal when
// strict_fp_requires_explicit_rounding is set for the platform.
void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert(strict_fp_requires_explicit_rounding, "not required");
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}
790 
791 
792 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
793   if (src->is_register()) {
794     if (dest->is_register()) {
795       assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
796       reg2reg(src,  dest);
797     } else if (dest->is_stack()) {
798       assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");

 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "asm/assembler.inline.hpp"
 27 #include "c1/c1_Compilation.hpp"
 28 #include "c1/c1_Instruction.hpp"
 29 #include "c1/c1_InstructionPrinter.hpp"
 30 #include "c1/c1_LIRAssembler.hpp"
 31 #include "c1/c1_MacroAssembler.hpp"
 32 #include "c1/c1_ValueStack.hpp"
 33 #include "ci/ciInlineKlass.hpp"
 34 #include "ci/ciInstance.hpp"
 35 #include "compiler/compilerDefinitions.inline.hpp"
 36 #include "compiler/oopMap.hpp"
 37 #include "runtime/os.hpp"
 38 #include "runtime/sharedRuntime.hpp"
 39 #include "runtime/vm_version.hpp"
 40 
 41 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
 42   // We must have enough patching space so that call can be inserted.
 43   // We cannot use fat nops here, since the concurrent code rewrite may transiently
 44   // create the illegal instruction sequence.
 45   while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
 46     _masm->nop();
 47   }
 48   info->set_force_reexecute();
 49   patch->install(_masm, patch_code, obj, info);
 50   append_code_stub(patch);
 51 
 52 #ifdef ASSERT
 53   Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
 54   if (patch->id() == PatchingStub::access_field_id) {
 55     switch (code) {
 56       case Bytecodes::_putstatic:
 57       case Bytecodes::_getstatic:
 58       case Bytecodes::_putfield:

104 //---------------------------------------------------------------
105 
106 
// Set up the assembler state for one compilation: cache the macro
// assembler and frame map owned by the Compilation and reset all
// per-method bookkeeping (current block, pending non-safepoint info,
// patched-oop counter).
LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(nullptr)
 , _pending_non_safepoint(nullptr)
 , _pending_non_safepoint_offset(0)
 , _immediate_oops_patched(0)
{
  // Slow-path stubs are collected during emission and flushed at the
  // end of the method (see append_code_stub / emit_stubs).
  _slow_case_stubs = new CodeStubList();
}
118 
119 
LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion. The same applies to the
  // verified inline entry label.
  _unwind_handler_entry.reset();
  _verified_inline_entry.reset();
}
126 
127 
// Bail out of the compilation when the current code section is nearly
// full (less than 1K remaining on 32-bit, 2K on 64-bit), before an
// emission could overflow the CodeBuffer.
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}
134 
135 
// Queue a slow-path stub for emission at the end of the method and
// account for any immediate oops the stub will patch.
void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _immediate_oops_patched += stub->nr_immediate_oops_patched();
  _slow_case_stubs->append(stub);
}
140 
141 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
142   for (int m = 0; m < stub_list->length(); m++) {
143     CodeStub* s = stub_list->at(m);
144 

316     }
317 #endif /* PRODUCT */
318   }
319 }
320 
#ifdef ASSERT
// Debug-only sanity check: after code emission, every branch target
// block must have had its label bound; an unbound label would mean a
// branch to nowhere.
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  // NOTE(review): the loop bound is length() - 1, so the LAST branch
  // target block is never checked -- confirm this is intentional.
  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif
333 
334 //----------------------------------debug info--------------------------------
335 

// Record debug information for a branch at the current code offset and
// register the site's exception handlers (if any) with the compilation.
void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}
344 
// Record call-site debug information at the given pc offset and
// register the site's exception handlers (if any) with the compilation.
// maybe_return_as_fields is forwarded to the debug info recorder; it is
// set for call sites that may return an inline type as scalarized
// fields (see the emit_call handling of maybe_return_as_fields).
void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool maybe_return_as_fields) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, maybe_return_as_fields);
  if (cinfo->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}
352 
// Pick the ValueStack used for debug info: the state after a StateSplit
// instruction, otherwise the state before the instruction.
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != nullptr) return ss->state();
  return ins->state_before();
}
358 
359 void LIR_Assembler::process_debug_info(LIR_Op* op) {
360   Instruction* src = op->source();
361   if (src == nullptr)  return;
362   int pc_offset = code_offset();
363   if (_pending_non_safepoint == src) {
364     _pending_non_safepoint_offset = pc_offset;
365     return;
366   }
367   ValueStack* vstack = debug_info(src);

472   case lir_dynamic_call:
473     call(op, relocInfo::static_call_type);
474     break;
475   case lir_optvirtual_call:
476     call(op, relocInfo::opt_virtual_call_type);
477     break;
478   case lir_icvirtual_call:
479     ic_call(op);
480     break;
481   default:
482     fatal("unexpected op code: %s", op->name());
483     break;
484   }
485 
486   // JSR 292
487   // Record if this method has MethodHandle invokes.
488   if (op->is_method_handle_invoke()) {
489     compilation()->set_has_method_handle_invokes(true);
490   }
491 
492   ciInlineKlass* vk = nullptr;
493   if (op->maybe_return_as_fields(&vk)) {
494     int offset = store_inline_type_fields_to_buf(vk);
495     add_call_info(offset, op->info(), true);
496   }
497 
498 #if defined(IA32) && defined(COMPILER2)
499   // C2 leave fpu stack dirty clean it
500   if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
501     int i;
502     for ( i = 1; i <= 7 ; i++ ) {
503       ffree(i);
504     }
505     if (!op->result_opr()->is_float_kind()) {
506       ffree(0);
507     }
508   }
509 #endif // IA32 && COMPILER2
510 }
511 
512 
// Bind the label carried by a LIR_OpLabel to the current code position.
void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}
516 
517 

584       } else {
585         Unimplemented();
586       }
587       break;
588     }
589 
590     case lir_monaddr:
591       monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
592       break;
593 
594     case lir_unwind:
595       unwind_op(op->in_opr());
596       break;
597 
598     default:
599       Unimplemented();
600       break;
601   }
602 }
603 
// Record safepoint debug info, with an empty oop map, for the runtime
// call made from a scalarized (inline-type) method entry at pc_offset.
void LIR_Assembler::add_scalarized_entry_info(int pc_offset) {
  flush_debug_info(pc_offset);
  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  // The VEP and VIEP(RO) of a C1-compiled method call buffer_inline_args_xxx()
  // before doing any argument shuffling. This call may cause GC. When GC happens,
  // all the parameters are still as passed by the caller, so we just use
  // map->set_include_argument_oops() inside frame::sender_for_compiled_frame(RegisterMap* map).
  // There's no need to build a GC map here.
  OopMap* oop_map = new OopMap(0, 0);
  debug_info->add_safepoint(pc_offset, oop_map);
  DebugToken* locvals = debug_info->create_scope_values(nullptr); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* expvals = debug_info->create_scope_values(nullptr); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* monvals = debug_info->create_monitor_values(nullptr); // FIXME: need testing with synchronized method
  bool reexecute = false;
  bool return_oop = false; // This flag will be ignored since it is used only for C2 with escape analysis.
  bool rethrow_exception = false;
  bool is_method_handle_invoke = false;
  debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute, rethrow_exception, is_method_handle_invoke, return_oop, false, locvals, expvals, monvals);
  debug_info->end_safepoint(pc_offset);
}
624 
625 // The entry points of C1-compiled methods can have the following types:
626 // (1) Methods with no inline type args
627 // (2) Methods with inline type receiver but no inline type args
628 //     VIEP_RO is the same as VIEP
629 // (3) Methods with non-inline type receiver and some inline type args
630 //     VIEP_RO is the same as VEP
631 // (4) Methods with inline type receiver and other inline type args
632 //     Separate VEP, VIEP and VIEP_RO
633 //
634 // (1)               (2)                 (3)                    (4)
635 // UEP/UIEP:         VEP:                UEP:                   UEP:
636 //   check_icache      pack receiver       check_icache           check_icache
637 // VEP/VIEP/VIEP_RO    jump to VIEP      VEP/VIEP_RO:           VIEP_RO:
638 //   body            UEP/UIEP:             pack inline args       pack inline args (except receiver)
639 //                     check_icache        jump to VIEP           jump to VIEP
640 //                   VIEP/VIEP_RO        UIEP:                  VEP:
641 //                     body                check_icache           pack all inline args
642 //                                       VIEP:                    jump to VIEP
643 //                                         body                 UIEP:
644 //                                                                check_icache
645 //                                                              VIEP:
646 //                                                                body
// Emit all standard (non-OSR) entry points of the method. When the
// signature has scalarized inline-type arguments, separate icache-check
// entries (UEP/UIEP) and packing entries (VEP / VIEP / VIEP_RO) are
// emitted per the case table in the comment preceding this function;
// otherwise all verified entries share one code position.
void LIR_Assembler::emit_std_entries() {
  offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());

  _masm->align(CodeEntryAlignment);
  const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();
  if (ces->has_scalarized_args()) {
    assert(InlineTypePassFieldsAsArgs && method()->get_Method()->has_scalarized_args(), "must be");
    CodeOffsets::Entries ro_entry_type = ces->c1_inline_ro_entry_type();

    // UEP: check icache and fall-through
    if (ro_entry_type != CodeOffsets::Verified_Inline_Entry) {
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
      if (needs_icache(method())) {
        check_icache();
      }
    }

    // VIEP_RO: pack all value parameters, except the receiver
    if (ro_entry_type == CodeOffsets::Verified_Inline_Entry_RO) {
      emit_std_entry(CodeOffsets::Verified_Inline_Entry_RO, ces);
    }

    // VEP: pack all value parameters
    _masm->align(CodeEntryAlignment);
    emit_std_entry(CodeOffsets::Verified_Entry, ces);

    // UIEP: check icache and fall-through
    _masm->align(CodeEntryAlignment);
    offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
    if (ro_entry_type == CodeOffsets::Verified_Inline_Entry) {
      // Special case if we have VIEP == VIEP(RO):
      // this means UIEP (called by C1) == UEP (called by C2).
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    }
    if (needs_icache(method())) {
      check_icache();
    }

    // VIEP: all value parameters are passed as refs - no packing.
    emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);

    if (ro_entry_type != CodeOffsets::Verified_Inline_Entry_RO) {
      // The VIEP(RO) is the same as VEP or VIEP
      assert(ro_entry_type == CodeOffsets::Verified_Entry ||
             ro_entry_type == CodeOffsets::Verified_Inline_Entry, "must be");
      offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO,
                           offsets()->value(ro_entry_type));
    }
  } else {
    // All 3 entries are the same (no inline type packing)
    offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
    if (needs_icache(method())) {
      check_icache();
    }
    emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);
    offsets()->set_value(CodeOffsets::Verified_Entry, offsets()->value(CodeOffsets::Verified_Inline_Entry));
    offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO, offsets()->value(CodeOffsets::Verified_Inline_Entry));
  }
}
707 
// Emit a single method entry of the given kind and record its offset.
// VEP and VIEP_RO pack scalarized inline-type arguments: the macro
// assembler builds the frame and returns the offset of the runtime
// call it emits, for which safepoint debug info is recorded. VIEP
// takes arguments as references and just builds the frame.
void LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
  offsets()->set_value(entry, _masm->offset());
  _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
  switch (entry) {
  case CodeOffsets::Verified_Entry: {
    if (needs_clinit_barrier_on_entry(method())) {
      clinit_barrier(method());
    }
    int rt_call_offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
    add_scalarized_entry_info(rt_call_offset);
    break;
  }
  case CodeOffsets::Verified_Inline_Entry_RO: {
    // VIEP_RO never packs the receiver, so a static method (which would
    // need a clinit barrier) cannot reach here.
    assert(!needs_clinit_barrier_on_entry(method()), "can't be static");
    int rt_call_offset = _masm->verified_inline_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
    add_scalarized_entry_info(rt_call_offset);
    break;
  }
  case CodeOffsets::Verified_Inline_Entry: {
    if (needs_clinit_barrier_on_entry(method())) {
      clinit_barrier(method());
    }
    build_frame();
    offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
    break;
  }
  default:
    ShouldNotReachHere();
    break;
  }
}
739 
740 void LIR_Assembler::emit_op0(LIR_Op0* op) {
741   switch (op->code()) {
742     case lir_nop:
743       assert(op->info() == nullptr, "not supported");
744       _masm->nop();
745       break;
746 
747     case lir_label:
748       Unimplemented();
749       break;
750 
751     case lir_std_entry:
752       emit_std_entries();













753       break;

754 
755     case lir_osr_entry:
756       offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
757       osr_entry();
758       break;
759 
760 #ifdef IA32
761     case lir_fpop_raw:
762       fpop();
763       break;
764 #endif // IA32
765 
766     case lir_breakpoint:
767       breakpoint();
768       break;
769 
770     case lir_membar:
771       membar();
772       break;
773 

786     case lir_membar_storestore:
787       membar_storestore();
788       break;
789 
790     case lir_membar_loadstore:
791       membar_loadstore();
792       break;
793 
794     case lir_membar_storeload:
795       membar_storeload();
796       break;
797 
798     case lir_get_thread:
799       get_thread(op->result_opr());
800       break;
801 
802     case lir_on_spin_wait:
803       on_spin_wait();
804       break;
805 
806     case lir_check_orig_pc:
807       check_orig_pc();
808       break;
809 
810     default:
811       ShouldNotReachHere();
812       break;
813   }
814 }
815 
816 
817 void LIR_Assembler::emit_op2(LIR_Op2* op) {
818   switch (op->code()) {
819     case lir_cmp:
820       if (op->info() != nullptr) {
821         assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
822                "shouldn't be codeemitinfo for non-address operands");
823         add_debug_info_for_null_check_here(op->info()); // exception possible
824       }
825       comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
826       break;
827 
828     case lir_cmp_l2i:
829     case lir_cmp_fd2i:

890 
891     default:
892       Unimplemented();
893       break;
894   }
895 }
896 
// Emit code for a four-operand LIR instruction. The only op currently
// supported is lir_cmove (conditional move).
void LIR_Assembler::emit_op4(LIR_Op4* op) {
  switch(op->code()) {
    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
      break;

    default:
      Unimplemented();
      break;
  }
}
908 
// Emit the method prologue: allocate the stack frame and bang the
// stack, additionally passing the orig-pc slot offset, the
// stack-repair flag and the verified inline entry label needed for
// methods with scalarized inline-type arguments.
void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()),
                     needs_stack_repair(), method()->has_scalarized_args(), &_verified_inline_entry);
}
913 
914 
// Round an FPU register value by storing it to its stack slot, which
// narrows it to the 32/64-bit memory format. Only legal when
// strict_fp_requires_explicit_rounding is set for the platform.
void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert(strict_fp_requires_explicit_rounding, "not required");
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}
923 
924 
925 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
926   if (src->is_register()) {
927     if (dest->is_register()) {
928       assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
929       reg2reg(src,  dest);
930     } else if (dest->is_stack()) {
931       assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
< prev index next >