src/hotspot/share/c1/c1_LIRAssembler.cpp

 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "asm/assembler.inline.hpp"
 26 #include "c1/c1_Compilation.hpp"
 27 #include "c1/c1_Instruction.hpp"
 28 #include "c1/c1_InstructionPrinter.hpp"
 29 #include "c1/c1_LIRAssembler.hpp"
 30 #include "c1/c1_MacroAssembler.hpp"
 31 #include "c1/c1_ValueStack.hpp"

 32 #include "compiler/compilerDefinitions.inline.hpp"
 33 #include "compiler/oopMap.hpp"
 34 #include "runtime/os.hpp"

 35 #include "runtime/vm_version.hpp"
 36 
 37 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
 38   // We must have enough patching space so that the call can be inserted.
 39   // We cannot use fat nops here, since the concurrent code rewrite may transiently
 40   // create an illegal instruction sequence.
 41   while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
 42     _masm->nop();
 43   }
 44   info->set_force_reexecute();
 45   patch->install(_masm, patch_code, obj, info);
 46   append_code_stub(patch);
 47 
 48 #ifdef ASSERT
 49   Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
 50   if (patch->id() == PatchingStub::access_field_id) {
 51     switch (code) {
 52       case Bytecodes::_putstatic:
 53       case Bytecodes::_getstatic:
 54       case Bytecodes::_putfield:

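The padding loop in patching_epilog above deserves a note: it emits individual nops rather than one fat (multi-byte) nop because the code may be rewritten concurrently, and every intermediate state of the patch site must remain a legal instruction sequence. A minimal standalone sketch of the invariant, with an assumed jump size and the x86 0x90 nop standing in for the real encodings:

#include <cstddef>
#include <vector>

static const std::size_t kJumpSize = 5;  // assumed size of the jump patched in later

static void pad_patch_site(std::vector<unsigned char>& code, std::size_t patch_start) {
  // One-byte nops keep the buffer decodable at every intermediate state,
  // which a partially written multi-byte nop would not.
  while (code.size() - patch_start < kJumpSize) {
    code.push_back(0x90);  // x86 single-byte nop (illustrative)
  }
}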
100 //---------------------------------------------------------------
101 
102 
103 LIR_Assembler::LIR_Assembler(Compilation* c):
104    _masm(c->masm())
105  , _compilation(c)
106  , _frame_map(c->frame_map())
107  , _current_block(nullptr)
108  , _pending_non_safepoint(nullptr)
109  , _pending_non_safepoint_offset(0)
110  , _immediate_oops_patched(0)
111 {
112   _slow_case_stubs = new CodeStubList();
113 }
114 
115 
116 LIR_Assembler::~LIR_Assembler() {
 117   // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
118   // Reset it here to avoid an assertion.
119   _unwind_handler_entry.reset();
120 }
121 
122 
123 void LIR_Assembler::check_codespace() {
124   CodeSection* cs = _masm->code_section();
125   if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
126     BAILOUT("CodeBuffer overflow");
127   }
128 }
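check_codespace picks its low-water mark with the NOT_LP64/LP64_ONLY macro pair: 1K of remaining buffer on 32-bit builds, 2K on 64-bit ones. A hedged standalone sketch of the same guard (enough_codespace is a made-up name; the real code bails out of the compilation via BAILOUT):

#include <cstddef>

static const std::size_t K = 1024;

static bool enough_codespace(std::size_t remaining_bytes) {
  // Mirrors NOT_LP64(1*K) LP64_ONLY(2*K): wider pointers mean longer
  // instruction sequences, so 64-bit builds keep more headroom.
  const std::size_t threshold = (sizeof(void*) == 8) ? 2 * K : 1 * K;
  return remaining_bytes >= threshold;
}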
129 
130 
131 void LIR_Assembler::append_code_stub(CodeStub* stub) {
132   _immediate_oops_patched += stub->nr_immediate_oops_patched();
133   _slow_case_stubs->append(stub);
134 }
135 
136 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
137   for (int m = 0; m < stub_list->length(); m++) {
138     CodeStub* s = stub_list->at(m);
139 

310     }
311 #endif /* PRODUCT */
312   }
313 }
314 
315 #ifdef ASSERT
316 void LIR_Assembler::check_no_unbound_labels() {
317   CHECK_BAILOUT();
318 
319   for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
320     if (!_branch_target_blocks.at(i)->label()->is_bound()) {
321       tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
322       assert(false, "unbound label");
323     }
324   }
325 }
326 #endif
327 
328 //----------------------------------debug info--------------------------------
329 
330 
331 void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
332   int pc_offset = code_offset();
333   flush_debug_info(pc_offset);
334   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
335   if (info->exception_handlers() != nullptr) {
336     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
337   }
338 }
339 
340 
341 void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
342   flush_debug_info(pc_offset);
343   cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
344   if (cinfo->exception_handlers() != nullptr) {
345     compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
346   }
347 }
348 
349 static ValueStack* debug_info(Instruction* ins) {
350   StateSplit* ss = ins->as_StateSplit();
351   if (ss != nullptr) return ss->state();
352   return ins->state_before();
353 }
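The debug_info helper prefers the state attached to a StateSplit instruction and only falls back to state_before for plain instructions. A standalone model of that fallback (types are illustrative):

struct State {};

struct Instr {
  State* state_before = nullptr;
  State* split_state = nullptr;  // set only for state-splitting instructions
};

static State* debug_state(const Instr& i) {
  // Prefer the post-split state when one exists, as debug_info() does above.
  return (i.split_state != nullptr) ? i.split_state : i.state_before;
}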
354 
355 void LIR_Assembler::process_debug_info(LIR_Op* op) {
356   Instruction* src = op->source();
357   if (src == nullptr)  return;
358   int pc_offset = code_offset();
359   if (_pending_non_safepoint == src) {
360     _pending_non_safepoint_offset = pc_offset;
361     return;
362   }
363   ValueStack* vstack = debug_info(src);

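The early return in process_debug_info is a small coalescing trick: while consecutive LIR ops come from the same source Instruction, only the pending offset slides forward, so one non-safepoint debug entry covers the whole run. A standalone model of that bookkeeping (names are illustrative):

struct Source {};

struct NonSafepointTracker {
  const Source* pending = nullptr;
  int pending_offset = 0;

  // Returns true when the op was absorbed into the pending record.
  bool absorb(const Source* src, int pc_offset) {
    if (src != nullptr && src == pending) {
      pending_offset = pc_offset;  // extend the record, emit nothing new
      return true;
    }
    return false;
  }
};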
461   } else {
462     emit_static_call_stub();
463   }
464   CHECK_BAILOUT();
465 
466   switch (op->code()) {
467   case lir_static_call:
468   case lir_dynamic_call:
469     call(op, relocInfo::static_call_type);
470     break;
471   case lir_optvirtual_call:
472     call(op, relocInfo::opt_virtual_call_type);
473     break;
474   case lir_icvirtual_call:
475     ic_call(op);
476     break;
477   default:
478     fatal("unexpected op code: %s", op->name());
479     break;
480   }

481 }
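The switch above maps each LIR call opcode to the relocation type the runtime later uses when patching the call site; static and dynamic calls share one relocation kind, and inline-cache calls take the ic_call path. A standalone model of the dispatch (the enums are illustrative stand-ins for the LIR opcodes and relocInfo constants):

#include <stdexcept>

enum class CallKind { Static, Dynamic, OptVirtual, ICVirtual };
enum class RelocType { StaticCall, OptVirtualCall, VirtualCall };

static RelocType reloc_for(CallKind k) {
  switch (k) {
    case CallKind::Static:
    case CallKind::Dynamic:    return RelocType::StaticCall;     // shared kind
    case CallKind::OptVirtual: return RelocType::OptVirtualCall;
    case CallKind::ICVirtual:  return RelocType::VirtualCall;    // inline cache
  }
  throw std::logic_error("unexpected call kind");
}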
482 
483 
484 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
485   _masm->bind (*(op->label()));
486 }
487 
488 
489 void LIR_Assembler::emit_op1(LIR_Op1* op) {
490   switch (op->code()) {
491     case lir_move:
492       if (op->move_kind() == lir_move_volatile) {
493         assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
494         volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
495       } else {
496         move_op(op->in_opr(), op->result_opr(), op->type(),
497                 op->patch_code(), op->info(),
498                 op->move_kind() == lir_move_wide);
499       }
500       break;

550       } else {
551         Unimplemented();
552       }
553       break;
554     }
555 
556     case lir_monaddr:
557       monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
558       break;
559 
560     case lir_unwind:
561       unwind_op(op->in_opr());
562       break;
563 
564     default:
565       Unimplemented();
566       break;
567   }
568 }
569 
570 
571 void LIR_Assembler::emit_op0(LIR_Op0* op) {
572   switch (op->code()) {
573     case lir_nop:
574       assert(op->info() == nullptr, "not supported");
575       _masm->nop();
576       break;
577 
578     case lir_label:
579       Unimplemented();
580       break;
581 
582     case lir_std_entry: {
583       // init offsets
584       offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
585       if (needs_icache(compilation()->method())) {
586         int offset = check_icache();
587         offsets()->set_value(CodeOffsets::Entry, offset);
588       }
589       _masm->align(CodeEntryAlignment);
590       offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
591       _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
592       if (needs_clinit_barrier_on_entry(compilation()->method())) {
593         clinit_barrier(compilation()->method());
594       }
595       build_frame();
596       offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
597       break;
598     }
599 
600     case lir_osr_entry:
601       offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
602       osr_entry();
603       break;
604 
605     case lir_breakpoint:
606       breakpoint();
607       break;
608 
609     case lir_membar:
610       membar();
611       break;
612 
613     case lir_membar_acquire:
614       membar_acquire();
615       break;
616 
617     case lir_membar_release:
618       membar_release();

625     case lir_membar_storestore:
626       membar_storestore();
627       break;
628 
629     case lir_membar_loadstore:
630       membar_loadstore();
631       break;
632 
633     case lir_membar_storeload:
634       membar_storeload();
635       break;
636 
637     case lir_get_thread:
638       get_thread(op->result_opr());
639       break;
640 
641     case lir_on_spin_wait:
642       on_spin_wait();
643       break;
644 
645     default:
646       ShouldNotReachHere();
647       break;
648   }
649 }
650 
651 
652 void LIR_Assembler::emit_op2(LIR_Op2* op) {
653   switch (op->code()) {
654     case lir_cmp:
655       if (op->info() != nullptr) {
656         assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
657                "shouldn't be codeemitinfo for non-address operands");
658         add_debug_info_for_null_check_here(op->info()); // exception possible
659       }
660       comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
661       break;
662 
663     case lir_cmp_l2i:
664     case lir_cmp_fd2i:

710 
711     default:
712       Unimplemented();
713       break;
714   }
715 }
716 
717 void LIR_Assembler::emit_op4(LIR_Op4* op) {
718   switch(op->code()) {
719     case lir_cmove:
720       cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
721       break;
722 
723     default:
724       Unimplemented();
725       break;
726   }
727 }
728 
729 void LIR_Assembler::build_frame() {
730   _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
731 }
732 
733 
734 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
735   if (src->is_register()) {
736     if (dest->is_register()) {
737       assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
738       reg2reg(src,  dest);
739     } else if (dest->is_stack()) {
740       assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
741       reg2stack(src, dest, type);
742     } else if (dest->is_address()) {
743       reg2mem(src, dest, type, patch_code, info, wide);
744     } else {
745       ShouldNotReachHere();
746     }
747 
748   } else if (src->is_stack()) {
749     assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
750     if (dest->is_register()) {

 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "asm/assembler.inline.hpp"
 26 #include "c1/c1_Compilation.hpp"
 27 #include "c1/c1_Instruction.hpp"
 28 #include "c1/c1_InstructionPrinter.hpp"
 29 #include "c1/c1_LIRAssembler.hpp"
 30 #include "c1/c1_MacroAssembler.hpp"
 31 #include "c1/c1_ValueStack.hpp"
 32 #include "ci/ciInlineKlass.hpp"
 33 #include "compiler/compilerDefinitions.inline.hpp"
 34 #include "compiler/oopMap.hpp"
 35 #include "runtime/os.hpp"
 36 #include "runtime/sharedRuntime.hpp"
 37 #include "runtime/vm_version.hpp"
 38 
 39 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
 40   // We must have enough patching space so that the call can be inserted.
 41   // We cannot use fat nops here, since the concurrent code rewrite may transiently
 42   // create an illegal instruction sequence.
 43   while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
 44     _masm->nop();
 45   }
 46   info->set_force_reexecute();
 47   patch->install(_masm, patch_code, obj, info);
 48   append_code_stub(patch);
 49 
 50 #ifdef ASSERT
 51   Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
 52   if (patch->id() == PatchingStub::access_field_id) {
 53     switch (code) {
 54       case Bytecodes::_putstatic:
 55       case Bytecodes::_getstatic:
 56       case Bytecodes::_putfield:

102 //---------------------------------------------------------------
103 
104 
105 LIR_Assembler::LIR_Assembler(Compilation* c):
106    _masm(c->masm())
107  , _compilation(c)
108  , _frame_map(c->frame_map())
109  , _current_block(nullptr)
110  , _pending_non_safepoint(nullptr)
111  , _pending_non_safepoint_offset(0)
112  , _immediate_oops_patched(0)
113 {
114   _slow_case_stubs = new CodeStubList();
115 }
116 
117 
118 LIR_Assembler::~LIR_Assembler() {
 119   // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
120   // Reset it here to avoid an assertion.
121   _unwind_handler_entry.reset();
122   _verified_inline_entry.reset();
123 }
124 
125 
126 void LIR_Assembler::check_codespace() {
127   CodeSection* cs = _masm->code_section();
128   if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
129     BAILOUT("CodeBuffer overflow");
130   }
131 }
132 
133 
134 void LIR_Assembler::append_code_stub(CodeStub* stub) {
135   _immediate_oops_patched += stub->nr_immediate_oops_patched();
136   _slow_case_stubs->append(stub);
137 }
138 
139 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
140   for (int m = 0; m < stub_list->length(); m++) {
141     CodeStub* s = stub_list->at(m);
142 

313     }
314 #endif /* PRODUCT */
315   }
316 }
317 
318 #ifdef ASSERT
319 void LIR_Assembler::check_no_unbound_labels() {
320   CHECK_BAILOUT();
321 
322   for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
323     if (!_branch_target_blocks.at(i)->label()->is_bound()) {
324       tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
325       assert(false, "unbound label");
326     }
327   }
328 }
329 #endif
330 
331 //----------------------------------debug info--------------------------------
332 

333 void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
334   int pc_offset = code_offset();
335   flush_debug_info(pc_offset);
336   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
337   if (info->exception_handlers() != nullptr) {
338     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
339   }
340 }
341 
342 void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool maybe_return_as_fields) {
343   flush_debug_info(pc_offset);
344   cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, maybe_return_as_fields);
345   if (cinfo->exception_handlers() != nullptr) {
346     compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
347   }
348 }
349 
350 static ValueStack* debug_info(Instruction* ins) {
351   StateSplit* ss = ins->as_StateSplit();
352   if (ss != nullptr) return ss->state();
353   return ins->state_before();
354 }
355 
356 void LIR_Assembler::process_debug_info(LIR_Op* op) {
357   Instruction* src = op->source();
358   if (src == nullptr)  return;
359   int pc_offset = code_offset();
360   if (_pending_non_safepoint == src) {
361     _pending_non_safepoint_offset = pc_offset;
362     return;
363   }
364   ValueStack* vstack = debug_info(src);

462   } else {
463     emit_static_call_stub();
464   }
465   CHECK_BAILOUT();
466 
467   switch (op->code()) {
468   case lir_static_call:
469   case lir_dynamic_call:
470     call(op, relocInfo::static_call_type);
471     break;
472   case lir_optvirtual_call:
473     call(op, relocInfo::opt_virtual_call_type);
474     break;
475   case lir_icvirtual_call:
476     ic_call(op);
477     break;
478   default:
479     fatal("unexpected op code: %s", op->name());
480     break;
481   }
482 
483   ciInlineKlass* vk = nullptr;
484   if (op->maybe_return_as_fields(&vk)) {
485     int offset = store_inline_type_fields_to_buf(vk);
486     add_call_info(offset, op->info(), true);
487   }
488 }
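The block added after the call switch handles callees that may return an inline type scalarized into registers: store_inline_type_fields_to_buf emits a runtime call that buffers the fields into a heap object, and add_call_info records debug info at that call's offset. A hedged standalone model of the control flow (function and type names are hypothetical):

#include <vector>

struct DebugInfo { std::vector<int> call_offsets; };

// emit_buffer_call stands in for the runtime stub that heap-allocates a
// buffer and stores the register-held fields into it.
static void after_java_call(bool may_return_as_fields,
                            int (*emit_buffer_call)(),
                            DebugInfo& info) {
  if (may_return_as_fields) {
    int offset = emit_buffer_call();      // runtime call; may trigger GC
    info.call_offsets.push_back(offset);  // debug info must cover this site too
  }
}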
489 
490 
491 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
492   _masm->bind (*(op->label()));
493 }
494 
495 
496 void LIR_Assembler::emit_op1(LIR_Op1* op) {
497   switch (op->code()) {
498     case lir_move:
499       if (op->move_kind() == lir_move_volatile) {
500         assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
501         volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
502       } else {
503         move_op(op->in_opr(), op->result_opr(), op->type(),
504                 op->patch_code(), op->info(),
505                 op->move_kind() == lir_move_wide);
506       }
507       break;

557       } else {
558         Unimplemented();
559       }
560       break;
561     }
562 
563     case lir_monaddr:
564       monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
565       break;
566 
567     case lir_unwind:
568       unwind_op(op->in_opr());
569       break;
570 
571     default:
572       Unimplemented();
573       break;
574   }
575 }
576 
577 void LIR_Assembler::add_scalarized_entry_info(int pc_offset) {
578   flush_debug_info(pc_offset);
579   DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
580   // The VEP and VIEP(RO) of a C1-compiled method call buffer_inline_args_xxx()
581   // before doing any argument shuffling. This call may cause GC. When GC happens,
582   // all the parameters are still as passed by the caller, so we just use
583   // map->set_include_argument_oops() inside frame::sender_for_compiled_frame(RegisterMap* map).
584   // There's no need to build a GC map here.
585   OopMap* oop_map = new OopMap(0, 0);
586   debug_info->add_safepoint(pc_offset, oop_map);
587   DebugToken* locvals = debug_info->create_scope_values(nullptr); // FIXME is this needed (for Java debugging to work properly??)
588   DebugToken* expvals = debug_info->create_scope_values(nullptr); // FIXME is this needed (for Java debugging to work properly??)
589   DebugToken* monvals = debug_info->create_monitor_values(nullptr); // FIXME: need testing with synchronized method
590   bool reexecute = false;
 591   bool return_oop = false; // This flag will be ignored since it is used only for C2 with escape analysis.
592   bool rethrow_exception = false;
593   bool is_method_handle_invoke = false;
594   debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute, rethrow_exception, is_method_handle_invoke, return_oop, false, locvals, expvals, monvals);
595   debug_info->end_safepoint(pc_offset);
596 }
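The empty OopMap is deliberate, as the comment explains: at a scalarized entry this frame holds no oops yet, and a GC triggered by the buffering call finds the argument oops through the caller. A standalone sketch of recording such a safepoint (SafepointRecord is an illustrative stand-in for the recorder state):

#include <vector>

struct SafepointRecord {
  int pc_offset;
  std::vector<int> oop_slots;  // stays empty: this frame owns no oops yet
};

static SafepointRecord scalarized_entry_safepoint(int pc_offset) {
  // GC walks the still-untouched arguments through the caller frame, so an
  // empty map is both sufficient and cheap to build.
  return SafepointRecord{pc_offset, {}};
}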
597 
598 // The entries points of C1-compiled methods can have the following types:
599 // (1) Methods with no inline type args
600 // (2) Methods with inline type receiver but no inline type args
601 //     VIEP_RO is the same as VIEP
602 // (3) Methods with non-inline type receiver and some inline type args
603 //     VIEP_RO is the same as VEP
604 // (4) Methods with inline type receiver and other inline type args
605 //     Separate VEP, VIEP and VIEP_RO
606 //
607 // (1)               (2)                 (3)                    (4)
608 // UEP/UIEP:         VEP:                UEP:                   UEP:
609 //   check_icache      pack receiver       check_icache           check_icache
610 // VEP/VIEP/VIEP_RO    jump to VIEP      VEP/VIEP_RO:           VIEP_RO:
611 //   body            UEP/UIEP:             pack inline args       pack inline args (except receiver)
612 //                     check_icache        jump to VIEP           jump to VIEP
613 //                   VIEP/VIEP_RO        UIEP:                  VEP:
614 //                     body                check_icache           pack all inline args
615 //                                       VIEP:                    jump to VIEP
616 //                                         body                 UIEP:
617 //                                                                check_icache
618 //                                                              VIEP:
619 //                                                                body
620 void LIR_Assembler::emit_std_entries() {
621   offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
622 
623   _masm->align(CodeEntryAlignment);
624   const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();
625   if (ces->has_scalarized_args()) {
626     assert(InlineTypePassFieldsAsArgs && method()->get_Method()->has_scalarized_args(), "must be");
627     CodeOffsets::Entries ro_entry_type = ces->c1_inline_ro_entry_type();
628 
629     // UEP: check icache and fall-through
630     if (ro_entry_type != CodeOffsets::Verified_Inline_Entry) {
631       offsets()->set_value(CodeOffsets::Entry, _masm->offset());
632       if (needs_icache(method())) {
633         check_icache();
634       }
635     }
636 
637     // VIEP_RO: pack all value parameters, except the receiver
638     if (ro_entry_type == CodeOffsets::Verified_Inline_Entry_RO) {
639       emit_std_entry(CodeOffsets::Verified_Inline_Entry_RO, ces);
640     }
641 
642     // VEP: pack all value parameters
643     _masm->align(CodeEntryAlignment);
644     emit_std_entry(CodeOffsets::Verified_Entry, ces);
645 
646     // UIEP: check icache and fall-through
647     _masm->align(CodeEntryAlignment);
648     offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
649     if (ro_entry_type == CodeOffsets::Verified_Inline_Entry) {
650       // Special case if we have VIEP == VIEP(RO):
651       // this means UIEP (called by C1) == UEP (called by C2).
652       offsets()->set_value(CodeOffsets::Entry, _masm->offset());
653     }
654     if (needs_icache(method())) {
655       check_icache();
656     }
657 
658     // VIEP: all value parameters are passed as refs - no packing.
659     emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);
660 
661     if (ro_entry_type != CodeOffsets::Verified_Inline_Entry_RO) {
662       // The VIEP(RO) is the same as VEP or VIEP
663       assert(ro_entry_type == CodeOffsets::Verified_Entry ||
664              ro_entry_type == CodeOffsets::Verified_Inline_Entry, "must be");
665       offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO,
666                            offsets()->value(ro_entry_type));
667     }
668   } else {
669     // All 3 entries are the same (no inline type packing)
670     offsets()->set_value(CodeOffsets::Entry, _masm->offset());
671     offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
672     if (needs_icache(method())) {
673       check_icache();
674     }
675     emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);
676     offsets()->set_value(CodeOffsets::Verified_Entry, offsets()->value(CodeOffsets::Verified_Inline_Entry));
677     offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO, offsets()->value(CodeOffsets::Verified_Inline_Entry));
678   }
679 }
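The four layouts from the comment above emit_std_entries can be summarized in a standalone enumeration (illustrative only; the real code keys off CompiledEntrySignature and CodeOffsets):

enum class EntryLayout {
  kNoInlineArgs,            // (1) one shared verified body behind UEP/UIEP
  kInlineReceiverOnly,      // (2) only the receiver packs: VIEP_RO == VIEP
  kInlineArgsPlainReceiver, // (3) receiver needs no packing: VIEP_RO == VEP
  kInlineReceiverAndArgs    // (4) three distinct entries: VEP, VIEP, VIEP_RO
};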
680 
681 void LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
682   offsets()->set_value(entry, _masm->offset());
683   _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
684   switch (entry) {
685   case CodeOffsets::Verified_Entry: {
686     if (needs_clinit_barrier_on_entry(method())) {
687       clinit_barrier(method());
688     }
689     int rt_call_offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
690     add_scalarized_entry_info(rt_call_offset);
691     break;
692   }
693   case CodeOffsets::Verified_Inline_Entry_RO: {
694     assert(!needs_clinit_barrier_on_entry(method()), "can't be static");
695     int rt_call_offset = _masm->verified_inline_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
696     add_scalarized_entry_info(rt_call_offset);
697     break;
698   }
699   case CodeOffsets::Verified_Inline_Entry: {
700     if (needs_clinit_barrier_on_entry(method())) {
701       clinit_barrier(method());
702     }
703     build_frame();
704     offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
705     break;
706   }
707   default:
708     ShouldNotReachHere();
709     break;
710   }
711 }
712 
713 void LIR_Assembler::emit_op0(LIR_Op0* op) {
714   switch (op->code()) {
715     case lir_nop:
716       assert(op->info() == nullptr, "not supported");
717       _masm->nop();
718       break;
719 
720     case lir_label:
721       Unimplemented();
722       break;
723 
724     case lir_std_entry:
725       emit_std_entries();
726       break;
727 
728     case lir_osr_entry:
729       offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
730       osr_entry();
731       break;
732 
733     case lir_breakpoint:
734       breakpoint();
735       break;
736 
737     case lir_membar:
738       membar();
739       break;
740 
741     case lir_membar_acquire:
742       membar_acquire();
743       break;
744 
745     case lir_membar_release:
746       membar_release();

753     case lir_membar_storestore:
754       membar_storestore();
755       break;
756 
757     case lir_membar_loadstore:
758       membar_loadstore();
759       break;
760 
761     case lir_membar_storeload:
762       membar_storeload();
763       break;
764 
765     case lir_get_thread:
766       get_thread(op->result_opr());
767       break;
768 
769     case lir_on_spin_wait:
770       on_spin_wait();
771       break;
772 
773     case lir_check_orig_pc:
774       check_orig_pc();
775       break;
776 
777     default:
778       ShouldNotReachHere();
779       break;
780   }
781 }
782 
783 
784 void LIR_Assembler::emit_op2(LIR_Op2* op) {
785   switch (op->code()) {
786     case lir_cmp:
787       if (op->info() != nullptr) {
788         assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
789                "shouldn't be codeemitinfo for non-address operands");
790         add_debug_info_for_null_check_here(op->info()); // exception possible
791       }
792       comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
793       break;
794 
795     case lir_cmp_l2i:
796     case lir_cmp_fd2i:

842 
843     default:
844       Unimplemented();
845       break;
846   }
847 }
848 
849 void LIR_Assembler::emit_op4(LIR_Op4* op) {
850   switch(op->code()) {
851     case lir_cmove:
852       cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
853       break;
854 
855     default:
856       Unimplemented();
857       break;
858   }
859 }
860 
861 void LIR_Assembler::build_frame() {
862   _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()),
863                      needs_stack_repair(), method()->has_scalarized_args(), &_verified_inline_entry);
864 }
865 
866 
867 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
868   if (src->is_register()) {
869     if (dest->is_register()) {
870       assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
871       reg2reg(src,  dest);
872     } else if (dest->is_stack()) {
873       assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
874       reg2stack(src, dest, type);
875     } else if (dest->is_address()) {
876       reg2mem(src, dest, type, patch_code, info, wide);
877     } else {
878       ShouldNotReachHere();
879     }
880 
881   } else if (src->is_stack()) {
882     assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
883     if (dest->is_register()) {