 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInlineKlass.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_version.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that the call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may transiently
  // create an illegal instruction sequence.
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  info->set_force_reexecute();
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
        // ...
//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(nullptr)
 , _pending_non_safepoint(nullptr)
 , _pending_non_safepoint_offset(0)
 , _immediate_oops_patched(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
  _verified_inline_entry.reset();
}


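// Bail out of the compilation if the remaining code buffer space drops below
// a safety margin (1K on 32-bit, 2K on 64-bit platforms).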
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


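// Queue a slow-case stub for emission after the method body, and tally any
// immediate oops it patches.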
void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _immediate_oops_patched += stub->nr_immediate_oops_patched();
  _slow_case_stubs->append(stub);
}

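// Emit all slow-case stubs accumulated while generating the method body.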
void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);
    // ...
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------

void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool maybe_return_as_fields) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, maybe_return_as_fields);
  if (cinfo->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

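// For state-splitting instructions the debug info is the state after the
// split; all other instructions fall back to their state before.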
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != nullptr) return ss->state();
  return ins->state_before();
}

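// Non-safepoint debug info is buffered: if consecutive LIR ops come from the
// same source instruction, only the pending record's pc offset is advanced.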
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == nullptr) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  // ...
  } else {
    emit_static_call_stub();
  }
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  default:
    fatal("unexpected op code: %s", op->name());
    break;
  }

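  // If the callee may return an inline type in scalarized form, buffer the
  // returned field values and record call info for the buffering runtime call.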
  ciInlineKlass* vk = nullptr;
  if (op->maybe_return_as_fields(&vk)) {
    int offset = store_inline_type_fields_to_buf(vk);
    add_call_info(offset, op->info(), true);
  }
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(),
                op->move_kind() == lir_move_wide);
      }
      break;
      // ...
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

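// Record a minimal safepoint (empty oop map, no scope values) for the
// buffer_inline_args_xxx() runtime call made by scalarized entry points;
// the caller's argument oops are still in place at that point (see below).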
void LIR_Assembler::add_scalarized_entry_info(int pc_offset) {
  flush_debug_info(pc_offset);
  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  // The VEP and VIEP(RO) of a C1-compiled method call buffer_inline_args_xxx()
  // before doing any argument shuffling. This call may cause GC. When GC happens,
  // all the parameters are still as passed by the caller, so we just use
  // map->set_include_argument_oops() inside frame::sender_for_compiled_frame(RegisterMap* map).
  // There's no need to build a GC map here.
  OopMap* oop_map = new OopMap(0, 0);
  debug_info->add_safepoint(pc_offset, oop_map);
  DebugToken* locvals = debug_info->create_scope_values(nullptr); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* expvals = debug_info->create_scope_values(nullptr); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* monvals = debug_info->create_monitor_values(nullptr); // FIXME: need testing with synchronized method
  bool reexecute = false;
  bool return_oop = false; // This flag will be ignored since it is used only for C2 with escape analysis.
  bool rethrow_exception = false;
  bool is_method_handle_invoke = false;
  debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute, rethrow_exception, is_method_handle_invoke, return_oop, false, locvals, expvals, monvals);
  debug_info->end_safepoint(pc_offset);
}

// The entry points of C1-compiled methods can have the following types:
// (1) Methods with no inline type args
// (2) Methods with inline type receiver but no inline type args
//     VIEP_RO is the same as VIEP
// (3) Methods with non-inline type receiver and some inline type args
//     VIEP_RO is the same as VEP
// (4) Methods with inline type receiver and other inline type args
//     Separate VEP, VIEP and VIEP_RO
//
//      (1)                 (2)                 (3)                    (4)
// UEP/UIEP:           VEP:                UEP:                  UEP:
//   check_icache        pack receiver       check_icache          check_icache
// VEP/VIEP/VIEP_RO      jump to VIEP      VEP/VIEP_RO:          VIEP_RO:
//   body              UEP/UIEP:             pack inline args      pack inline args (except receiver)
//                       check_icache        jump to VIEP          jump to VIEP
//                     VIEP/VIEP_RO        UIEP:                 VEP:
//                       body                check_icache          pack all inline args
//                                         VIEP:                   jump to VIEP
//                                           body                UIEP:
//                                                                 check_icache
//                                                               VIEP:
//                                                                 body
void LIR_Assembler::emit_std_entries() {
  offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());

  _masm->align(CodeEntryAlignment);
  const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();
  if (ces->has_scalarized_args()) {
    assert(InlineTypePassFieldsAsArgs && method()->get_Method()->has_scalarized_args(), "must be");
    CodeOffsets::Entries ro_entry_type = ces->c1_inline_ro_entry_type();

    // UEP: check icache and fall-through
    if (ro_entry_type != CodeOffsets::Verified_Inline_Entry) {
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
      if (needs_icache(method())) {
        check_icache();
      }
    }

    // VIEP_RO: pack all value parameters, except the receiver
    if (ro_entry_type == CodeOffsets::Verified_Inline_Entry_RO) {
      emit_std_entry(CodeOffsets::Verified_Inline_Entry_RO, ces);
    }

    // VEP: pack all value parameters
    _masm->align(CodeEntryAlignment);
    emit_std_entry(CodeOffsets::Verified_Entry, ces);

    // UIEP: check icache and fall-through
    _masm->align(CodeEntryAlignment);
    offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
    if (ro_entry_type == CodeOffsets::Verified_Inline_Entry) {
      // Special case if we have VIEP == VIEP(RO):
      // this means UIEP (called by C1) == UEP (called by C2).
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    }
    if (needs_icache(method())) {
      check_icache();
    }

    // VIEP: all value parameters are passed as refs - no packing.
    emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);

    if (ro_entry_type != CodeOffsets::Verified_Inline_Entry_RO) {
      // The VIEP(RO) is the same as VEP or VIEP
      assert(ro_entry_type == CodeOffsets::Verified_Entry ||
             ro_entry_type == CodeOffsets::Verified_Inline_Entry, "must be");
      offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO,
                           offsets()->value(ro_entry_type));
    }
  } else {
    // All 3 entries are the same (no inline type packing)
    offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
    if (needs_icache(method())) {
      check_icache();
    }
    emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);
    offsets()->set_value(CodeOffsets::Verified_Entry, offsets()->value(CodeOffsets::Verified_Inline_Entry));
    offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO, offsets()->value(CodeOffsets::Verified_Inline_Entry));
  }
}

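// Emit one entry point: record its offset, emit the matching prologue
// (packing scalarized arguments where required), and attach safepoint debug
// info to any buffering runtime call it makes.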
void LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
  offsets()->set_value(entry, _masm->offset());
  _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
  switch (entry) {
  case CodeOffsets::Verified_Entry: {
    if (needs_clinit_barrier_on_entry(method())) {
      clinit_barrier(method());
    }
    int rt_call_offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(),
                                               in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
    add_scalarized_entry_info(rt_call_offset);
    break;
  }
  case CodeOffsets::Verified_Inline_Entry_RO: {
    assert(!needs_clinit_barrier_on_entry(method()), "can't be static");
    int rt_call_offset = _masm->verified_inline_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(),
                                                         in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
    add_scalarized_entry_info(rt_call_offset);
    break;
  }
  case CodeOffsets::Verified_Inline_Entry: {
    if (needs_clinit_barrier_on_entry(method())) {
      clinit_barrier(method());
    }
    build_frame();
    offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
    break;
  }
  default:
    ShouldNotReachHere();
    break;
  }
}

void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_nop:
      assert(op->info() == nullptr, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_std_entry:
      emit_std_entries();
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

#ifdef IA32
    case lir_fpop_raw:
      fpop();
      break;
#endif // IA32

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_membar:
      membar();
      break;

      // ...
    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    case lir_check_orig_pc:
      check_orig_pc();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != nullptr) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
      // ...

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::emit_op4(LIR_Op4* op) {
  switch (op->code()) {
    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
      break;

    default:
      Unimplemented();
      break;
  }
}

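// Build the frame. Besides the frame and bang sizes, this passes the stack
// slot reserved for the original PC, whether the frame may need stack repair,
// and the label of the verified inline entry.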
void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()),
                     needs_stack_repair(), method()->has_scalarized_args(), &_verified_inline_entry);
}


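// Dispatch a move according to the source and destination operand kinds.
// Only moves involving memory may carry patch info or implicit-exception info.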
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      reg2stack(src, dest, type);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
    if (dest->is_register()) {