12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.inline.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_Instruction.hpp"
28 #include "c1/c1_InstructionPrinter.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_MacroAssembler.hpp"
31 #include "c1/c1_ValueStack.hpp"
32 #include "compiler/compilerDefinitions.inline.hpp"
33 #include "compiler/oopMap.hpp"
34 #include "runtime/os.hpp"
35 #include "runtime/vm_version.hpp"
36
37 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
38 // We must have enough patching space so that the call can be inserted.
39 // We cannot use fat nops here, since the concurrent code rewrite may transiently
40 // create an illegal instruction sequence.
41 while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
42 _masm->nop();
43 }
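   // A deoptimization at this site must re-execute the bytecode: the field/klass
   // access has not happened yet and only becomes valid once the PatchingStub
   // has rewritten this site.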
44 info->set_force_reexecute();
45 patch->install(_masm, patch_code, obj, info);
46 append_code_stub(patch);
47
48 #ifdef ASSERT
49 Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
50 if (patch->id() == PatchingStub::access_field_id) {
51 switch (code) {
52 case Bytecodes::_putstatic:
53 case Bytecodes::_getstatic:
54 case Bytecodes::_putfield:
100 //---------------------------------------------------------------
101
102
103 LIR_Assembler::LIR_Assembler(Compilation* c):
104 _masm(c->masm())
105 , _compilation(c)
106 , _frame_map(c->frame_map())
107 , _current_block(nullptr)
108 , _pending_non_safepoint(nullptr)
109 , _pending_non_safepoint_offset(0)
110 , _immediate_oops_patched(0)
111 {
112 _slow_case_stubs = new CodeStubList();
113 }
114
115
116 LIR_Assembler::~LIR_Assembler() {
117 // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
118 // Reset it here to avoid an assertion.
119 _unwind_handler_entry.reset();
120 }
121
122
123 void LIR_Assembler::check_codespace() {
124 CodeSection* cs = _masm->code_section();
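   // Keep a safety margin (1K on 32-bit, 2K on 64-bit): bailing out of the
   // compile here is cheaper than letting the emitter run off the end of the
   // code buffer.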
125 if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
126 BAILOUT("CodeBuffer overflow");
127 }
128 }
129
130
131 void LIR_Assembler::append_code_stub(CodeStub* stub) {
132 _immediate_oops_patched += stub->nr_immediate_oops_patched();
133 _slow_case_stubs->append(stub);
134 }
135
136 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
137 for (int m = 0; m < stub_list->length(); m++) {
138 CodeStub* s = stub_list->at(m);
139
310 }
311 #endif /* PRODUCT */
312 }
313 }
314
315 #ifdef ASSERT
316 void LIR_Assembler::check_no_unbound_labels() {
317 CHECK_BAILOUT();
318
319 for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
320 if (!_branch_target_blocks.at(i)->label()->is_bound()) {
321 tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
322 assert(false, "unbound label");
323 }
324 }
325 }
326 #endif
327
328 //----------------------------------debug info--------------------------------
329
330
331 void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
332 int pc_offset = code_offset();
333 flush_debug_info(pc_offset);
334 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
335 if (info->exception_handlers() != nullptr) {
336 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
337 }
338 }
339
340
341 void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
342 flush_debug_info(pc_offset);
343 cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
344 if (cinfo->exception_handlers() != nullptr) {
345 compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
346 }
347 }
348
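   // For a StateSplit instruction the debug state is the state *after* the
   // instruction; for everything else, fall back to the state before it.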
349 static ValueStack* debug_info(Instruction* ins) {
350 StateSplit* ss = ins->as_StateSplit();
351 if (ss != nullptr) return ss->state();
352 return ins->state_before();
353 }
354
355 void LIR_Assembler::process_debug_info(LIR_Op* op) {
356 Instruction* src = op->source();
357 if (src == nullptr) return;
358 int pc_offset = code_offset();
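   // Same source instruction as the pending non-safepoint: just slide the
   // pending entry forward to the current offset instead of starting a new one.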
359 if (_pending_non_safepoint == src) {
360 _pending_non_safepoint_offset = pc_offset;
361 return;
362 }
363 ValueStack* vstack = debug_info(src);
461 } else {
462 emit_static_call_stub();
463 }
464 CHECK_BAILOUT();
465
466 switch (op->code()) {
467 case lir_static_call:
468 case lir_dynamic_call:
469 call(op, relocInfo::static_call_type);
470 break;
471 case lir_optvirtual_call:
472 call(op, relocInfo::opt_virtual_call_type);
473 break;
474 case lir_icvirtual_call:
475 ic_call(op);
476 break;
477 default:
478 fatal("unexpected op code: %s", op->name());
479 break;
480 }
481 }
482
483
484 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
485 _masm->bind (*(op->label()));
486 }
487
488
489 void LIR_Assembler::emit_op1(LIR_Op1* op) {
490 switch (op->code()) {
491 case lir_move:
492 if (op->move_kind() == lir_move_volatile) {
493 assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
494 volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
495 } else {
496 move_op(op->in_opr(), op->result_opr(), op->type(),
497 op->patch_code(), op->info(),
498 op->move_kind() == lir_move_wide);
499 }
500 break;
550 } else {
551 Unimplemented();
552 }
553 break;
554 }
555
556 case lir_monaddr:
557 monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
558 break;
559
560 case lir_unwind:
561 unwind_op(op->in_opr());
562 break;
563
564 default:
565 Unimplemented();
566 break;
567 }
568 }
569
570
571 void LIR_Assembler::emit_op0(LIR_Op0* op) {
572 switch (op->code()) {
573 case lir_nop:
574 assert(op->info() == nullptr, "not supported");
575 _masm->nop();
576 break;
577
578 case lir_label:
579 Unimplemented();
580 break;
581
582 case lir_std_entry: {
583 // init offsets
584 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
585 if (needs_icache(compilation()->method())) {
586 int offset = check_icache();
587 offsets()->set_value(CodeOffsets::Entry, offset);
588 }
589 _masm->align(CodeEntryAlignment);
590 offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
591 _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
592 if (needs_clinit_barrier_on_entry(compilation()->method())) {
593 clinit_barrier(compilation()->method());
594 }
595 build_frame();
596 offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
597 break;
598 }
599
600 case lir_osr_entry:
601 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
602 osr_entry();
603 break;
604
605 case lir_breakpoint:
606 breakpoint();
607 break;
608
609 case lir_membar:
610 membar();
611 break;
612
613 case lir_membar_acquire:
614 membar_acquire();
615 break;
616
617 case lir_membar_release:
618 membar_release();
625 case lir_membar_storestore:
626 membar_storestore();
627 break;
628
629 case lir_membar_loadstore:
630 membar_loadstore();
631 break;
632
633 case lir_membar_storeload:
634 membar_storeload();
635 break;
636
637 case lir_get_thread:
638 get_thread(op->result_opr());
639 break;
640
641 case lir_on_spin_wait:
642 on_spin_wait();
643 break;
644
645 default:
646 ShouldNotReachHere();
647 break;
648 }
649 }
650
651
652 void LIR_Assembler::emit_op2(LIR_Op2* op) {
653 switch (op->code()) {
654 case lir_cmp:
655 if (op->info() != nullptr) {
656 assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
657 "shouldn't be codeemitinfo for non-address operands");
658 add_debug_info_for_null_check_here(op->info()); // exception possible
659 }
660 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
661 break;
662
663 case lir_cmp_l2i:
664 case lir_cmp_fd2i:
710
711 default:
712 Unimplemented();
713 break;
714 }
715 }
716
717 void LIR_Assembler::emit_op4(LIR_Op4* op) {
718 switch (op->code()) {
719 case lir_cmove:
720 cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
721 break;
722
723 default:
724 Unimplemented();
725 break;
726 }
727 }
728
729 void LIR_Assembler::build_frame() {
730 _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
731 }
732
733
734 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
735 if (src->is_register()) {
736 if (dest->is_register()) {
737 assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
738 reg2reg(src, dest);
739 } else if (dest->is_stack()) {
740 assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
741 reg2stack(src, dest, type);
742 } else if (dest->is_address()) {
743 reg2mem(src, dest, type, patch_code, info, wide);
744 } else {
745 ShouldNotReachHere();
746 }
747
748 } else if (src->is_stack()) {
749 assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
750 if (dest->is_register()) {
|
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/assembler.inline.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_Instruction.hpp"
28 #include "c1/c1_InstructionPrinter.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_MacroAssembler.hpp"
31 #include "c1/c1_ValueStack.hpp"
32 #include "ci/ciInlineKlass.hpp"
33 #include "ci/ciUtilities.inline.hpp"
34 #include "compiler/compilerDefinitions.inline.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "runtime/os.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/vm_version.hpp"
39
40 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
41 // We must have enough patching space so that the call can be inserted.
42 // We cannot use fat nops here, since the concurrent code rewrite may transiently
43 // create an illegal instruction sequence.
44 while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
45 _masm->nop();
46 }
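   // A deoptimization at this site must re-execute the bytecode: the field/klass
   // access has not happened yet and only becomes valid once the PatchingStub
   // has rewritten this site.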
47 info->set_force_reexecute();
48 patch->install(_masm, patch_code, obj, info);
49 append_code_stub(patch);
50
51 #ifdef ASSERT
52 Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
53 if (patch->id() == PatchingStub::access_field_id) {
54 switch (code) {
55 case Bytecodes::_putstatic:
56 case Bytecodes::_getstatic:
57 case Bytecodes::_putfield:
103 //---------------------------------------------------------------
104
105
106 LIR_Assembler::LIR_Assembler(Compilation* c):
107 _masm(c->masm())
108 , _compilation(c)
109 , _frame_map(c->frame_map())
110 , _current_block(nullptr)
111 , _pending_non_safepoint(nullptr)
112 , _pending_non_safepoint_offset(0)
113 , _immediate_oops_patched(0)
114 {
115 _slow_case_stubs = new CodeStubList();
116 }
117
118
119 LIR_Assembler::~LIR_Assembler() {
120 // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
121 // Reset it here to avoid an assertion.
122 _unwind_handler_entry.reset();
123 _verified_inline_entry.reset();
124 }
125
126
127 void LIR_Assembler::check_codespace() {
128 CodeSection* cs = _masm->code_section();
129 if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
130 BAILOUT("CodeBuffer overflow");
131 }
132 }
133
134
135 void LIR_Assembler::append_code_stub(CodeStub* stub) {
136 _immediate_oops_patched += stub->nr_immediate_oops_patched();
137 _slow_case_stubs->append(stub);
138 }
139
140 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
141 for (int m = 0; m < stub_list->length(); m++) {
142 CodeStub* s = stub_list->at(m);
143
314 }
315 #endif /* PRODUCT */
316 }
317 }
318
319 #ifdef ASSERT
320 void LIR_Assembler::check_no_unbound_labels() {
321 CHECK_BAILOUT();
322
323 for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
324 if (!_branch_target_blocks.at(i)->label()->is_bound()) {
325 tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
326 assert(false, "unbound label");
327 }
328 }
329 }
330 #endif
331
332 //----------------------------------debug info--------------------------------
333
334 void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
335 int pc_offset = code_offset();
336 flush_debug_info(pc_offset);
337 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
338 if (info->exception_handlers() != nullptr) {
339 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
340 }
341 }
342
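   // maybe_return_as_fields: with inline types, a callee may return its result
   // scalarized in registers rather than as a heap reference, and the recorded
   // debug info must describe that return convention.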
343 void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool maybe_return_as_fields) {
344 flush_debug_info(pc_offset);
345 cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, maybe_return_as_fields);
346 if (cinfo->exception_handlers() != nullptr) {
347 compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
348 }
349 }
350
351 static ValueStack* debug_info(Instruction* ins) {
352 StateSplit* ss = ins->as_StateSplit();
353 if (ss != nullptr) return ss->state();
354 return ins->state_before();
355 }
356
357 void LIR_Assembler::process_debug_info(LIR_Op* op) {
358 Instruction* src = op->source();
359 if (src == nullptr) return;
360 int pc_offset = code_offset();
361 if (_pending_non_safepoint == src) {
362 _pending_non_safepoint_offset = pc_offset;
363 return;
364 }
365 ValueStack* vstack = debug_info(src);
463 } else {
464 emit_static_call_stub();
465 }
466 CHECK_BAILOUT();
467
468 switch (op->code()) {
469 case lir_static_call:
470 case lir_dynamic_call:
471 call(op, relocInfo::static_call_type);
472 break;
473 case lir_optvirtual_call:
474 call(op, relocInfo::opt_virtual_call_type);
475 break;
476 case lir_icvirtual_call:
477 ic_call(op);
478 break;
479 default:
480 fatal("unexpected op code: %s", op->name());
481 break;
482 }
483
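   // If the callee may return an inline type as scalarized fields, emit the
   // runtime call that buffers those fields back into a heap object and record
   // call info for it, since the buffering call can safepoint.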
484 ciInlineKlass* vk = nullptr;
485 if (op->maybe_return_as_fields(&vk)) {
486 int offset = store_inline_type_fields_to_buf(vk);
487 add_call_info(offset, op->info(), true);
488 }
489 }
490
491
492 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
493 _masm->bind (*(op->label()));
494 }
495
496
497 void LIR_Assembler::emit_op1(LIR_Op1* op) {
498 switch (op->code()) {
499 case lir_move:
500 if (op->move_kind() == lir_move_volatile) {
501 assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
502 volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
503 } else {
504 move_op(op->in_opr(), op->result_opr(), op->type(),
505 op->patch_code(), op->info(),
506 op->move_kind() == lir_move_wide);
507 }
508 break;
558 } else {
559 Unimplemented();
560 }
561 break;
562 }
563
564 case lir_monaddr:
565 monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
566 break;
567
568 case lir_unwind:
569 unwind_op(op->in_opr());
570 break;
571
572 default:
573 Unimplemented();
574 break;
575 }
576 }
577
578 void LIR_Assembler::add_scalarized_debug_info(int pc_offset) {
579 // The VEP and VIEP(RO) of a C1-compiled method call buffer_inline_args_xxx()
580 // before doing any argument shuffling. This call may trigger a GC. When it does,
581 // all the parameters are still as passed by the caller, so it is enough that
582 // frame::sender_for_compiled_frame(RegisterMap* map) calls map->set_include_argument_oops().
583 // Deoptimization is delayed until we enter the method body, so we only need a
584 // scope for stack walking here. There are no materialized locals, expression
585 // stack entries, or monitors yet.
586 flush_debug_info(pc_offset);
587 OopMap* oop_map = new OopMap(0, 0);
588 DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
589 debug_info->add_safepoint(pc_offset, oop_map);
590 bool reexecute = false;
591 debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute);
592 debug_info->end_safepoint(pc_offset);
593 }
594
595 // The entry points of C1-compiled methods can have the following types:
596 // (1) Methods with no inline type args
597 // (2) Methods with inline type receiver but no inline type args
598 // VIEP_RO is the same as VIEP
599 // (3) Methods with non-inline type receiver and some inline type args
600 // VIEP_RO is the same as VEP
601 // (4) Methods with inline type receiver and other inline type args
602 // Separate VEP, VIEP and VIEP_RO
603 //
604 //        (1)                (2)                  (3)                     (4)
605 // UEP/UIEP:          VEP:              UEP:                  UEP:
606 //   check_icache       pack receiver     check_icache          check_icache
607 // VEP/VIEP/VIEP_RO     jump to VIEP    VEP/VIEP_RO:          VIEP_RO:
608 //   body             UEP/UIEP:           pack inline args      pack inline args (except receiver)
609 //                      check_icache      jump to VIEP          jump to VIEP
610 //                    VIEP/VIEP_RO      UIEP:                 VEP:
611 //                      body              check_icache          pack all inline args
612 //                                      VIEP:                   jump to VIEP
613 //                                        body                UIEP:
614 //                                                              check_icache
615 //                                                            VIEP:
616 //                                                              body
617 void LIR_Assembler::emit_std_entries() {
618 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
619
620 _masm->align(CodeEntryAlignment);
621
622 if (method()->has_scalarized_args()) {
623 VM_ENTRY_MARK;
624 assert(InlineTypePassFieldsAsArgs, "must be");
625 CompiledEntrySignature ces(method()->get_Method());
626 ces.compute_calling_conventions(false);
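   // ces now describes, for each entry point, which arguments arrive
   // scalarized and how they map onto registers and stack slots.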
627 CodeOffsets::Entries ro_entry_type = ces.c1_inline_ro_entry_type();
628
629 // UEP: check icache and fall through
630 if (ro_entry_type != CodeOffsets::Verified_Inline_Entry) {
631 offsets()->set_value(CodeOffsets::Entry, _masm->offset());
632 if (needs_icache(method())) {
633 check_icache();
634 }
635 }
636
637 // VIEP_RO: pack all value parameters, except the receiver
638 if (ro_entry_type == CodeOffsets::Verified_Inline_Entry_RO) {
639 emit_std_entry(CodeOffsets::Verified_Inline_Entry_RO, &ces);
640 }
641
642 // VEP: pack all value parameters
643 _masm->align(CodeEntryAlignment);
644 emit_std_entry(CodeOffsets::Verified_Entry, &ces);
645
646 // UIEP: check icache and fall through
647 _masm->align(CodeEntryAlignment);
648 offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
649 if (ro_entry_type == CodeOffsets::Verified_Inline_Entry) {
650 // Special case if we have VIEP == VIEP(RO):
651 // this means UIEP (called by C1) == UEP (called by C2).
652 offsets()->set_value(CodeOffsets::Entry, _masm->offset());
653 }
654 if (needs_icache(method())) {
655 check_icache();
656 }
657
658 // VIEP: all value parameters are passed as refs - no packing.
659 emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);
660
661 if (ro_entry_type != CodeOffsets::Verified_Inline_Entry_RO) {
662 // The VIEP(RO) is the same as VEP or VIEP
663 assert(ro_entry_type == CodeOffsets::Verified_Entry ||
664 ro_entry_type == CodeOffsets::Verified_Inline_Entry, "must be");
665 offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO,
666 offsets()->value(ro_entry_type));
667 }
668 } else {
669 // All 3 entries are the same (no inline type packing)
670 offsets()->set_value(CodeOffsets::Entry, _masm->offset());
671 offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
672 if (needs_icache(method())) {
673 check_icache();
674 }
675 emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);
676 offsets()->set_value(CodeOffsets::Verified_Entry, offsets()->value(CodeOffsets::Verified_Inline_Entry));
677 offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO, offsets()->value(CodeOffsets::Verified_Inline_Entry));
678 }
679 }
680
681 void LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
682 offsets()->set_value(entry, _masm->offset());
683 _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
684 switch (entry) {
685 case CodeOffsets::Verified_Entry: {
686 if (needs_clinit_barrier_on_entry(method())) {
687 clinit_barrier(method());
688 }
689 int rt_call_offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
690 add_scalarized_debug_info(rt_call_offset);
691 break;
692 }
693 case CodeOffsets::Verified_Inline_Entry_RO: {
694 assert(!needs_clinit_barrier_on_entry(method()), "can't be static");
695 int rt_call_offset = _masm->verified_inline_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
696 add_scalarized_debug_info(rt_call_offset);
697 break;
698 }
699 case CodeOffsets::Verified_Inline_Entry: {
700 if (needs_clinit_barrier_on_entry(method())) {
701 clinit_barrier(method());
702 }
703 build_frame();
704 offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
705 break;
706 }
707 default:
708 ShouldNotReachHere();
709 break;
710 }
711 }
712
713 void LIR_Assembler::emit_op0(LIR_Op0* op) {
714 switch (op->code()) {
715 case lir_nop:
716 assert(op->info() == nullptr, "not supported");
717 _masm->nop();
718 break;
719
720 case lir_label:
721 Unimplemented();
722 break;
723
724 case lir_std_entry:
725 emit_std_entries();
726 break;
727
728 case lir_osr_entry:
729 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
730 osr_entry();
731 break;
732
733 case lir_breakpoint:
734 breakpoint();
735 break;
736
737 case lir_membar:
738 membar();
739 break;
740
741 case lir_membar_acquire:
742 membar_acquire();
743 break;
744
745 case lir_membar_release:
746 membar_release();
753 case lir_membar_storestore:
754 membar_storestore();
755 break;
756
757 case lir_membar_loadstore:
758 membar_loadstore();
759 break;
760
761 case lir_membar_storeload:
762 membar_storeload();
763 break;
764
765 case lir_get_thread:
766 get_thread(op->result_opr());
767 break;
768
769 case lir_on_spin_wait:
770 on_spin_wait();
771 break;
772
773 case lir_check_orig_pc:
774 check_orig_pc();
775 break;
776
777 default:
778 ShouldNotReachHere();
779 break;
780 }
781 }
782
783
784 void LIR_Assembler::emit_op2(LIR_Op2* op) {
785 switch (op->code()) {
786 case lir_cmp:
787 if (op->info() != nullptr) {
788 assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
789 "shouldn't be codeemitinfo for non-address operands");
790 add_debug_info_for_null_check_here(op->info()); // exception possible
791 }
792 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
793 break;
794
795 case lir_cmp_l2i:
796 case lir_cmp_fd2i:
842
843 default:
844 Unimplemented();
845 break;
846 }
847 }
848
849 void LIR_Assembler::emit_op4(LIR_Op4* op) {
850 switch (op->code()) {
851 case lir_cmove:
852 cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
853 break;
854
855 default:
856 Unimplemented();
857 break;
858 }
859 }
860
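   // Besides the frame and bang sizes, pass the stack slot reserved for the
   // caller's original PC and, for methods with scalarized arguments,
   // stack-repair support so the runtime can recover the true frame size
   // regardless of which entry point was used.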
861 void LIR_Assembler::build_frame() {
862 _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()),
863 needs_stack_repair(), method()->has_scalarized_args(), &_verified_inline_entry);
864 }
865
866
867 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
868 if (src->is_register()) {
869 if (dest->is_register()) {
870 assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
871 reg2reg(src, dest);
872 } else if (dest->is_stack()) {
873 assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
874 reg2stack(src, dest, type);
875 } else if (dest->is_address()) {
876 reg2mem(src, dest, type, patch_code, info, wide);
877 } else {
878 ShouldNotReachHere();
879 }
880
881 } else if (src->is_stack()) {
882 assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
883 if (dest->is_register()) {
|