13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "c1/c1_Compilation.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_InstructionPrinter.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciInstance.hpp"
34 #include "compiler/compilerDefinitions.inline.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "runtime/os.hpp"
37 #include "runtime/vm_version.hpp"
38
39 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
40 // We must have enough patching space so that the call can be inserted.
41 // We cannot use fat nops here, since the concurrent code rewrite may transiently
42 // create an illegal instruction sequence.
43 while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
44 _masm->nop();
45 }
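// Force the reexecute bit in the debug info recorded for this site: the patched
// bytecode has not completed its access yet, so it must be re-executed after a
// deoptimization here.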
46 info->set_force_reexecute();
47 patch->install(_masm, patch_code, obj, info);
48 append_code_stub(patch);
49
50 #ifdef ASSERT
51 Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
52 if (patch->id() == PatchingStub::access_field_id) {
53 switch (code) {
54 case Bytecodes::_putstatic:
55 case Bytecodes::_getstatic:
56 case Bytecodes::_putfield:
57 case Bytecodes::_getfield:
58 break;
59 default:
60 ShouldNotReachHere();
61 }
62 } else if (patch->id() == PatchingStub::load_klass_id) {
63 switch (code) {
64 case Bytecodes::_new:
65 case Bytecodes::_anewarray:
66 case Bytecodes::_multianewarray:
67 case Bytecodes::_instanceof:
68 case Bytecodes::_checkcast:
69 break;
70 default:
71 ShouldNotReachHere();
72 }
73 } else if (patch->id() == PatchingStub::load_mirror_id) {
74 switch (code) {
75 case Bytecodes::_putstatic:
76 case Bytecodes::_getstatic:
77 case Bytecodes::_ldc:
78 case Bytecodes::_ldc_w:
79 case Bytecodes::_ldc2_w:
80 break;
81 default:
82 ShouldNotReachHere();
83 }
84 } else if (patch->id() == PatchingStub::load_appendix_id) {
102 //---------------------------------------------------------------
103
104
105 LIR_Assembler::LIR_Assembler(Compilation* c):
106 _masm(c->masm())
107 , _compilation(c)
108 , _frame_map(c->frame_map())
109 , _current_block(NULL)
110 , _pending_non_safepoint(NULL)
111 , _pending_non_safepoint_offset(0)
112 , _immediate_oops_patched(0)
113 {
114 _slow_case_stubs = new CodeStubList();
115 }
116
117
118 LIR_Assembler::~LIR_Assembler() {
119 // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
120 // Reset it here to avoid an assertion.
121 _unwind_handler_entry.reset();
122 }
123
124
125 void LIR_Assembler::check_codespace() {
126 CodeSection* cs = _masm->code_section();
127 if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
128 BAILOUT("CodeBuffer overflow");
129 }
130 }
131
132
133 void LIR_Assembler::append_code_stub(CodeStub* stub) {
134 _immediate_oops_patched += stub->nr_immediate_oops_patched();
135 _slow_case_stubs->append(stub);
136 }
137
138 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
139 for (int m = 0; m < stub_list->length(); m++) {
140 CodeStub* s = stub_list->at(m);
141
313 }
314 #endif /* PRODUCT */
315 }
316 }
317
318 #ifdef ASSERT
319 void LIR_Assembler::check_no_unbound_labels() {
320 CHECK_BAILOUT();
321
322 for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
323 if (!_branch_target_blocks.at(i)->label()->is_bound()) {
324 tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
325 assert(false, "unbound label");
326 }
327 }
328 }
329 #endif
330
331 //----------------------------------debug info--------------------------------
332
333
334 void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
335 int pc_offset = code_offset();
336 flush_debug_info(pc_offset);
337 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
338 if (info->exception_handlers() != NULL) {
339 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
340 }
341 }
342
343
344 void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
345 flush_debug_info(pc_offset);
346 cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
347 if (cinfo->exception_handlers() != NULL) {
348 compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
349 }
350 }
351
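// Pick the ValueStack that describes the debug state of an instruction: StateSplit
// instructions carry their own state, all other instructions use their state before.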
352 static ValueStack* debug_info(Instruction* ins) {
353 StateSplit* ss = ins->as_StateSplit();
354 if (ss != NULL) return ss->state();
355 return ins->state_before();
356 }
357
358 void LIR_Assembler::process_debug_info(LIR_Op* op) {
359 Instruction* src = op->source();
360 if (src == NULL) return;
361 int pc_offset = code_offset();
362 if (_pending_non_safepoint == src) {
363 _pending_non_safepoint_offset = pc_offset;
364 return;
365 }
366 ValueStack* vstack = debug_info(src);
471 case lir_dynamic_call:
472 call(op, relocInfo::static_call_type);
473 break;
474 case lir_optvirtual_call:
475 call(op, relocInfo::opt_virtual_call_type);
476 break;
477 case lir_icvirtual_call:
478 ic_call(op);
479 break;
480 default:
481 fatal("unexpected op code: %s", op->name());
482 break;
483 }
484
485 // JSR 292
486 // Record if this method has MethodHandle invokes.
487 if (op->is_method_handle_invoke()) {
488 compilation()->set_has_method_handle_invokes(true);
489 }
490
491 #if defined(IA32) && defined(COMPILER2)
492 // C2 leaves the FPU stack dirty; clean it up.
493 if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
494 int i;
495 for ( i = 1; i <= 7 ; i++ ) {
496 ffree(i);
497 }
498 if (!op->result_opr()->is_float_kind()) {
499 ffree(0);
500 }
501 }
502 #endif // IA32 && COMPILER2
503 }
504
505
506 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
507 _masm->bind (*(op->label()));
508 }
509
510
577 } else {
578 Unimplemented();
579 }
580 break;
581 }
582
583 case lir_monaddr:
584 monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
585 break;
586
587 case lir_unwind:
588 unwind_op(op->in_opr());
589 break;
590
591 default:
592 Unimplemented();
593 break;
594 }
595 }
596
597
598 void LIR_Assembler::emit_op0(LIR_Op0* op) {
599 switch (op->code()) {
600 case lir_nop:
601 assert(op->info() == NULL, "not supported");
602 _masm->nop();
603 break;
604
605 case lir_label:
606 Unimplemented();
607 break;
608
609 case lir_std_entry:
610 // init offsets
611 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
612 _masm->align(CodeEntryAlignment);
613 if (needs_icache(compilation()->method())) {
614 check_icache();
615 }
616 offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
617 _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
618 if (needs_clinit_barrier_on_entry(compilation()->method())) {
619 clinit_barrier(compilation()->method());
620 }
621 build_frame();
622 offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
623 break;
624
625 case lir_osr_entry:
626 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
627 osr_entry();
628 break;
629
630 #ifdef IA32
631 case lir_fpop_raw:
632 fpop();
633 break;
634 #endif // IA32
635
636 case lir_breakpoint:
637 breakpoint();
638 break;
639
640 case lir_membar:
641 membar();
642 break;
656 case lir_membar_storestore:
657 membar_storestore();
658 break;
659
660 case lir_membar_loadstore:
661 membar_loadstore();
662 break;
663
664 case lir_membar_storeload:
665 membar_storeload();
666 break;
667
668 case lir_get_thread:
669 get_thread(op->result_opr());
670 break;
671
672 case lir_on_spin_wait:
673 on_spin_wait();
674 break;
675
676 default:
677 ShouldNotReachHere();
678 break;
679 }
680 }
681
682
683 void LIR_Assembler::emit_op2(LIR_Op2* op) {
684 switch (op->code()) {
685 case lir_cmp:
686 if (op->info() != NULL) {
687 assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
688 "shouldn't be codeemitinfo for non-address operands");
689 add_debug_info_for_null_check_here(op->info()); // exception possible
690 }
691 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
692 break;
693
694 case lir_cmp_l2i:
695 case lir_cmp_fd2i:
754
755 default:
756 Unimplemented();
757 break;
758 }
759 }
760
761 void LIR_Assembler::emit_op4(LIR_Op4* op) {
762 switch(op->code()) {
763 case lir_cmove:
764 cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
765 break;
766
767 default:
768 Unimplemented();
769 break;
770 }
771 }
772
773 void LIR_Assembler::build_frame() {
774 _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
775 }
776
777
778 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
779 assert(strict_fp_requires_explicit_rounding, "not required");
780 assert((src->is_single_fpu() && dest->is_single_stack()) ||
781 (src->is_double_fpu() && dest->is_double_stack()),
782 "round_fp: rounds register -> stack location");
783
784 reg2stack (src, dest, src->type(), pop_fpu_stack);
785 }
786
787
788 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
789 if (src->is_register()) {
790 if (dest->is_register()) {
791 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
792 reg2reg(src, dest);
793 } else if (dest->is_stack()) {
794 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "c1/c1_Compilation.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_InstructionPrinter.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciInlineKlass.hpp"
34 #include "ci/ciInstance.hpp"
35 #include "compiler/compilerDefinitions.inline.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "runtime/os.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/vm_version.hpp"
40
41 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
42 // We must have enough patching space so that the call can be inserted.
43 // We cannot use fat nops here, since the concurrent code rewrite may transiently
44 // create an illegal instruction sequence.
45 while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
46 _masm->nop();
47 }
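// Force the reexecute bit in the debug info recorded for this site: the patched
// bytecode has not completed its access yet, so it must be re-executed after a
// deoptimization here.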
48 info->set_force_reexecute();
49 patch->install(_masm, patch_code, obj, info);
50 append_code_stub(patch);
51
52 #ifdef ASSERT
53 Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
54 if (patch->id() == PatchingStub::access_field_id) {
55 switch (code) {
56 case Bytecodes::_putstatic:
57 case Bytecodes::_getstatic:
58 case Bytecodes::_putfield:
59 case Bytecodes::_getfield:
60 case Bytecodes::_withfield:
61 break;
62 default:
63 ShouldNotReachHere();
64 }
65 } else if (patch->id() == PatchingStub::load_klass_id) {
66 switch (code) {
67 case Bytecodes::_new:
68 case Bytecodes::_aconst_init:
69 case Bytecodes::_anewarray:
70 case Bytecodes::_multianewarray:
71 case Bytecodes::_instanceof:
72 case Bytecodes::_checkcast:
73 break;
74 default:
75 ShouldNotReachHere();
76 }
77 } else if (patch->id() == PatchingStub::load_mirror_id) {
78 switch (code) {
79 case Bytecodes::_putstatic:
80 case Bytecodes::_getstatic:
81 case Bytecodes::_ldc:
82 case Bytecodes::_ldc_w:
83 case Bytecodes::_ldc2_w:
84 break;
85 default:
86 ShouldNotReachHere();
87 }
88 } else if (patch->id() == PatchingStub::load_appendix_id) {
106 //---------------------------------------------------------------
107
108
109 LIR_Assembler::LIR_Assembler(Compilation* c):
110 _masm(c->masm())
111 , _compilation(c)
112 , _frame_map(c->frame_map())
113 , _current_block(NULL)
114 , _pending_non_safepoint(NULL)
115 , _pending_non_safepoint_offset(0)
116 , _immediate_oops_patched(0)
117 {
118 _slow_case_stubs = new CodeStubList();
119 }
120
121
122 LIR_Assembler::~LIR_Assembler() {
123 // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
124 // Reset it here to avoid an assertion.
125 _unwind_handler_entry.reset();
126 _verified_inline_entry.reset();
127 }
128
129
130 void LIR_Assembler::check_codespace() {
131 CodeSection* cs = _masm->code_section();
132 if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
133 BAILOUT("CodeBuffer overflow");
134 }
135 }
136
137
138 void LIR_Assembler::append_code_stub(CodeStub* stub) {
139 _immediate_oops_patched += stub->nr_immediate_oops_patched();
140 _slow_case_stubs->append(stub);
141 }
142
143 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
144 for (int m = 0; m < stub_list->length(); m++) {
145 CodeStub* s = stub_list->at(m);
146
318 }
319 #endif /* PRODUCT */
320 }
321 }
322
323 #ifdef ASSERT
324 void LIR_Assembler::check_no_unbound_labels() {
325 CHECK_BAILOUT();
326
327 for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
328 if (!_branch_target_blocks.at(i)->label()->is_bound()) {
329 tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
330 assert(false, "unbound label");
331 }
332 }
333 }
334 #endif
335
336 //----------------------------------debug info--------------------------------
337
338 void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
339 int pc_offset = code_offset();
340 flush_debug_info(pc_offset);
341 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
342 if (info->exception_handlers() != NULL) {
343 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
344 }
345 }
346
347 void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool maybe_return_as_fields) {
348 flush_debug_info(pc_offset);
349 cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, maybe_return_as_fields);
350 if (cinfo->exception_handlers() != NULL) {
351 compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
352 }
353 }
354
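// Pick the ValueStack that describes the debug state of an instruction: StateSplit
// instructions carry their own state, all other instructions use their state before.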
355 static ValueStack* debug_info(Instruction* ins) {
356 StateSplit* ss = ins->as_StateSplit();
357 if (ss != NULL) return ss->state();
358 return ins->state_before();
359 }
360
361 void LIR_Assembler::process_debug_info(LIR_Op* op) {
362 Instruction* src = op->source();
363 if (src == NULL) return;
364 int pc_offset = code_offset();
365 if (_pending_non_safepoint == src) {
366 _pending_non_safepoint_offset = pc_offset;
367 return;
368 }
369 ValueStack* vstack = debug_info(src);
474 case lir_dynamic_call:
475 call(op, relocInfo::static_call_type);
476 break;
477 case lir_optvirtual_call:
478 call(op, relocInfo::opt_virtual_call_type);
479 break;
480 case lir_icvirtual_call:
481 ic_call(op);
482 break;
483 default:
484 fatal("unexpected op code: %s", op->name());
485 break;
486 }
487
488 // JSR 292
489 // Record if this method has MethodHandle invokes.
490 if (op->is_method_handle_invoke()) {
491 compilation()->set_has_method_handle_invokes(true);
492 }
493
494 ciInlineKlass* vk = NULL;
495 if (op->maybe_return_as_fields(&vk)) {
496 int offset = store_inline_type_fields_to_buf(vk);
497 add_call_info(offset, op->info(), true);
498 }
499
500 #if defined(IA32) && defined(COMPILER2)
501 // C2 leaves the FPU stack dirty; clean it up.
502 if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
503 int i;
504 for ( i = 1; i <= 7 ; i++ ) {
505 ffree(i);
506 }
507 if (!op->result_opr()->is_float_kind()) {
508 ffree(0);
509 }
510 }
511 #endif // IA32 && COMPILER2
512 }
513
514
515 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
516 _masm->bind (*(op->label()));
517 }
518
519
586 } else {
587 Unimplemented();
588 }
589 break;
590 }
591
592 case lir_monaddr:
593 monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
594 break;
595
596 case lir_unwind:
597 unwind_op(op->in_opr());
598 break;
599
600 default:
601 Unimplemented();
602 break;
603 }
604 }
605
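// Records a safepoint (with an empty oop map) for the buffer_inline_args_xxx() call
// made from a scalarized entry; see the comment below for why no GC map is needed.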
606 void LIR_Assembler::add_scalarized_entry_info(int pc_offset) {
607 flush_debug_info(pc_offset);
608 DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
609 // The VEP and VIEP(RO) of a C1-compiled method call buffer_inline_args_xxx()
610 // before doing any argument shuffling. This call may cause GC. When GC happens,
611 // all the parameters are still as passed by the caller, so we just use
612 // map->set_include_argument_oops() inside frame::sender_for_compiled_frame(RegisterMap* map).
613 // There's no need to build a GC map here.
614 OopMap* oop_map = new OopMap(0, 0);
615 debug_info->add_safepoint(pc_offset, oop_map);
616 DebugToken* locvals = debug_info->create_scope_values(NULL); // FIXME is this needed (for Java debugging to work properly??)
617 DebugToken* expvals = debug_info->create_scope_values(NULL); // FIXME is this needed (for Java debugging to work properly??)
618 DebugToken* monvals = debug_info->create_monitor_values(NULL); // FIXME: need testing with synchronized method
619 bool reexecute = false;
620 bool return_oop = false; // This flag will be ignored since it is used only for C2 with escape analysis.
621 bool rethrow_exception = false;
622 bool is_method_handle_invoke = false;
623 debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute, rethrow_exception, is_method_handle_invoke, return_oop, false, locvals, expvals, monvals);
624 debug_info->end_safepoint(pc_offset);
625 }
626
627 // The entry points of C1-compiled methods can have the following types:
628 // (1) Methods with no inline type args
629 // (2) Methods with inline type receiver but no inline type args
630 // VIEP_RO is the same as VIEP
631 // (3) Methods with non-inline type receiver and some inline type args
632 // VIEP_RO is the same as VEP
633 // (4) Methods with inline type receiver and other inline type args
634 // Separate VEP, VIEP and VIEP_RO
635 //
636 // (1) (2) (3) (4)
637 // UEP/UIEP: VEP: UEP: UEP:
638 // check_icache pack receiver check_icache check_icache
639 // VEP/VIEP/VIEP_RO jump to VIEP VEP/VIEP_RO: VIEP_RO:
640 // body UEP/UIEP: pack inline args pack inline args (except receiver)
641 // check_icache jump to VIEP jump to VIEP
642 // VIEP/VIEP_RO UIEP: VEP:
643 // body check_icache pack all inline args
644 // VIEP: jump to VIEP
645 // body UIEP:
646 // check_icache
647 // VIEP:
648 // body
649 void LIR_Assembler::emit_std_entries() {
650 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
651
652 _masm->align(CodeEntryAlignment);
653 const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();
654 if (ces->has_scalarized_args()) {
655 assert(InlineTypePassFieldsAsArgs && method()->get_Method()->has_scalarized_args(), "must be");
656 CodeOffsets::Entries ro_entry_type = ces->c1_inline_ro_entry_type();
657
658 // UEP: check icache and fall-through
659 if (ro_entry_type != CodeOffsets::Verified_Inline_Entry) {
660 offsets()->set_value(CodeOffsets::Entry, _masm->offset());
661 if (needs_icache(method())) {
662 check_icache();
663 }
664 }
665
666 // VIEP_RO: pack all value parameters, except the receiver
667 if (ro_entry_type == CodeOffsets::Verified_Inline_Entry_RO) {
668 emit_std_entry(CodeOffsets::Verified_Inline_Entry_RO, ces);
669 }
670
671 // VEP: pack all value parameters
672 _masm->align(CodeEntryAlignment);
673 emit_std_entry(CodeOffsets::Verified_Entry, ces);
674
675 // UIEP: check icache and fall-through
676 _masm->align(CodeEntryAlignment);
677 offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
678 if (ro_entry_type == CodeOffsets::Verified_Inline_Entry) {
679 // Special case if we have VIEP == VIEP(RO):
680 // this means UIEP (called by C1) == UEP (called by C2).
681 offsets()->set_value(CodeOffsets::Entry, _masm->offset());
682 }
683 if (needs_icache(method())) {
684 check_icache();
685 }
686
687 // VIEP: all value parameters are passed as refs - no packing.
688 emit_std_entry(CodeOffsets::Verified_Inline_Entry, NULL);
689
690 if (ro_entry_type != CodeOffsets::Verified_Inline_Entry_RO) {
691 // The VIEP(RO) is the same as VEP or VIEP
692 assert(ro_entry_type == CodeOffsets::Verified_Entry ||
693 ro_entry_type == CodeOffsets::Verified_Inline_Entry, "must be");
694 offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO,
695 offsets()->value(ro_entry_type));
696 }
697 } else {
698 // All 3 entries are the same (no inline type packing)
699 offsets()->set_value(CodeOffsets::Entry, _masm->offset());
700 offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
701 if (needs_icache(method())) {
702 check_icache();
703 }
704 emit_std_entry(CodeOffsets::Verified_Inline_Entry, NULL);
705 offsets()->set_value(CodeOffsets::Verified_Entry, offsets()->value(CodeOffsets::Verified_Inline_Entry));
706 offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO, offsets()->value(CodeOffsets::Verified_Inline_Entry));
707 }
708 }
709
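// Emits a single entry point of the requested kind. For the scalarized VEP and
// VIEP_RO entries the MacroAssembler packs the inline-type arguments and returns
// the offset of the runtime buffering call, for which scalarized entry info is recorded.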
710 void LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
711 offsets()->set_value(entry, _masm->offset());
712 _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
713 switch (entry) {
714 case CodeOffsets::Verified_Entry: {
715 if (needs_clinit_barrier_on_entry(method())) {
716 clinit_barrier(method());
717 }
718 int rt_call_offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
719 add_scalarized_entry_info(rt_call_offset);
720 break;
721 }
722 case CodeOffsets::Verified_Inline_Entry_RO: {
723 assert(!needs_clinit_barrier_on_entry(method()), "can't be static");
724 int rt_call_offset = _masm->verified_inline_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
725 add_scalarized_entry_info(rt_call_offset);
726 break;
727 }
728 case CodeOffsets::Verified_Inline_Entry: {
729 if (needs_clinit_barrier_on_entry(method())) {
730 clinit_barrier(method());
731 }
732 build_frame();
733 offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
734 break;
735 }
736 default:
737 ShouldNotReachHere();
738 break;
739 }
740 }
741
742 void LIR_Assembler::emit_op0(LIR_Op0* op) {
743 switch (op->code()) {
744 case lir_nop:
745 assert(op->info() == NULL, "not supported");
746 _masm->nop();
747 break;
748
749 case lir_label:
750 Unimplemented();
751 break;
752
753 case lir_std_entry:
754 emit_std_entries();
755 break;
756
757 case lir_osr_entry:
758 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
759 osr_entry();
760 break;
761
762 #ifdef IA32
763 case lir_fpop_raw:
764 fpop();
765 break;
766 #endif // IA32
767
768 case lir_breakpoint:
769 breakpoint();
770 break;
771
772 case lir_membar:
773 membar();
774 break;
788 case lir_membar_storestore:
789 membar_storestore();
790 break;
791
792 case lir_membar_loadstore:
793 membar_loadstore();
794 break;
795
796 case lir_membar_storeload:
797 membar_storeload();
798 break;
799
800 case lir_get_thread:
801 get_thread(op->result_opr());
802 break;
803
804 case lir_on_spin_wait:
805 on_spin_wait();
806 break;
807
808 case lir_check_orig_pc:
809 check_orig_pc();
810 break;
811
812 default:
813 ShouldNotReachHere();
814 break;
815 }
816 }
817
818
819 void LIR_Assembler::emit_op2(LIR_Op2* op) {
820 switch (op->code()) {
821 case lir_cmp:
822 if (op->info() != NULL) {
823 assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
824 "shouldn't be codeemitinfo for non-address operands");
825 add_debug_info_for_null_check_here(op->info()); // exception possible
826 }
827 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
828 break;
829
830 case lir_cmp_l2i:
831 case lir_cmp_fd2i:
890
891 default:
892 Unimplemented();
893 break;
894 }
895 }
896
897 void LIR_Assembler::emit_op4(LIR_Op4* op) {
898 switch(op->code()) {
899 case lir_cmove:
900 cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
901 break;
902
903 default:
904 Unimplemented();
905 break;
906 }
907 }
908
909 void LIR_Assembler::build_frame() {
910 _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()),
911 needs_stack_repair(), method()->has_scalarized_args(), &_verified_inline_entry);
912 }
913
914
915 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
916 assert(strict_fp_requires_explicit_rounding, "not required");
917 assert((src->is_single_fpu() && dest->is_single_stack()) ||
918 (src->is_double_fpu() && dest->is_double_stack()),
919 "round_fp: rounds register -> stack location");
920
921 reg2stack (src, dest, src->type(), pop_fpu_stack);
922 }
923
924
925 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
926 if (src->is_register()) {
927 if (dest->is_register()) {
928 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
929 reg2reg(src, dest);
930 } else if (dest->is_stack()) {
931 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");