/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_version.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that a call can be inserted.
  // We cannot use fat nops here, because a concurrent code rewrite could
  // transiently create an illegal instruction sequence.
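  // For example, on x86 a NativeGeneralJump occupies 5 bytes, so patching
  // over a 1-byte instruction would be padded with four single-byte nops below.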
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  info->set_force_reexecute();
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
      case Bytecodes::_ldc2_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(nullptr)
 , _pending_non_safepoint(nullptr)
 , _pending_non_safepoint_offset(0)
 , _immediate_oops_patched(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
  _verified_inline_entry.reset();
}


void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _immediate_oops_patched += stub->nr_immediate_oops_patched();
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.freeze());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}

bool LIR_Assembler::needs_clinit_barrier_on_entry(ciMethod* method) const {
  return VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier();
}

int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
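// For example, if this method's frame needs roughly 256 bytes (plus the
// platform's extra bang size) while its deoptimized interpreter frame would
// need 1024, we bang 1024 bytes, so the deopt path never hits an unbanged page.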
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == nullptr ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != nullptr && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != nullptr, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.freeze());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print every op, since that's too verbose. Print branches,
      // since they include block and stub names. Also print patching moves,
      // since they generate funny-looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.freeze());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------

void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool maybe_return_as_fields) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, maybe_return_as_fields);
  if (cinfo->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != nullptr) return ss->state();
  return ins->state_before();
}

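// Roughly: remember the most recent instruction whose debug state could be
// described at a non-safepoint, and only materialize it (via
// record_non_safepoint_debug_info) once a later op carries different debug
// info. Ops that share the same source instruction or the same ValueStack
// just slide the pending offset forward.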
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == nullptr) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == nullptr) return;
  if (_pending_non_safepoint != nullptr) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = nullptr;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return null if n is too large.
// Also returns, via bci_result, the caller_bci for the next-younger state.
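// For example, with an inlining chain A -> B -> C (C youngest) and s = C's
// state: nth_oldest(s, 0) returns A's state, nth_oldest(s, 1) returns B's,
// nth_oldest(s, 2) returns C's, and nth_oldest(s, 3) returns null.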
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == nullptr) break;
    t = t->caller_state();
  }
  if (t == nullptr) return nullptr;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == nullptr) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == nullptr) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}

ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // must align call sites, otherwise they can't be updated atomically
  align_call(op->code());

  if (CodeBuffer::supports_shared_stubs() && op->method()->can_be_statically_bound()) {
    // Calls of the same statically bound method can share
    // a stub to the interpreter.
    CodeBuffer::csize_t call_offset = pc() - _masm->code()->insts_begin();
    _masm->code()->shared_stub_to_interp_for(op->method(), call_offset);
  } else {
    emit_static_call_stub();
  }
  CHECK_BAILOUT();

  switch (op->code()) {
    case lir_static_call:
    case lir_dynamic_call:
      call(op, relocInfo::static_call_type);
      break;
    case lir_optvirtual_call:
      call(op, relocInfo::opt_virtual_call_type);
      break;
    case lir_icvirtual_call:
      ic_call(op);
      break;
    default:
      fatal("unexpected op code: %s", op->name());
      break;
  }

  ciInlineKlass* vk = nullptr;
  if (op->maybe_return_as_fields(&vk)) {
    int offset = store_inline_type_fields_to_buf(vk);
    add_call_info(offset, op->info(), true);
  }
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(),
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_f2hf:
    case lir_hf2f:
      intrinsic_op(op->code(), op->in_opr(), op->tmp_opr(), op->result_opr(), op);
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr(), op->tmp_opr());
      break;

    case lir_return: {
      assert(op->as_OpReturn() != nullptr, "sanity");
      LIR_OpReturn* ret_op = (LIR_OpReturn*)op;
      return_op(ret_op->in_opr(), ret_op->stub());
      if (ret_op->stub() != nullptr) {
        append_code_stub(ret_op->stub());
      }
      break;
    }

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::add_scalarized_debug_info(int pc_offset) {
  // The VEP and VIEP(RO) of a C1-compiled method call buffer_inline_args_xxx()
  // before doing any argument shuffling. This call may cause GC. When GC happens,
  // all the parameters are still as passed by the caller, so we just use
  // map->set_include_argument_oops() inside frame::sender_for_compiled_frame(RegisterMap* map).
  // Deoptimization is delayed until we enter the method body, so we only need a
  // scope for stack walking here. There are no materialized locals, expression
  // stack entries, or monitors yet.
  flush_debug_info(pc_offset);
  OopMap* oop_map = new OopMap(0, 0);
  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  debug_info->add_safepoint(pc_offset, oop_map);
  bool reexecute = false;
  debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute);
  debug_info->end_safepoint(pc_offset);
}

// The entry points of C1-compiled methods can have the following types:
// (1) Methods with no inline type args
// (2) Methods with inline type receiver but no inline type args
//     VIEP_RO is the same as VIEP
// (3) Methods with non-inline type receiver and some inline type args
//     VIEP_RO is the same as VEP
// (4) Methods with inline type receiver and other inline type args
//     Separate VEP, VIEP and VIEP_RO
//
// (1)               (2)                 (3)                    (4)
// UEP/UIEP:         VEP:                UEP:                   UEP:
//   check_icache      pack receiver       check_icache           check_icache
// VEP/VIEP/VIEP_RO  jump to VIEP        VEP/VIEP_RO:           VIEP_RO:
//   body            UEP/UIEP:             pack inline args       pack inline args (except receiver)
//                     check_icache        jump to VIEP           jump to VIEP
//                   VIEP/VIEP_RO        UIEP:                  VEP:
//                     body                check_icache           pack all inline args
//                                       VIEP:                    jump to VIEP
//                                         body                 UIEP:
//                                                                check_icache
//                                                              VIEP:
//                                                                body
void LIR_Assembler::emit_std_entries() {
  offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());

  _masm->align(CodeEntryAlignment);

  if (method()->has_scalarized_args()) {
    VM_ENTRY_MARK;
    assert(InlineTypePassFieldsAsArgs, "must be");
    CompiledEntrySignature ces(method()->get_Method());
    ces.compute_calling_conventions(false);
    CodeOffsets::Entries ro_entry_type = ces.c1_inline_ro_entry_type();

    // UEP: check icache and fall-through
    if (ro_entry_type != CodeOffsets::Verified_Inline_Entry) {
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
      if (needs_icache(method())) {
        check_icache();
      }
    }

    // VIEP_RO: pack all value parameters, except the receiver
    if (ro_entry_type == CodeOffsets::Verified_Inline_Entry_RO) {
      emit_std_entry(CodeOffsets::Verified_Inline_Entry_RO, &ces);
    }

    // VEP: pack all value parameters
    _masm->align(CodeEntryAlignment);
    emit_std_entry(CodeOffsets::Verified_Entry, &ces);

    // UIEP: check icache and fall-through
    _masm->align(CodeEntryAlignment);
    offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
    if (ro_entry_type == CodeOffsets::Verified_Inline_Entry) {
      // Special case if we have VIEP == VIEP(RO):
      // this means UIEP (called by C1) == UEP (called by C2).
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    }
    if (needs_icache(method())) {
      check_icache();
    }

    // VIEP: all value parameters are passed as refs - no packing.
    emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);

    if (ro_entry_type != CodeOffsets::Verified_Inline_Entry_RO) {
      // The VIEP(RO) is the same as VEP or VIEP
      assert(ro_entry_type == CodeOffsets::Verified_Entry ||
             ro_entry_type == CodeOffsets::Verified_Inline_Entry, "must be");
      offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO,
                           offsets()->value(ro_entry_type));
    }
  } else {
    // All 3 entries are the same (no inline type packing)
    offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
    if (needs_icache(method())) {
      check_icache();
    }
    emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);
    offsets()->set_value(CodeOffsets::Verified_Entry, offsets()->value(CodeOffsets::Verified_Inline_Entry));
    offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO, offsets()->value(CodeOffsets::Verified_Inline_Entry));
  }
}

void LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
  offsets()->set_value(entry, _masm->offset());
  _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
  switch (entry) {
    case CodeOffsets::Verified_Entry: {
      if (needs_clinit_barrier_on_entry(method())) {
        clinit_barrier(method());
      }
      int rt_call_offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
      add_scalarized_debug_info(rt_call_offset);
      break;
    }
    case CodeOffsets::Verified_Inline_Entry_RO: {
      assert(!needs_clinit_barrier_on_entry(method()), "can't be static");
      int rt_call_offset = _masm->verified_inline_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
      add_scalarized_debug_info(rt_call_offset);
      break;
    }
    case CodeOffsets::Verified_Inline_Entry: {
      if (needs_clinit_barrier_on_entry(method())) {
        clinit_barrier(method());
      }
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;
    }
    default:
      ShouldNotReachHere();
      break;
  }
}

void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_nop:
      assert(op->info() == nullptr, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_std_entry:
      emit_std_entries();
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    case lir_check_orig_pc:
      check_orig_pc();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != nullptr) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_div:
    case lir_rem:
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info());
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::emit_op4(LIR_Op4* op) {
  switch (op->code()) {
    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()),
                     needs_stack_repair(), method()->has_scalarized_args(), &_verified_inline_entry);
}


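// Dispatch a move on the (source, destination) operand kinds. Note that
// patching and CodeEmitInfo are only legal on the variants that touch
// memory or constants, as the asserts below spell out.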
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      reg2stack(src, dest, type);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide);
  } else {
    ShouldNotReachHere();
  }
}

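// With -XX:+VerifyOops, sanity-check every oop recorded in this call site's
// oop map, whether it currently lives in a register or in a stack slot.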
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          _masm->verify_oop(r->as_Register());
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}