/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInlineKlass.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_version.hpp"
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that the call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may transiently
  // create an illegal instruction sequence.
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
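  // Patching deoptimizes to the interpreter; force the bytecode that
  // triggered patching to be re-executed rather than resumed after.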
  info->set_force_reexecute();
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
      case Bytecodes::_ldc2_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

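// Invoke bytecodes with an optional appendix (e.g. invokedynamic) patch in the
// resolved appendix; all other patching sites load the class mirror instead.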
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(nullptr)
 , _pending_non_safepoint(nullptr)
 , _pending_non_safepoint_offset(0)
 , _immediate_oops_patched(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
  _verified_inline_entry.reset();
}


void LIR_Assembler::check_codespace() {
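  // Bail out while there is still headroom (1K on 32-bit, 2K on 64-bit) for
  // the code emitted before the next check.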
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _immediate_oops_patched += stub->nr_immediate_oops_patched();
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.freeze());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}

bool LIR_Assembler::needs_clinit_barrier_on_entry(ciMethod* method) const {
  return VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier();
}

int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}

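// Emit the out-of-line entry code of exception handlers that need an adapter
// block, and record the pc offset at which each handler is entered.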
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == nullptr ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != nullptr && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != nullptr, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.freeze());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op, since that's too verbose. Print
      // branches, since they include block and stub names. Also print
      // patching moves, since they generate funny-looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.freeze());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------

void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool maybe_return_as_fields) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, maybe_return_as_fields);
  if (cinfo->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

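// Returns the JVM state to record for an instruction: StateSplit nodes carry
// their own state, all others fall back to their state before execution.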
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != nullptr) return ss->state();
  return ins->state_before();
}

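// Non-safepoint debug info is recorded lazily: ops that share the same source
// instruction (or an equal state) merely advance the pending pc offset, and
// the pending entry is flushed once an op with a different state is emitted
// at a later offset.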
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == nullptr) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == nullptr) return;
  if (_pending_non_safepoint != nullptr) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = nullptr;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns null if n is too large.
// Also returns, via bci_result, the caller bci of the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == nullptr) break;
    t = t->caller_state();
  }
  if (t == nullptr) return nullptr;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == nullptr) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == nullptr) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute, since these ScopeDescs are never used for deopt.
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}

ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // must align call sites, otherwise they can't be updated atomically
  align_call(op->code());

  if (CodeBuffer::supports_shared_stubs() && op->method()->can_be_statically_bound()) {
    // Calls of the same statically bound method can share
    // a stub to the interpreter.
    CodeBuffer::csize_t call_offset = pc() - _masm->code()->insts_begin();
    _masm->code()->shared_stub_to_interp_for(op->method(), call_offset);
  } else {
    emit_static_call_stub();
  }
  CHECK_BAILOUT();

  switch (op->code()) {
    case lir_static_call:
    case lir_dynamic_call:
      call(op, relocInfo::static_call_type);
      break;
    case lir_optvirtual_call:
      call(op, relocInfo::opt_virtual_call_type);
      break;
    case lir_icvirtual_call:
      ic_call(op);
      break;
    default:
      fatal("unexpected op code: %s", op->name());
      break;
  }

  ciInlineKlass* vk = nullptr;
  if (op->maybe_return_as_fields(&vk)) {
    int offset = store_inline_type_fields_to_buf(vk);
    add_call_info(offset, op->info(), true);
  }
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(),
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_f2hf:
    case lir_hf2f:
      intrinsic_op(op->code(), op->in_opr(), op->tmp_opr(), op->result_opr(), op);
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr(), op->tmp_opr());
      break;

    case lir_return: {
      assert(op->as_OpReturn() != nullptr, "sanity");
      LIR_OpReturn* ret_op = (LIR_OpReturn*)op;
      return_op(ret_op->in_opr(), ret_op->stub());
      if (ret_op->stub() != nullptr) {
        append_code_stub(ret_op->stub());
      }
      break;
    }

    case lir_safepoint:
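      // Two safepoints cannot share a pc offset in the debug info; if one
      // was just recorded at the current offset, pad with a nop first.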
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::add_scalarized_entry_info(int pc_offset) {
  flush_debug_info(pc_offset);
  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  // The VEP and VIEP(RO) of a C1-compiled method call buffer_inline_args_xxx()
  // before doing any argument shuffling. This call may cause GC. When GC happens,
  // all the parameters are still as passed by the caller, so we just use
  // map->set_include_argument_oops() inside frame::sender_for_compiled_frame(RegisterMap* map).
  // There's no need to build a GC map here.
  OopMap* oop_map = new OopMap(0, 0);
  debug_info->add_safepoint(pc_offset, oop_map);
  DebugToken* locvals = debug_info->create_scope_values(nullptr); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* expvals = debug_info->create_scope_values(nullptr); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* monvals = debug_info->create_monitor_values(nullptr); // FIXME: need testing with synchronized method
  bool reexecute = false;
  bool return_oop = false; // This flag will be ignored, since it is used only for C2 with escape analysis.
  bool rethrow_exception = false;
  bool is_method_handle_invoke = false;
  debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute, rethrow_exception, is_method_handle_invoke, return_oop, false, locvals, expvals, monvals);
  debug_info->end_safepoint(pc_offset);
}

// The entry points of C1-compiled methods can have the following types:
// (1) Methods with no inline type args
// (2) Methods with inline type receiver but no inline type args
//     VIEP_RO is the same as VIEP
// (3) Methods with non-inline type receiver and some inline type args
//     VIEP_RO is the same as VEP
// (4) Methods with inline type receiver and other inline type args
//     Separate VEP, VIEP and VIEP_RO
//
// (1)               (2)               (3)                    (4)
// UEP/UIEP:         VEP:              UEP:                   UEP:
//   check_icache      pack receiver     check_icache           check_icache
// VEP/VIEP/VIEP_RO    jump to VIEP    VEP/VIEP_RO:           VIEP_RO:
//   body            UEP/UIEP:           pack inline args       pack inline args (except receiver)
//                     check_icache      jump to VIEP           jump to VIEP
//                   VIEP/VIEP_RO      UIEP:                  VEP:
//                     body              check_icache           pack all inline args
//                                     VIEP:                    jump to VIEP
//                                       body                 UIEP:
//                                                               check_icache
//                                                             VIEP:
//                                                               body
void LIR_Assembler::emit_std_entries() {
  offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());

  _masm->align(CodeEntryAlignment);
  const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();
  if (ces->has_scalarized_args()) {
    assert(InlineTypePassFieldsAsArgs && method()->get_Method()->has_scalarized_args(), "must be");
    CodeOffsets::Entries ro_entry_type = ces->c1_inline_ro_entry_type();

    // UEP: check icache and fall-through
    if (ro_entry_type != CodeOffsets::Verified_Inline_Entry) {
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
      if (needs_icache(method())) {
        check_icache();
      }
    }

    // VIEP_RO: pack all value parameters, except the receiver
    if (ro_entry_type == CodeOffsets::Verified_Inline_Entry_RO) {
      emit_std_entry(CodeOffsets::Verified_Inline_Entry_RO, ces);
    }

    // VEP: pack all value parameters
    _masm->align(CodeEntryAlignment);
    emit_std_entry(CodeOffsets::Verified_Entry, ces);

    // UIEP: check icache and fall-through
    _masm->align(CodeEntryAlignment);
    offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
    if (ro_entry_type == CodeOffsets::Verified_Inline_Entry) {
      // Special case if we have VIEP == VIEP(RO):
      // this means UIEP (called by C1) == UEP (called by C2).
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    }
    if (needs_icache(method())) {
      check_icache();
    }

    // VIEP: all value parameters are passed as refs - no packing.
    emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);

    if (ro_entry_type != CodeOffsets::Verified_Inline_Entry_RO) {
      // The VIEP(RO) is the same as VEP or VIEP
      assert(ro_entry_type == CodeOffsets::Verified_Entry ||
             ro_entry_type == CodeOffsets::Verified_Inline_Entry, "must be");
      offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO,
                           offsets()->value(ro_entry_type));
    }
  } else {
    // All 3 entries are the same (no inline type packing)
    offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
    if (needs_icache(method())) {
      check_icache();
    }
    emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);
    offsets()->set_value(CodeOffsets::Verified_Entry, offsets()->value(CodeOffsets::Verified_Inline_Entry));
    offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO, offsets()->value(CodeOffsets::Verified_Inline_Entry));
  }
}

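// Emits the prologue for one entry kind. The scalarized entries (VEP and
// VIEP_RO) call into the runtime to buffer inline type args and record debug
// info at the returned call offset; the plain VIEP only builds the frame.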
void LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
  offsets()->set_value(entry, _masm->offset());
  _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
  switch (entry) {
    case CodeOffsets::Verified_Entry: {
      if (needs_clinit_barrier_on_entry(method())) {
        clinit_barrier(method());
      }
      int rt_call_offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
      add_scalarized_entry_info(rt_call_offset);
      break;
    }
    case CodeOffsets::Verified_Inline_Entry_RO: {
      assert(!needs_clinit_barrier_on_entry(method()), "can't be static");
      int rt_call_offset = _masm->verified_inline_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
      add_scalarized_entry_info(rt_call_offset);
      break;
    }
    case CodeOffsets::Verified_Inline_Entry: {
      if (needs_clinit_barrier_on_entry(method())) {
        clinit_barrier(method());
      }
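      // All inline type args arrive as references here, so no buffering or
      // packing is needed before setting up the frame.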
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;
    }
    default:
      ShouldNotReachHere();
      break;
  }
}

void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_nop:
      assert(op->info() == nullptr, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_std_entry:
      emit_std_entries();
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    case lir_check_orig_pc:
      check_orig_pc();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != nullptr) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_div:
    case lir_rem:
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info());
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::emit_op4(LIR_Op4* op) {
  switch (op->code()) {
    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()),
                     needs_stack_repair(), method()->has_scalarized_args(), &_verified_inline_entry);
}

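// Dispatches a generic move to the platform-specific primitive, based on the
// source and destination operand kinds. Patching and CodeEmitInfo are only
// allowed on the variants that can trap or require resolution, as the
// asserts below document.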
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      reg2stack(src, dest, type);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
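    // Walk every oop recorded in the oop map and verify it, whether it lives
    // in a register or in a stack slot.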
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          _masm->verify_oop(r->as_Register());
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}