/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInlineKlass.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_version.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that the call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may transiently
  // create an illegal instruction sequence.
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  info->set_force_reexecute();
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
      case Bytecodes::_ldc2_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

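// Pick the patching stub kind for an unresolved constant load: bytecodes that
// carry an optional appendix are patched via load_appendix, all others via the
// class mirror.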
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(nullptr)
 , _pending_non_safepoint(nullptr)
 , _pending_non_safepoint_offset(0)
 , _immediate_oops_patched(0)
{
  _slow_case_stubs = new CodeStubList();
}

LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
  _verified_inline_entry.reset();
}

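// Bail out of the compilation if the remaining space in the current code
// section is getting too small for the instructions still to be emitted.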
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _immediate_oops_patched += stub->nr_immediate_oops_patched();
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.freeze());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


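// Static methods have no receiver, so only non-static methods need the
// inline-cache (receiver klass) check at the unverified entry point.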
bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}

bool LIR_Assembler::needs_clinit_barrier_on_entry(ciMethod* method) const {
  return VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier();
}

int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == nullptr ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != nullptr && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != nullptr, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.freeze());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.freeze());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------

void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool maybe_return_as_fields) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, maybe_return_as_fields);
  if (cinfo->exception_handlers() != nullptr) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

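// For a StateSplit instruction the state after the split is the relevant one;
// for all other instructions fall back to the state before execution.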
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != nullptr) return ss->state();
  return ins->state_before();
}

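// Track debug info for non-safepoint locations lazily: keep a single pending
// entry while consecutive LIR ops share the same source state, and only record
// it once an op with different debug info shows up.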
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == nullptr)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == nullptr)  return;
  if (_pending_non_safepoint != nullptr) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = nullptr;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns null if n is too large.
// Also returns, via bci_result, the caller_bci for the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == nullptr)  break;
    t = t->caller_state();
  }
  if (t == nullptr)  return nullptr;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == nullptr)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

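// Flush the pending non-safepoint entry: record its pc offset and describe the
// scopes from the oldest caller down to the youngest.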
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == nullptr)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}

ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

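// Emit a Java call site. The call instruction is aligned so that it can later
// be patched atomically, and a static-call stub to the interpreter is emitted
// (or shared with other call sites for statically bound methods).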
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // must align call sites, otherwise they can't be updated atomically
  align_call(op->code());

  if (CodeBuffer::supports_shared_stubs() && op->method()->can_be_statically_bound()) {
    // Calls of the same statically bound method can share
    // a stub to the interpreter.
    CodeBuffer::csize_t call_offset = pc() - _masm->code()->insts_begin();
    _masm->code()->shared_stub_to_interp_for(op->method(), call_offset);
  } else {
    emit_static_call_stub();
  }
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  default:
    fatal("unexpected op code: %s", op->name());
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

  ciInlineKlass* vk = nullptr;
  if (op->maybe_return_as_fields(&vk)) {
    int offset = store_inline_type_fields_to_buf(vk);
    add_call_info(offset, op->info(), true);
  }
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}

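// Dispatch a one-operand LIR op to the corresponding platform-specific
// emitter routine.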
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(),
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_f2hf:
    case lir_hf2f:
      intrinsic_op(op->code(), op->in_opr(), op->tmp_opr(), op->result_opr(), op);
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr(), op->tmp_opr());
      break;

    case lir_return: {
      assert(op->as_OpReturn() != nullptr, "sanity");
      LIR_OpReturn *ret_op = (LIR_OpReturn*)op;
      return_op(ret_op->in_opr(), ret_op->stub());
      if (ret_op->stub() != nullptr) {
        append_code_stub(ret_op->stub());
      }
      break;
    }

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

#ifdef IA32
    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;
#endif // IA32

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::add_scalarized_entry_info(int pc_offset) {
  flush_debug_info(pc_offset);
  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  // The VEP and VIEP(RO) of a C1-compiled method call buffer_inline_args_xxx()
  // before doing any argument shuffling. This call may cause GC. When GC happens,
  // all the parameters are still as passed by the caller, so we just use
  // map->set_include_argument_oops() inside frame::sender_for_compiled_frame(RegisterMap* map).
  // There's no need to build a GC map here.
  OopMap* oop_map = new OopMap(0, 0);
  debug_info->add_safepoint(pc_offset, oop_map);
  DebugToken* locvals = debug_info->create_scope_values(nullptr); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* expvals = debug_info->create_scope_values(nullptr); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* monvals = debug_info->create_monitor_values(nullptr); // FIXME: need testing with synchronized method
  bool reexecute = false;
  bool return_oop = false; // This flag will be ignored since it is used only for C2 with escape analysis.
  bool rethrow_exception = false;
  bool is_method_handle_invoke = false;
  debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute, rethrow_exception, is_method_handle_invoke, return_oop, false, locvals, expvals, monvals);
  debug_info->end_safepoint(pc_offset);
}

// The entry points of C1-compiled methods can have the following types:
// (1) Methods with no inline type args
// (2) Methods with inline type receiver but no inline type args
//     VIEP_RO is the same as VIEP
// (3) Methods with non-inline type receiver and some inline type args
//     VIEP_RO is the same as VEP
// (4) Methods with inline type receiver and other inline type args
//     Separate VEP, VIEP and VIEP_RO
//
// (1)               (2)                 (3)                    (4)
// UEP/UIEP:         VEP:                UEP:                   UEP:
//   check_icache      pack receiver       check_icache           check_icache
// VEP/VIEP/VIEP_RO    jump to VIEP      VEP/VIEP_RO:           VIEP_RO:
//   body            UEP/UIEP:             pack inline args       pack inline args (except receiver)
//                     check_icache        jump to VIEP           jump to VIEP
//                   VIEP/VIEP_RO        UIEP:                  VEP:
//                     body                check_icache           pack all inline args
//                                       VIEP:                    jump to VIEP
//                                         body                 UIEP:
//                                                                check_icache
//                                                              VIEP:
//                                                                body
void LIR_Assembler::emit_std_entries() {
  offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());

  _masm->align(CodeEntryAlignment);
  const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();
  if (ces->has_scalarized_args()) {
    assert(InlineTypePassFieldsAsArgs && method()->get_Method()->has_scalarized_args(), "must be");
    CodeOffsets::Entries ro_entry_type = ces->c1_inline_ro_entry_type();

    // UEP: check icache and fall-through
    if (ro_entry_type != CodeOffsets::Verified_Inline_Entry) {
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
      if (needs_icache(method())) {
        check_icache();
      }
    }

    // VIEP_RO: pack all value parameters, except the receiver
    if (ro_entry_type == CodeOffsets::Verified_Inline_Entry_RO) {
      emit_std_entry(CodeOffsets::Verified_Inline_Entry_RO, ces);
    }

    // VEP: pack all value parameters
    _masm->align(CodeEntryAlignment);
    emit_std_entry(CodeOffsets::Verified_Entry, ces);

    // UIEP: check icache and fall-through
    _masm->align(CodeEntryAlignment);
    offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
    if (ro_entry_type == CodeOffsets::Verified_Inline_Entry) {
      // Special case if we have VIEP == VIEP(RO):
      // this means UIEP (called by C1) == UEP (called by C2).
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    }
    if (needs_icache(method())) {
      check_icache();
    }

    // VIEP: all value parameters are passed as refs - no packing.
    emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);

    if (ro_entry_type != CodeOffsets::Verified_Inline_Entry_RO) {
      // The VIEP(RO) is the same as VEP or VIEP
      assert(ro_entry_type == CodeOffsets::Verified_Entry ||
             ro_entry_type == CodeOffsets::Verified_Inline_Entry, "must be");
      offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO,
                           offsets()->value(ro_entry_type));
    }
  } else {
    // All 3 entries are the same (no inline type packing)
    offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    offsets()->set_value(CodeOffsets::Inline_Entry, _masm->offset());
    if (needs_icache(method())) {
      check_icache();
    }
    emit_std_entry(CodeOffsets::Verified_Inline_Entry, nullptr);
    offsets()->set_value(CodeOffsets::Verified_Entry, offsets()->value(CodeOffsets::Verified_Inline_Entry));
    offsets()->set_value(CodeOffsets::Verified_Inline_Entry_RO, offsets()->value(CodeOffsets::Verified_Inline_Entry));
  }
}

void LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
  offsets()->set_value(entry, _masm->offset());
  _masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
  switch (entry) {
  case CodeOffsets::Verified_Entry: {
    if (needs_clinit_barrier_on_entry(method())) {
      clinit_barrier(method());
    }
    int rt_call_offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
    add_scalarized_entry_info(rt_call_offset);
    break;
  }
  case CodeOffsets::Verified_Inline_Entry_RO: {
    assert(!needs_clinit_barrier_on_entry(method()), "can't be static");
    int rt_call_offset = _masm->verified_inline_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_inline_entry);
    add_scalarized_entry_info(rt_call_offset);
    break;
  }
  case CodeOffsets::Verified_Inline_Entry: {
    if (needs_clinit_barrier_on_entry(method())) {
      clinit_barrier(method());
    }
    build_frame();
    offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
    break;
  }
  default:
    ShouldNotReachHere();
    break;
  }
}

void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_nop:
      assert(op->info() == nullptr, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_std_entry:
      emit_std_entries();
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

#ifdef IA32
    case lir_fpop_raw:
      fpop();
      break;
#endif // IA32

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    case lir_check_orig_pc:
      check_orig_pc();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != nullptr) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_div:
    case lir_rem:
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info());
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::emit_op4(LIR_Op4* op) {
  switch(op->code()) {
    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()),
                     needs_stack_repair(), method()->has_scalarized_args(), &_verified_inline_entry);
}


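// Dispatch a move to the platform back end based on where the source and
// destination operands live (register, stack slot, constant, or memory).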
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      reg2reg(src,  dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      reg2stack(src, dest, type);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == nullptr, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide);
  } else {
    ShouldNotReachHere();
  }
}


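// With -XX:+VerifyOops, sanity-check every oop recorded in the OopMap, whether
// it lives in a register or in a stack slot.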
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          _masm->verify_oop(r->as_Register());
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}