/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciValueKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that the call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may transiently
  // create an illegal instruction sequence.
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_defaultvalue:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

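// Choose the patching stub id for an unresolved constant load: bytecodes that
// carry an optional appendix are patched with load_appendix_id, all others
// patch in the class mirror (load_mirror_id).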
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _bs(BarrierSet::barrier_set())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
  _verified_value_entry.reset();
}


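// Bail out of the compilation if the code buffer is about to run out of space.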
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

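// Emit the out-of-line code for every stub in the list, checking for code
// buffer overflow and bailouts before each one.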
void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


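// Only non-static methods need an inline cache check on entry: a static call
// has no receiver whose class would need to be verified.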
bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}

bool LIR_Assembler::needs_clinit_barrier_on_entry(ciMethod* method) const {
  return VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier();
}

int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob, which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}

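// For each exception handler, emit its adapter code (if any) and record the
// code offset of its entry point for the exception handler table.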
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


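// Emit the LIR of a single basic block, aligning backward branch targets and
// recording the entry offset of exception handler blocks as needed.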
void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't record every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny-looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool maybe_return_as_fields) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, maybe_return_as_fields);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

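// Track debug information for LIR ops that are not safepoints. Consecutive ops
// coming from the same source instruction (or carrying the same value stack)
// just extend the pending entry; once a source with a different state shows up,
// the pending entry is flushed to the debug info recorder.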
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Also returns the caller_bci for the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

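// Flush the pending non-safepoint debug info: describe every scope of the
// inlining chain, from the oldest caller down to the current method, at the
// pending code offset.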
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}

ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // must align call sites, otherwise they can't be updated atomically
  align_call(op->code());

  // emit the static call stub out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal("unexpected op code: %s", op->name());
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

  ciValueKlass* vk;
  if (op->maybe_return_as_fields(&vk)) {
    int offset = store_value_type_fields_to_buf(vk);
    add_call_info(offset, op->info(), true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up here.
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::add_scalarized_entry_info(int pc_offset) {
  flush_debug_info(pc_offset);
  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  // The VEP and VVEP(RO) of a C1-compiled method call buffer_value_args_xxx()
  // before doing any argument shuffling. This call may cause GC. When GC happens,
  // all the parameters are still as passed by the caller, so we just use
  // map->set_include_argument_oops() inside frame::sender_for_compiled_frame(RegisterMap* map).
  // There's no need to build a GC map here.
  OopMap* oop_map = new OopMap(0, 0);
  debug_info->add_safepoint(pc_offset, oop_map);
  DebugToken* locvals = debug_info->create_scope_values(NULL); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* expvals = debug_info->create_scope_values(NULL); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* monvals = debug_info->create_monitor_values(NULL); // FIXME: need testing with synchronized method
  bool reexecute = false;
  bool return_oop = false; // This flag will be ignored since it is used only for C2 with escape analysis.
  bool rethrow_exception = false;
  bool is_method_handle_invoke = false;
  debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute, rethrow_exception, is_method_handle_invoke, return_oop, false, locvals, expvals, monvals);
  debug_info->end_safepoint(pc_offset);
}

// The entry points of C1-compiled methods can have the following types:
// (1) Methods with no value args
// (2) Methods with value receiver but no value args
//     VVEP_RO is the same as VVEP
// (3) Methods with non-value receiver and some value args
//     VVEP_RO is the same as VEP
// (4) Methods with value receiver and other value args
//     Separate VEP, VVEP and VVEP_RO
//
// (1)               (2)                 (3)                    (4)
// UEP/UVEP:         VEP:                UEP:                   UEP:
//   check_icache      pack receiver       check_icache           check_icache
// VEP/VVEP/VVEP_RO  UEP/UVEP:           VEP/VVEP_RO:           VVEP_RO:
//   body              check_icache        pack value args        pack value args (except receiver)
//                   VVEP/VVEP_RO        UVEP:                  VEP:
//                     body                check_icache           pack all value args
//                                       VVEP:                  UVEP:
//                                         body                   check_icache
//                                                              VVEP:
//                                                                body
//
// Note: after packing, we jump to the method body.
void LIR_Assembler::emit_std_entries() {
  offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());

  const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();

  _masm->align(CodeEntryAlignment);

  if (ces->has_scalarized_args()) {
    assert(ValueTypePassFieldsAsArgs && method()->get_Method()->has_scalarized_args(), "must be");

    CodeOffsets::Entries ro_entry_type = ces->c1_value_ro_entry_type();

    if (ro_entry_type != CodeOffsets::Verified_Value_Entry) {
      // This is the UEP. It will fall through to VEP or VVEP(RO).
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
    }

    if (ro_entry_type == CodeOffsets::Verified_Value_Entry_RO) {
      // VVEP(RO) = pack all value parameters, except the <this> object.
      add_scalarized_entry_info(emit_std_entry(CodeOffsets::Verified_Value_Entry_RO, ces));
    }

    // VEP = pack all value parameters
    _masm->align(CodeEntryAlignment);
    add_scalarized_entry_info(emit_std_entry(CodeOffsets::Verified_Entry, ces));

    _masm->align(CodeEntryAlignment);
    // This is the UVEP. It will fall through to VVEP.
    offsets()->set_value(CodeOffsets::Value_Entry, _masm->offset());
    if (ro_entry_type == CodeOffsets::Verified_Value_Entry) {
      // Special case if we have VVEP == VVEP(RO):
      // this means UVEP (called by C1) == UEP (called by C2).
      offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    }

    if (needs_icache(compilation()->method())) {
      check_icache();
    }
    // VVEP = all value parameters are passed as refs - no packing.
    emit_std_entry(CodeOffsets::Verified_Value_Entry, NULL);

    if (ro_entry_type != CodeOffsets::Verified_Value_Entry_RO) {
      // The VVEP(RO) is the same as VEP or VVEP
      assert(ro_entry_type == CodeOffsets::Verified_Entry ||
             ro_entry_type == CodeOffsets::Verified_Value_Entry, "must be");
      offsets()->set_value(CodeOffsets::Verified_Value_Entry_RO,
                           offsets()->value(ro_entry_type));
    }
  } else {
    // All 3 entries are the same (no value-type packing)
    offsets()->set_value(CodeOffsets::Entry, _masm->offset());
    offsets()->set_value(CodeOffsets::Value_Entry, _masm->offset());
    if (needs_icache(compilation()->method())) {
      check_icache();
    }
    int offset = emit_std_entry(CodeOffsets::Verified_Value_Entry, NULL);
    offsets()->set_value(CodeOffsets::Verified_Entry, offset);
    offsets()->set_value(CodeOffsets::Verified_Value_Entry_RO, offset);
  }
}

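// Emit a single standard entry point and return its code offset.
// Verified_Entry and Verified_Value_Entry_RO emit the argument packing
// prologue for scalarized value-type arguments; the plain Verified_Value_Entry
// just builds the frame.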
int LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
  offsets()->set_value(entry, _masm->offset());
  int offset = _masm->offset();
  switch (entry) {
  case CodeOffsets::Verified_Entry:
    offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), _verified_value_entry);
    if (needs_clinit_barrier_on_entry(compilation()->method())) {
      clinit_barrier(compilation()->method());
    }
    return offset;
  case CodeOffsets::Verified_Value_Entry_RO:
    offset = _masm->verified_value_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), _verified_value_entry);
    if (needs_clinit_barrier_on_entry(compilation()->method())) {
      clinit_barrier(compilation()->method());
    }
    return offset;
  default:
    {
      assert(entry == CodeOffsets::Verified_Value_Entry, "must be");
      _masm->verified_value_entry();
      if (needs_clinit_barrier_on_entry(compilation()->method())) {
        clinit_barrier(compilation()->method());
      }
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      return offset;
    }
  }
}

void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      _masm->align(BytesPerWord);
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      emit_std_entries();
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be a CodeEmitInfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_tan:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_neg:
      negate(op->in_opr1(), op->result_opr(), op->in_opr2());
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


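// Build the fixed frame for the method. The compiled entry signature tells the
// macro assembler whether the frame size may have to be repaired for the
// scalarized value-type calling convention.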
void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(),
                     compilation()->compiled_entry_signature()->c1_needs_stack_repair(),
                     &_verified_value_entry);
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}


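// Dispatch a move to the routine matching the source and destination operand
// kinds (register, stack slot, constant, or memory address).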
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src,  dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


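// With +VerifyOops, check every oop recorded in the oop map, whether it lives
// in a register or in a stack slot.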
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}