/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
# include "vmreg_aarch64.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif


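// Pad the patch site with nops until a call instruction fits, then
// install the patching stub and queue it for out-of-line emission.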
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

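// Select the patch kind for a resolved constant: bytecodes with an
// optional appendix (JSR 292 call sites) need load_appendix patching,
// everything else loads a class mirror.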
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
#ifdef TARGET_ARCH_aarch64
  init(); // Target-dependent initialization
#endif
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
}

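// Bail out of the compilation if the code buffer is about to run out of space.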
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

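// Emit the queued slow-case stubs out of line, checking for buffer
// overflow and bail-outs before each one.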
void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
#ifndef AARCH64
    s->assert_no_unbound_labels();
#endif
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}

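// Emit any exception-handler adapter code that LinearScan attached to
// the handlers and record the PC offset at which each handler entry starts.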
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}

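// Emit one basic block: align backward-branch targets, record the PC
// offset of exception entries, then emit the block's LIR list.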
void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}

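// Run the peephole pass over a LIR list and emit each op in turn,
// tracking debug info for non-safepoint PCs when that is enabled.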
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

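// Track debug info for non-safepoint PCs: keep at most one pending
// instruction and flush it once a later op produces different state.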
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns the caller_bci for the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

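// Flush the pending non-safepoint: record its PC offset and describe
// every scope in its inline chain, from oldest caller to youngest.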
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

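// Emit a Java call: align the call site on MP systems, emit the static
// call stub out of line, then dispatch on the concrete call kind.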
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}

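// Emit code for LIR ops with a single input operand.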
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

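// Emit code for LIR ops that take no input operands.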
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}

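// Emit code for LIR ops with two input operands.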
void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}

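// Dispatch a move to the right target-specific routine based on the
// kinds of the source and destination operands.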
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src,  dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}

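// With VerifyOops, check every oop recorded in the op's oop map,
// whether it lives in a register or in a stack slot.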
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}